diff --git a/Makefile b/Makefile index e879da0d55..a738dd3387 100644 --- a/Makefile +++ b/Makefile @@ -38,7 +38,7 @@ else VERSION ?= ${VERSION} endif -RELEASE = v1.30.0 +RELEASE = v1.30.1 GOOS ?= linux ARCH ?= amd64 diff --git a/README.md b/README.md index 9babc62253..78933b1096 100644 --- a/README.md +++ b/README.md @@ -34,8 +34,9 @@ cloud-provider specific code out of the Kubernetes codebase. | v1.26.4 | v1.26 | - | | v1.27.3 | v1.27 | - | | v1.28.2 | v1.28 | - | -| v1.29.1 | v1.29 | - | -| v1.30.0 | v1.30 | - | +| v1.29.2 | v1.29 | - | +| v1.30.1 | v1.30 | - | +| v1.31.0 | v1.31 | - | Note: diff --git a/THIRD_PARTY_LICENSES.txt b/THIRD_PARTY_LICENSES.txt index e8f0ff0580..91e91726fd 100644 --- a/THIRD_PARTY_LICENSES.txt +++ b/THIRD_PARTY_LICENSES.txt @@ -4,6 +4,9 @@ SPDX:Apache-2.0 ---------------------------------- Copyright ----------------------------------- +Copyright (C) 2017, 2025, Oracle and/or its affiliates. +Copyright (C) 2018, 2025, Oracle and/or its affiliates. +Copyright (C) 2019, 2025, Oracle and/or its affiliates. Copyright (c) 2017, 2023 Oracle and/or its affiliates. All rights reserved. Copyright 2014 The Kubernetes Authors. Copyright 2016 The Kubernetes Authors. @@ -1179,6 +1182,7 @@ SPDX:MIT == Copyright Copyright (c) 2013-2014 Onsi Fakhouri +Copyright (c) 2014 Amit Kumar Gupta --------------------------------- (separator) ---------------------------------- @@ -1311,12 +1315,16 @@ END OF TERMS AND CONDITIONS == Copyright +Copyright (c) 2012-2020 Mat Ryer, Tyler Bunnell and contributors. +Copyright (c) 2013 The Go Authors. All rights reserved. Copyright (c) 2016, 2018, 2020, Oracle and/or its affiliates. Copyright (c) 2016, 2018, 2020, Oracle and/or its affiliates. All rights reserved. Copyright (c) 2016, 2018, 2024, Oracle and/or its affiliates. All rights reserved. Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. Copyright (c) 2016, 2023 Oracle and/or its affiliates. 
Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. +Copyright © 2012-2020 Mat Ryer, Tyler Bunnell and contributors. +Copyright © 2013 The Go Authors. All rights reserved. == Notices Copyright (c) 2016, 2018, 2020, Oracle and/or its affiliates. @@ -1770,6 +1778,7 @@ go.uber.org/zap SPDX:MIT == Copyright +Copyright (c) "*" Uber Technologies, Inc.") Copyright (c) 2016 Uber Technologies, Inc. Copyright (c) 2016, 2017 Uber Technologies, Inc. Copyright (c) 2016-2017 Uber Technologies, Inc. @@ -4242,5 +4251,5 @@ the Mozilla Public License, v. 2.0. === ATTRIBUTION-HELPER-GENERATED: -=== Attribution helper version: {Major:0 Minor:11 GitVersion:0.10.0-114-g3747dab9 GitCommit:3747dab92eb29c0dbe6409ffbb824b9ae3a04b87 GitTreeState:dirty BuildDate:2024-02-28T16:52:52Z GoVersion:go1.21.0 Compiler:gc Platform:darwin/amd64} -=== License file based on go.mod with md5 sum: 14c2ab8049fc7f49372d85182845f8d1 +=== Attribution helper version: {Major:0 Minor:11 GitVersion: GitCommit: GitTreeState:dirty BuildDate:1970-01-01T00:00:00Z GoVersion:go1.19.3 Compiler:gc Platform:darwin/arm64} +=== License file based on go.mod with md5 sum: cfaca8bb69c36cdf198d538fbab61dee diff --git a/docs/load-balancer-annotations.md b/docs/load-balancer-annotations.md index 084ecb0c58..23cc685a41 100644 --- a/docs/load-balancer-annotations.md +++ b/docs/load-balancer-annotations.md @@ -44,7 +44,7 @@ spec: | `service.beta.kubernetes.io/oci-load-balancer-health-check-interval` | The interval between [health checks][6] requests, in milliseconds. | `10000` | | | `service.beta.kubernetes.io/oci-load-balancer-connection-idle-timeout` | The maximum idle time, in seconds, allowed between two successive receive or two successive send operations between the client and backend servers. 
| `300` for TCP listeners, `60` for HTTP listeners | | | `service.beta.kubernetes.io/oci-load-balancer-security-list-management-mode` | Specifies the [security list mode](##security-list-management-modes) (`"All"`, `"Frontend"`,`"None"`) to configure how security lists are managed by the CCM. | `"All"` | | -| `service.beta.kubernetes.io/oci-load-balancer-backend-protocol` | Specifies protocol on which the listener accepts connection requests. To get a list of valid protocols, use the [`ListProtocols`][5] operation. | `"TCP"` | | +| `service.beta.kubernetes.io/oci-load-balancer-backend-protocol` | Specifies protocol on which the listener accepts connection requests. To get a list of valid protocols, use the [`ListProtocols`][5] operation. Supported[listener protocols][13] | `"TCP"` | | | `service.beta.kubernetes.io/oci-load-balancer-ssl-ports` | The ports to enable SSL termination on the corresponding load balancer listener | `443` | | | `service.beta.kubernetes.io/oci-load-balancer-tls-secret` | The TLS secret to install on the load balancer listeners which have SSL enabled. | `N/A` | | | `oci.oraclecloud.com/oci-network-security-groups` | Specifies Network Security Groups' OCIDs to be associated with the loadbalancer. Please refer [here][8] for NSG details. Example NSG OCID: `ocid1.networksecuritygroup.oc1.iad.aaa` | `N/A` | `"ocid1...aaa, ocid1...bbb"` | @@ -101,6 +101,7 @@ Note: | `oci.oraclecloud.com/oci-backend-network-security-group` | Specifies backend Network Security Group(s)' OCID(s) for management of ingress / egress security rules for the LB/NLB by the CCM. Example NSG OCID: `ocid1.networksecuritygroup.oc1.iad.aaa` | `N/A` | | `oci.oraclecloud.com/ingress-ip-mode` | Specifies ".status.loadBalancer.ingress.ipMode" for a Service with type set to LoadBalancer. 
Refer: [Specifying IPMode to adjust traffic routing][11] | `VIP` | | `oci-network-load-balancer.oraclecloud.com/is-ppv2-enabled` | To enable/disable PPv2 feature for the listeners of your NLB managed by the CCM. | `false` | +| `oci-network-load-balancer.oraclecloud.com/external-ip-only` | Specifies public ip only if set to true under ".status.loadBalancer.ingress.ip" for a Service. Refer: [Concealing a Network Load Balancer's Private IP Address][12] | `false` | Note: - The only security list management mode allowed when backend protocol is UDP is "None" @@ -145,3 +146,5 @@ Note: [9]: https://docs.oracle.com/en-us/iaas/Content/NetworkLoadBalancer/introducton.htm#Overview [10]: https://docs.oracle.com/en-us/iaas/Content/Balance/Concepts/balanceoverview.htm [11]: https://docs.oracle.com/en-us/iaas/Content/ContEng/Tasks/contengconfiguringloadbalancersnetworkloadbalancers-subtopic.htm#contengcreatingloadbalancer_topic_Specifying_IPMode +[12]: https://docs.oracle.com/en-us/iaas/Content/ContEng/Tasks/contengconfiguringloadbalancersnetworkloadbalancers-subtopic.htm#contengcreatingloadbalancer_topic_Skip_private_IP_addresses +[13]: https://docs.oracle.com/en-us/iaas/Content/ContEng/Tasks/contengcreatingloadbalancers-subtopic.htm#listenerprotocol diff --git a/go.mod b/go.mod index 4ef95d7722..0e477bd153 100644 --- a/go.mod +++ b/go.mod @@ -53,8 +53,8 @@ require ( github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.19.0 go.uber.org/zap v1.27.0 - golang.org/x/net v0.31.0 - golang.org/x/sys v0.27.0 // indirect + golang.org/x/net v0.33.0 + golang.org/x/sys v0.28.0 // indirect google.golang.org/grpc v1.62.1 gopkg.in/natefinch/lumberjack.v2 v2.2.1 gopkg.in/yaml.v2 v2.4.0 @@ -78,7 +78,7 @@ require ( require ( github.com/stretchr/testify v1.9.0 - golang.org/x/sync v0.9.0 + golang.org/x/sync v0.10.0 google.golang.org/protobuf v1.33.0 gopkg.in/yaml.v3 v3.0.1 k8s.io/apiextensions-apiserver v0.30.6 @@ -167,12 +167,12 @@ require ( go.opentelemetry.io/otel/trace v1.24.0 // indirect 
go.opentelemetry.io/proto/otlp v1.0.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.29.0 // indirect + golang.org/x/crypto v0.31.0 // indirect golang.org/x/exp v0.0.0-20231006140011-7918f672742d // indirect golang.org/x/mod v0.17.0 // indirect golang.org/x/oauth2 v0.18.0 // indirect - golang.org/x/term v0.26.0 // indirect - golang.org/x/text v0.20.0 // indirect + golang.org/x/term v0.27.0 // indirect + golang.org/x/text v0.21.0 // indirect golang.org/x/time v0.5.0 // indirect golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect google.golang.org/appengine v1.6.8 // indirect diff --git a/go.sum b/go.sum index 27421d92ae..f385e3c8ed 100644 --- a/go.sum +++ b/go.sum @@ -296,8 +296,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ= -golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -313,8 +313,8 @@ golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net 
v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo= -golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= +golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= +golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -322,8 +322,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= -golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -339,18 +339,18 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= -golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU= -golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= -golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git 
a/manifests/cloud-controller-manager/oci-cloud-controller-manager.yaml b/manifests/cloud-controller-manager/oci-cloud-controller-manager.yaml index 3dcc92b9ff..59a8e3fee3 100644 --- a/manifests/cloud-controller-manager/oci-cloud-controller-manager.yaml +++ b/manifests/cloud-controller-manager/oci-cloud-controller-manager.yaml @@ -42,7 +42,7 @@ spec: path: /etc/kubernetes containers: - name: oci-cloud-controller-manager - image: ghcr.io/oracle/cloud-provider-oci:v1.30.0 + image: ghcr.io/oracle/cloud-provider-oci:v1.30.1 command: ["/usr/local/bin/oci-cloud-controller-manager"] args: - --cloud-config=/etc/oci/cloud-provider.yaml diff --git a/manifests/container-storage-interface/oci-csi-controller-driver.yaml b/manifests/container-storage-interface/oci-csi-controller-driver.yaml index de60c7f583..6398ca71a7 100644 --- a/manifests/container-storage-interface/oci-csi-controller-driver.yaml +++ b/manifests/container-storage-interface/oci-csi-controller-driver.yaml @@ -96,7 +96,7 @@ spec: - --fss-csi-endpoint=unix://var/run/shared-tmpfs/csi-fss.sock command: - /usr/local/bin/oci-csi-controller-driver - image: ghcr.io/oracle/cloud-provider-oci:v1.30.0 + image: ghcr.io/oracle/cloud-provider-oci:v1.30.1 imagePullPolicy: IfNotPresent volumeMounts: - name: config diff --git a/manifests/container-storage-interface/oci-csi-node-driver.yaml b/manifests/container-storage-interface/oci-csi-node-driver.yaml index e1a1267b39..a974588876 100644 --- a/manifests/container-storage-interface/oci-csi-node-driver.yaml +++ b/manifests/container-storage-interface/oci-csi-node-driver.yaml @@ -117,7 +117,7 @@ spec: fieldPath: spec.nodeName - name: PATH value: /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/host/usr/bin:/host/sbin - image: ghcr.io/oracle/cloud-provider-oci:v1.30.0 + image: ghcr.io/oracle/cloud-provider-oci:v1.30.1 securityContext: privileged: true volumeMounts: diff --git a/manifests/flexvolume-driver/oci-flexvolume-driver.yaml 
b/manifests/flexvolume-driver/oci-flexvolume-driver.yaml index fb9c167fa2..21c5b07fbb 100644 --- a/manifests/flexvolume-driver/oci-flexvolume-driver.yaml +++ b/manifests/flexvolume-driver/oci-flexvolume-driver.yaml @@ -40,7 +40,7 @@ spec: secretName: oci-flexvolume-driver containers: - name: oci-flexvolume-driver - image: ghcr.io/oracle/cloud-provider-oci:v1.30.0 + image: ghcr.io/oracle/cloud-provider-oci:v1.30.1 command: ["/usr/local/bin/install.py", "-c", "/tmp/config.yaml"] securityContext: privileged: true @@ -76,7 +76,7 @@ spec: type: DirectoryOrCreate containers: - name: oci-flexvolume-driver - image: ghcr.io/oracle/cloud-provider-oci:v1.30.0 + image: ghcr.io/oracle/cloud-provider-oci:v1.30.1 command: ["/usr/local/bin/install.py"] securityContext: privileged: true diff --git a/manifests/volume-provisioner/oci-volume-provisioner-fss.yaml b/manifests/volume-provisioner/oci-volume-provisioner-fss.yaml index b13a11eb79..3d525563cf 100644 --- a/manifests/volume-provisioner/oci-volume-provisioner-fss.yaml +++ b/manifests/volume-provisioner/oci-volume-provisioner-fss.yaml @@ -35,7 +35,7 @@ spec: secretName: oci-volume-provisioner containers: - name: oci-volume-provisioner - image: ghcr.io/oracle/cloud-provider-oci:v1.30.0 + image: ghcr.io/oracle/cloud-provider-oci:v1.30.1 command: ["/usr/local/bin/oci-volume-provisioner"] env: - name: NODE_NAME diff --git a/manifests/volume-provisioner/oci-volume-provisioner.yaml b/manifests/volume-provisioner/oci-volume-provisioner.yaml index 0f73e15a30..7721987ca2 100644 --- a/manifests/volume-provisioner/oci-volume-provisioner.yaml +++ b/manifests/volume-provisioner/oci-volume-provisioner.yaml @@ -35,7 +35,7 @@ spec: secretName: oci-volume-provisioner containers: - name: oci-volume-provisioner - image: ghcr.io/oracle/cloud-provider-oci:v1.30.0 + image: ghcr.io/oracle/cloud-provider-oci:v1.30.1 command: ["/usr/local/bin/oci-volume-provisioner"] env: - name: NODE_NAME diff --git a/pkg/cloudprovider/providers/oci/ccm.go 
b/pkg/cloudprovider/providers/oci/ccm.go index 6af041abe3..d6a620e9f6 100644 --- a/pkg/cloudprovider/providers/oci/ccm.go +++ b/pkg/cloudprovider/providers/oci/ccm.go @@ -1,4 +1,4 @@ -// Copyright 2017 Oracle and/or its affiliates. All rights reserved. +// Copyright (C) 2017, 2025, Oracle and/or its affiliates. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -100,7 +100,7 @@ func NewCloudProvider(config *providercfg.Config) (cloudprovider.Interface, erro rateLimiter := client.NewRateLimiter(logger.Sugar(), config.RateLimiter) - c, err := client.New(logger.Sugar(), cp, &rateLimiter, config.Auth.TenancyID) + c, err := client.New(logger.Sugar(), cp, &rateLimiter, config) if err != nil { return nil, err } diff --git a/pkg/cloudprovider/providers/oci/load_balancer.go b/pkg/cloudprovider/providers/oci/load_balancer.go index e9b25a49eb..b6af906858 100644 --- a/pkg/cloudprovider/providers/oci/load_balancer.go +++ b/pkg/cloudprovider/providers/oci/load_balancer.go @@ -104,6 +104,7 @@ type CloudLoadBalancerProvider struct { logger *zap.SugaredLogger metricPusher *metrics.MetricPusher config *providercfg.Config + ociConfig *client.OCIClientConfig } type IpVersions struct { @@ -141,6 +142,10 @@ func (cp *CloudProvider) getLoadBalancerProvider(ctx context.Context, svc *v1.Se logger: cp.logger, metricPusher: cp.metricPusher, config: cp.config, + ociConfig: &client.OCIClientConfig{ + SaToken: serviceAccountToken, + TenancyId: cp.config.Auth.TenancyID, + }, }, nil } @@ -207,8 +212,11 @@ func (cp *CloudProvider) GetLoadBalancer(ctx context.Context, clusterName string return nil, false, err } - - lbStatus, err := loadBalancerToStatus(lb, nil) + skipPrivateIP, err := isSkipPrivateIP(service) + if err != nil { + return nil, false, err + } + lbStatus, err := loadBalancerToStatus(lb, nil, skipPrivateIP) return lbStatus, err == nil, err } @@ -425,7 +433,7 @@ func (clb *CloudLoadBalancerProvider) 
createLoadBalancer(ctx context.Context, sp } if spec.LoadBalancerIP != "" { - reservedIpOCID, err := getReservedIpOcidByIpAddress(ctx, spec.LoadBalancerIP, clb.client.Networking(nil)) + reservedIpOCID, err := getReservedIpOcidByIpAddress(ctx, spec.LoadBalancerIP, clb.client.Networking(clb.ociConfig)) if err != nil { return nil, "", err } @@ -459,7 +467,12 @@ func (clb *CloudLoadBalancerProvider) createLoadBalancer(ctx context.Context, sp } logger.With("loadBalancerID", *lb.Id).Info("Load balancer created") - status, err := loadBalancerToStatus(lb, spec.ingressIpMode) + + skipPrivateIP, err := isSkipPrivateIP(spec.service) + if err != nil { + return nil, "", err + } + status, err := loadBalancerToStatus(lb, spec.ingressIpMode, skipPrivateIP) if status != nil && len(status.Ingress) > 0 { // If the LB is successfully provisioned then open lb/node subnet seclists egress/ingress. @@ -898,7 +911,11 @@ func (cp *CloudProvider) EnsureLoadBalancer(ctx context.Context, clusterName str dimensionsMap[metrics.BackendSetsCountDimension] = strconv.Itoa(len(lb.BackendSets)) metrics.SendMetricData(cp.metricPusher, getMetric(loadBalancerType, Update), syncTime, dimensionsMap) - return loadBalancerToStatus(lb, spec.ingressIpMode) + skipPrivateIP, err := isSkipPrivateIP(service) + if err != nil { + return nil, err + } + return loadBalancerToStatus(lb, spec.ingressIpMode, skipPrivateIP) } func getDefaultLBSubnets(subnet1, subnet2 string) []string { @@ -1949,7 +1966,7 @@ func (clb *CloudLoadBalancerProvider) updateLoadBalancerIpVersion(ctx context.Co } // Given an OCI load balancer, return a LoadBalancerStatus -func loadBalancerToStatus(lb *client.GenericLoadBalancer, ipMode *v1.LoadBalancerIPMode) (*v1.LoadBalancerStatus, error) { +func loadBalancerToStatus(lb *client.GenericLoadBalancer, ipMode *v1.LoadBalancerIPMode, skipPrivateIp bool) (*v1.LoadBalancerStatus, error) { if len(lb.IpAddresses) == 0 { return nil, errors.Errorf("no ip addresses found for load balancer %q", 
*lb.DisplayName) } @@ -1959,6 +1976,12 @@ func loadBalancerToStatus(lb *client.GenericLoadBalancer, ipMode *v1.LoadBalance if ip.IpAddress == nil { continue // should never happen but appears to when EnsureLoadBalancer is called with 0 nodes. } + + if skipPrivateIp { + if !pointer.BoolDeref(ip.IsPublic, false) { + continue + } + } ingress = append(ingress, v1.LoadBalancerIngress{IP: *ip.IpAddress, IPMode: ipMode}) } @@ -2029,31 +2052,23 @@ func (cp *CloudProvider) getFrontendNsgByName(ctx context.Context, logger *zap.S func (cp *CloudProvider) checkPendingLBWorkRequests(ctx context.Context, logger *zap.SugaredLogger, lbProvider CloudLoadBalancerProvider, lb *client.GenericLoadBalancer, service *v1.Service, startTime time.Time) (err error) { listWorkRequestTime := time.Now() loadBalancerType := getLoadBalancerType(service) - lbName := GetLoadBalancerName(service) - dimensionsMap := make(map[string]string) - dimensionsMap[metrics.ResourceOCIDDimension] = *lb.Id - lbInProgressWorkRequests, err := lbProvider.lbClient.ListWorkRequests(ctx, *lb.CompartmentId, *lb.Id) - logger.With("loadBalancerID", *lb.Id).Infof("time (in seconds) to list work-requests for LB %f", time.Since(listWorkRequestTime).Seconds()) - if err != nil { - logger.With(zap.Error(err)).Error("Failed to list work-requests in-progress") - errorType := util.GetError(err) - lbMetricDimension := util.GetMetricDimensionForComponent(errorType, util.LoadBalancerType) - dimensionsMap[metrics.ComponentDimension] = lbMetricDimension - dimensionsMap[metrics.ResourceOCIDDimension] = lbName - metrics.SendMetricData(cp.metricPusher, getMetric(loadBalancerType, List), time.Since(startTime).Seconds(), dimensionsMap) - return err - } - for _, wr := range lbInProgressWorkRequests { - switch loadBalancerType { - case NLB: - if wr.Status == string(networkloadbalancer.OperationStatusInProgress) || wr.Status == string(networkloadbalancer.OperationStatusAccepted) { - logger.With("loadBalancerID", *lb.Id).Infof("current 
in-progress work requests for Network Load Balancer %s", *wr.Id) - return errors.New("Network Load Balancer has work requests in progress, will wait and retry") - } - default: + switch loadBalancerType { + case NLB: + if *lb.LifecycleState == string(networkloadbalancer.LifecycleStateUpdating) { + logger.Info("Load Balancer is in UPDATING state, possibly a work request is in progress") + return errors.New("Load Balancer might have work requests in progress, will wait and retry") + } + default: + lbInProgressWorkRequests, err := lbProvider.lbClient.ListWorkRequests(ctx, *lb.CompartmentId, *lb.Id) + logger.Infof("time (in seconds) to list work-requests for LB %f", time.Since(listWorkRequestTime).Seconds()) + if err != nil { + logger.With(zap.Error(err)).Error("Failed to list work-requests in-progress") + return err + } + for _, wr := range lbInProgressWorkRequests { if *wr.LifecycleState == string(loadbalancer.WorkRequestLifecycleStateInProgress) || *wr.LifecycleState == string(loadbalancer.WorkRequestLifecycleStateAccepted) { - logger.With("loadBalancerID", *lb.Id).Infof("current in-progress work requests for Load Balancer %s", *wr.Id) + logger.Infof("current in-progress work requests for Load Balancer %s", *wr.Id) return errors.New("Load Balancer has work requests in progress, will wait and retry") } } diff --git a/pkg/cloudprovider/providers/oci/load_balancer_spec.go b/pkg/cloudprovider/providers/oci/load_balancer_spec.go index a35021d9e8..5265c81188 100644 --- a/pkg/cloudprovider/providers/oci/load_balancer_spec.go +++ b/pkg/cloudprovider/providers/oci/load_balancer_spec.go @@ -244,6 +244,14 @@ const ( // ServiceAnnotationNetworkLoadBalancerIsPpv2Enabled is a service annotation to enable/disable PPv2 feature for the listeners of this NLB. 
ServiceAnnotationNetworkLoadBalancerIsPpv2Enabled = "oci-network-load-balancer.oraclecloud.com/is-ppv2-enabled" + + // ServiceAnnotationNetworkLoadBalancerExternalIpOnly is a service a boolean annotation to skip private ip when assigning to ingress resource for NLB service + ServiceAnnotationNetworkLoadBalancerExternalIpOnly = "oci-network-load-balancer.oraclecloud.com/external-ip-only" +) + +const ( + ProtocolGrpc = "GRPC" + DefaultCipherSuiteForGRPC = "oci-default-http2-ssl-cipher-suite-v1" ) // certificateData is a structure containing the data about a K8S secret required @@ -1032,10 +1040,10 @@ func getListenersOciLoadBalancer(svc *v1.Service, sslCfg *SSLConfig) (map[string if p == "" { p = DefaultLoadBalancerBEProtocol } - if strings.EqualFold(p, "HTTP") || strings.EqualFold(p, "TCP") { + if strings.EqualFold(p, "HTTP") || strings.EqualFold(p, "TCP") || strings.EqualFold(p, "GRPC") { protocol = p } else { - return nil, fmt.Errorf("invalid backend protocol %q requested for load balancer listener. Only 'HTTP' and 'TCP' protocols supported", p) + return nil, fmt.Errorf("invalid backend protocol %q requested for load balancer listener. 
Only 'HTTP', 'TCP' and 'GRPC' protocols supported", p) } } port := int(servicePort.Port) @@ -1051,6 +1059,15 @@ func getListenersOciLoadBalancer(svc *v1.Service, sslCfg *SSLConfig) (map[string return nil, err } } + if strings.EqualFold(protocol, "GRPC") { + protocol = ProtocolGrpc + if sslConfiguration == nil { + return nil, fmt.Errorf("SSL configuration cannot be empty for GRPC protocol") + } + if sslConfiguration.CipherSuiteName == nil { + sslConfiguration.CipherSuiteName = common.String(DefaultCipherSuiteForGRPC) + } + } name := getListenerName(protocol, port) listener := client.GenericListener{ @@ -1569,7 +1586,7 @@ func isServiceDualStack(svc *v1.Service) bool { return false } -// patchIngressIpMode reads ingress ipMode specified in the service annotation if exists +// getIngressIpMode reads ingress ipMode specified in the service annotation if exists func getIngressIpMode(service *v1.Service) (*v1.LoadBalancerIPMode, error) { var ipMode, exists = "", false @@ -1588,3 +1605,33 @@ func getIngressIpMode(service *v1.Service) (*v1.LoadBalancerIPMode, error) { return nil, errors.New("IpMode can only be set as Proxy or VIP") } } + +// isSkipPrivateIP determines if skipPrivateIP annotation is set or not +func isSkipPrivateIP(svc *v1.Service) (bool, error) { + lbType := getLoadBalancerType(svc) + annotationValue := "" + annotationExists := false + annotationString := "" + annotationValue, annotationExists = svc.Annotations[ServiceAnnotationNetworkLoadBalancerExternalIpOnly] + if !annotationExists { + return false, nil + } + + if lbType != NLB { + return false, nil + } + + internal, err := isInternalLB(svc) + if err != nil { + return false, err + } + if internal { + return false, nil + } + + skipPrivateIp, err := strconv.ParseBool(annotationValue) + if err != nil { + return false, errors.Wrap(err, fmt.Sprintf("invalid value: %s provided for annotation: %s", annotationValue, annotationString)) + } + return skipPrivateIp, nil +} diff --git 
a/pkg/cloudprovider/providers/oci/load_balancer_spec_test.go b/pkg/cloudprovider/providers/oci/load_balancer_spec_test.go index 69935306c9..ed53086499 100644 --- a/pkg/cloudprovider/providers/oci/load_balancer_spec_test.go +++ b/pkg/cloudprovider/providers/oci/load_balancer_spec_test.go @@ -18,7 +18,6 @@ import ( "context" "encoding/json" "fmt" - "k8s.io/utils/pointer" "net/http" "reflect" "testing" @@ -28,6 +27,7 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/utils/pointer" providercfg "github.com/oracle/oci-cloud-controller-manager/pkg/cloudprovider/providers/oci/config" "github.com/oracle/oci-cloud-controller-manager/pkg/oci/client" @@ -4543,6 +4543,162 @@ func TestNewLBSpecSuccess(t *testing.T) { }, }, }, + "GRPC listeners": { + defaultSubnetOne: "one", + defaultSubnetTwo: "two", + IpVersions: &IpVersions{ + IpFamilies: []string{IPv4}, + IpFamilyPolicy: common.String(string(v1.IPFamilyPolicySingleStack)), + LbEndpointIpVersion: GenericIpVersion(client.GenericIPv4), + ListenerBackendIpVersion: []client.GenericIpVersion{client.GenericIPv4}, + }, + nodes: []*v1.Node{ + { + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{}, + Spec: v1.NodeSpec{ + ProviderID: testNodeString, + }, + Status: v1.NodeStatus{ + Capacity: nil, + Allocatable: nil, + Phase: "", + Conditions: nil, + Addresses: []v1.NodeAddress{ + { + Address: "0.0.0.0", + Type: "InternalIP", + }, + }, + DaemonEndpoints: v1.NodeDaemonEndpoints{}, + NodeInfo: v1.NodeSystemInfo{}, + Images: nil, + VolumesInUse: nil, + VolumesAttached: nil, + Config: nil, + }, + }, + }, + service: &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "kube-system", + Name: "testservice", + UID: "test-uid", + Annotations: map[string]string{ + ServiceAnnotationLoadBalancerBEProtocol: "GRPC", + }, + }, + Spec: v1.ServiceSpec{ + IPFamilies: []v1.IPFamily{v1.IPFamily(IPv4)}, + SessionAffinity: v1.ServiceAffinityNone, + Ports: 
[]v1.ServicePort{ + { + Protocol: v1.ProtocolTCP, + Port: int32(443), + }, + }, + }, + }, + expected: &LBSpec{ + Name: "test-uid", + Type: "lb", + Shape: "100Mbps", + Internal: false, + Subnets: []string{"one", "two"}, + Listeners: map[string]client.GenericListener{ + fmt.Sprintf("GRPC-443"): { + Name: common.String("GRPC-443"), + DefaultBackendSetName: common.String("TCP-443"), + Port: common.Int(443), + Protocol: common.String("GRPC"), + SslConfiguration: &client.GenericSslConfigurationDetails{ + CertificateName: &listenerSecret, + VerifyDepth: common.Int(0), + VerifyPeerCertificate: common.Bool(false), + CipherSuiteName: common.String(DefaultCipherSuiteForGRPC), + }, + }, + }, + BackendSets: map[string]client.GenericBackendSetDetails{ + "TCP-443": { + Name: common.String("TCP-443"), + Backends: []client.GenericBackend{{IpAddress: common.String("0.0.0.0"), Port: common.Int(0), Weight: common.Int(1), TargetId: &testNodeString}}, + HealthChecker: &client.GenericHealthChecker{ + Protocol: "HTTP", + IsForcePlainText: common.Bool(false), + Port: common.Int(10256), + UrlPath: common.String("/healthz"), + Retries: common.Int(3), + TimeoutInMillis: common.Int(3000), + IntervalInMillis: common.Int(10000), + ReturnCode: common.Int(http.StatusOK), + }, + IsPreserveSource: common.Bool(false), + Policy: common.String("ROUND_ROBIN"), + SslConfiguration: &client.GenericSslConfigurationDetails{ + CertificateName: &backendSecret, + VerifyDepth: common.Int(0), + VerifyPeerCertificate: common.Bool(false), + }, + IpVersion: GenericIpVersion(client.GenericIPv4), + }, + }, + IsPreserveSource: common.Bool(false), + NetworkSecurityGroupIds: []string{}, + SourceCIDRs: []string{"0.0.0.0/0"}, + Ports: map[string]portSpec{ + "TCP-443": { + ListenerPort: 443, + HealthCheckerPort: 10256, + }, + }, + securityListManager: newSecurityListManagerNOOP(), + SSLConfig: &SSLConfig{ + Ports: sets.NewInt(443), + ListenerSSLSecretName: listenerSecret, + BackendSetSSLSecretName: backendSecret, + }, + 
ManagedNetworkSecurityGroup: &ManagedNetworkSecurityGroup{frontendNsgId: "", backendNsgId: []string{}, nsgRuleManagementMode: ManagementModeNone}, + IpVersions: &IpVersions{ + IpFamilies: []string{IPv4}, + IpFamilyPolicy: common.String(string(v1.IPFamilyPolicySingleStack)), + LbEndpointIpVersion: GenericIpVersion(client.GenericIPv4), + ListenerBackendIpVersion: []client.GenericIpVersion{client.GenericIPv4}, + }, + nodes: []*v1.Node{ + { + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{}, + Spec: v1.NodeSpec{ + ProviderID: testNodeString, + }, + Status: v1.NodeStatus{ + Capacity: nil, + Allocatable: nil, + Phase: "", + Conditions: nil, + Addresses: []v1.NodeAddress{ + { + Address: "0.0.0.0", + Type: "InternalIP", + }, + }, + DaemonEndpoints: v1.NodeDaemonEndpoints{}, + NodeInfo: v1.NodeSystemInfo{}, + Images: nil, + VolumesInUse: nil, + VolumesAttached: nil, + Config: nil, + }, + }, + }, + }, + sslConfig: &SSLConfig{ + Ports: sets.NewInt(443), + ListenerSSLSecretName: listenerSecret, + BackendSetSSLSecretName: backendSecret, + }, + }, } cp := &CloudProvider{ @@ -4577,7 +4733,7 @@ func TestNewLBSpecSuccess(t *testing.T) { if !reflect.DeepEqual(result, tc.expected) { results, _ := json.Marshal(result) expected, _ := json.Marshal(tc.expected) - t.Errorf("Expected load balancer spec failed want: %s \n got: %s \n", expected, results) + t.Errorf("Expected load balancer spec failed\nExpected: %s\nResults: %s\n", expected, results) } }) } @@ -8291,6 +8447,132 @@ func Test_getListeners(t *testing.T) { }, }, }, + { + name: "grpc protocol no ssl", + service: &v1.Service{ + Spec: v1.ServiceSpec{ + Ports: []v1.ServicePort{ + { + Protocol: v1.Protocol("GRPC"), + Port: int32(80), + }, + }, + }, + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{}, + }, + }, + sslConfig: nil, + listenerBackendIpVersion: []string{IPv4}, + want: nil, + }, + { + name: "grpc protocol with ssl configuration and smart default cipher suite", + service: &v1.Service{ + Spec: 
v1.ServiceSpec{ + Ports: []v1.ServicePort{ + { + Protocol: v1.Protocol("TCP"), + Port: int32(443), + }, + }, + }, + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + ServiceAnnotationLoadBalancerBEProtocol: ProtocolGrpc, + }, + }, + }, + listenerBackendIpVersion: []string{IPv4}, + sslConfig: &SSLConfig{ + Ports: sets.NewInt(443), + ListenerSSLSecretName: listenerSecret, + BackendSetSSLSecretName: backendSecret, + }, + want: map[string]client.GenericListener{ + "GRPC-443": { + Name: common.String("GRPC-443"), + Port: common.Int(443), + Protocol: common.String("GRPC"), + DefaultBackendSetName: common.String("TCP-443"), + SslConfiguration: &client.GenericSslConfigurationDetails{ + CertificateName: &listenerSecret, + VerifyDepth: common.Int(0), + VerifyPeerCertificate: common.Bool(false), + CipherSuiteName: common.String(DefaultCipherSuiteForGRPC), + }, + }, + }, + }, + { + name: "grpc protocol with ssl configuration and cipher suite", + service: &v1.Service{ + Spec: v1.ServiceSpec{ + Ports: []v1.ServicePort{ + { + Protocol: v1.Protocol("TCP"), + Port: int32(443), + }, + }, + }, + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + ServiceAnnotationLoadBalancerBEProtocol: ProtocolGrpc, + ServiceAnnotationLoadbalancerListenerSSLConfig: `{"cipherSuiteName":"oci-default-http2-ssl-cipher-suite-v1", "protocols":["TLSv1.2"]}`, + ServiceAnnotationLoadBalancerSSLPorts: "443", + }, + }, + }, + listenerBackendIpVersion: []string{IPv4}, + sslConfig: &SSLConfig{ + Ports: sets.NewInt(443), + ListenerSSLSecretName: listenerSecret, + BackendSetSSLSecretName: backendSecret, + }, + want: map[string]client.GenericListener{ + "GRPC-443": { + Name: common.String("GRPC-443"), + Port: common.Int(443), + Protocol: common.String("GRPC"), + DefaultBackendSetName: common.String("TCP-443"), + SslConfiguration: &client.GenericSslConfigurationDetails{ + CertificateName: &listenerSecret, + VerifyDepth: common.Int(0), + VerifyPeerCertificate: common.Bool(false), + 
CipherSuiteName: common.String("oci-default-http2-ssl-cipher-suite-v1"), + Protocols: []string{"TLSv1.2"}, + }, + }, + }, + }, + { + name: "Listeners with cipher suites", + service: &v1.Service{ + Spec: v1.ServiceSpec{ + Ports: []v1.ServicePort{ + { + Protocol: v1.ProtocolTCP, + Port: int32(80), + }, + }, + }, + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + ServiceAnnotationLoadbalancerListenerSSLConfig: `{"cipherSuiteName":"oci-default-http2-ssl-cipher-suite-v1", "protocols":["TLSv1.2"]}`, + }, + }, + }, + listenerBackendIpVersion: []string{IPv4}, + sslConfig: nil, + want: map[string]client.GenericListener{ + "TCP-80": { + Name: common.String("TCP-80"), + Port: common.Int(80), + Protocol: common.String("TCP"), + DefaultBackendSetName: common.String("TCP-80"), + }, + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -11576,3 +11858,74 @@ func Test_getLoadBalancerSourceRanges(t *testing.T) { }) } } + +func TestIsSkipPrivateIP_NLB(t *testing.T) { + tests := []struct { + name string + svcAnnotations map[string]string + expected bool + wantErr bool + }{ + { + name: "skip-private-ip-enabled", + svcAnnotations: map[string]string{ + ServiceAnnotationLoadBalancerType: NLB, + ServiceAnnotationNetworkLoadBalancerExternalIpOnly: "true", + }, + expected: true, + wantErr: false, + }, + { + name: "skip-private-ip-disabled", + svcAnnotations: map[string]string{ + ServiceAnnotationLoadBalancerType: NLB, + ServiceAnnotationNetworkLoadBalancerExternalIpOnly: "false", + }, + expected: false, + wantErr: false, + }, + { + name: "skip-private-ip-invalid-value", + svcAnnotations: map[string]string{ + ServiceAnnotationLoadBalancerType: NLB, + ServiceAnnotationNetworkLoadBalancerExternalIpOnly: "invalid", + }, + expected: false, + wantErr: true, + }, + { + name: "skip-private-ip with internal loadbalancer", + svcAnnotations: map[string]string{ + ServiceAnnotationLoadBalancerType: NLB, + ServiceAnnotationNetworkLoadBalancerInternal: "true", + 
ServiceAnnotationNetworkLoadBalancerExternalIpOnly: "true", + }, + expected: false, + wantErr: false, + }, + { + name: "no-skip-private-ip-annotation", + svcAnnotations: map[string]string{}, + expected: false, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + svc := &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: tt.svcAnnotations, + }, + } + got, err := isSkipPrivateIP(svc) + if (err != nil) != tt.wantErr { + t.Errorf("isSkipPrivateIP() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.expected { + t.Errorf("isSkipPrivateIP() = %v, expected %v", got, tt.expected) + } + }) + } +} diff --git a/pkg/cloudprovider/providers/oci/load_balancer_test.go b/pkg/cloudprovider/providers/oci/load_balancer_test.go index f6a1bd0590..c917d33859 100644 --- a/pkg/cloudprovider/providers/oci/load_balancer_test.go +++ b/pkg/cloudprovider/providers/oci/load_balancer_test.go @@ -1359,7 +1359,7 @@ func Test_getGoadBalancerStatus(t *testing.T) { } for name, test := range tests { t.Run(name, func(t *testing.T) { - actual, err := loadBalancerToStatus(test.lb, test.setIpMode) + actual, err := loadBalancerToStatus(test.lb, test.setIpMode, false) if !assertError(err, test.wantErr) { t.Errorf("Expected error = %v, but got %v", test.wantErr, err) return @@ -2249,3 +2249,132 @@ func Test_getOciIpVersions(t *testing.T) { }) } } + +func TestLoadBalancerToStatus(t *testing.T) { + type testCase struct { + name string + lb *client.GenericLoadBalancer + ipMode v1.LoadBalancerIPMode + skipPrivateIp bool + expectedOutput *v1.LoadBalancerStatus + expectedError error + } + + testCases := []testCase{ + { + name: "No IP Addresses", + lb: &client.GenericLoadBalancer{ + DisplayName: common.String("test-lb"), + IpAddresses: []client.GenericIpAddress{}, + }, + ipMode: v1.LoadBalancerIPModeVIP, + skipPrivateIp: false, + expectedOutput: nil, + expectedError: errors1.Errorf("no ip addresses found for load balancer \"test-lb\""), + }, + 
{ + name: "Single Public IP Address", + lb: &client.GenericLoadBalancer{ + DisplayName: common.String("test-lb"), + IpAddresses: []client.GenericIpAddress{ + { + IpAddress: common.String("192.168.1.100"), + IsPublic: common.Bool(true), + }, + }, + }, + ipMode: v1.LoadBalancerIPModeVIP, + skipPrivateIp: false, + expectedOutput: &v1.LoadBalancerStatus{ + Ingress: []v1.LoadBalancerIngress{ + { + IP: "192.168.1.100", + }, + }, + }, + expectedError: nil, + }, + { + name: "Multiple IP Addresses", + lb: &client.GenericLoadBalancer{ + DisplayName: common.String("test-lb"), + IpAddresses: []client.GenericIpAddress{ + { + IpAddress: common.String("192.168.1.100"), + IsPublic: common.Bool(true), + }, + { + IpAddress: common.String("10.0.0.100"), + IsPublic: common.Bool(false), + }, + }, + }, + ipMode: v1.LoadBalancerIPModeVIP, + skipPrivateIp: false, + expectedOutput: &v1.LoadBalancerStatus{ + Ingress: []v1.LoadBalancerIngress{ + { + IP: "192.168.1.100", + }, + { + IP: "10.0.0.100", + }, + }, + }, + expectedError: nil, + }, + { + name: "Skip Private IP", + lb: &client.GenericLoadBalancer{ + DisplayName: common.String("test-lb"), + IpAddresses: []client.GenericIpAddress{ + { + IpAddress: common.String("192.168.1.100"), + IsPublic: common.Bool(true), + }, + { + IpAddress: common.String("10.0.0.100"), + IsPublic: common.Bool(false), + }, + }, + }, + ipMode: v1.LoadBalancerIPModeVIP, + skipPrivateIp: true, + expectedOutput: &v1.LoadBalancerStatus{ + Ingress: []v1.LoadBalancerIngress{ + { + IP: "192.168.1.100", + }, + }, + }, + expectedError: nil, + }, + { + name: "Nil IpAddress", + lb: &client.GenericLoadBalancer{ + DisplayName: common.String("test-lb"), + IpAddresses: []client.GenericIpAddress{ + { + IpAddress: nil, + }, + }, + }, + ipMode: v1.LoadBalancerIPModeVIP, + skipPrivateIp: false, + expectedOutput: &v1.LoadBalancerStatus{ + Ingress: []v1.LoadBalancerIngress{}, + }, + expectedError: nil, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + 
actualOutput, actualError := loadBalancerToStatus(tc.lb, &tc.ipMode, tc.skipPrivateIp) + if !reflect.DeepEqual(tc.expectedOutput, actualOutput) { t.Errorf("loadBalancerToStatus() = %+v, expected %+v", actualOutput, tc.expectedOutput) } + if tc.expectedError != nil && (actualError == nil || tc.expectedError.Error() != actualError.Error()) { + t.Errorf("loadBalancerToStatus() error = %v, expected %v", actualError, tc.expectedError) + } + }) + } +} diff --git a/pkg/cloudprovider/providers/oci/load_balancer_util.go b/pkg/cloudprovider/providers/oci/load_balancer_util.go index 6f4f985e54..7fb5cf56cf 100644 --- a/pkg/cloudprovider/providers/oci/load_balancer_util.go +++ b/pkg/cloudprovider/providers/oci/load_balancer_util.go @@ -440,11 +440,14 @@ func getSSLConfigurationChanges(actual *client.GenericSslConfigurationDetails, d if toBool(actual.VerifyPeerCertificate) != toBool(desired.VerifyPeerCertificate) { sslConfigurationChanges = append(sslConfigurationChanges, fmt.Sprintf(changeFmtStr, "Listener:SSLConfiguration:VerifyPeerCertificate", toBool(actual.VerifyPeerCertificate), toBool(desired.VerifyPeerCertificate))) } - if toString(actual.CipherSuiteName) != toString(desired.CipherSuiteName) { - sslConfigurationChanges = append(sslConfigurationChanges, fmt.Sprintf(changeFmtStr, "Listener:SSLConfiguration:CipherSuiteName", toString(actual.CipherSuiteName), toString(desired.CipherSuiteName))) - } - if !reflect.DeepEqual(actual.Protocols, desired.Protocols) { - sslConfigurationChanges = append(sslConfigurationChanges, fmt.Sprintf(changeFmtStr, "Listener:SSLConfiguration:Protocols", strings.Join(actual.Protocols, ","), strings.Join(desired.Protocols, ","))) + + if desired.CipherSuiteName != nil && len(*desired.CipherSuiteName) != 0 { + if toString(actual.CipherSuiteName) != toString(desired.CipherSuiteName) { + sslConfigurationChanges = append(sslConfigurationChanges, fmt.Sprintf(changeFmtStr, "Listener:SSLConfiguration:CipherSuiteName", toString(actual.CipherSuiteName), toString(desired.CipherSuiteName))) + } + if !reflect.DeepEqual(actual.Protocols, desired.Protocols) { + sslConfigurationChanges = append(sslConfigurationChanges, fmt.Sprintf(changeFmtStr, 
"Listener:SSLConfiguration:Protocols", strings.Join(actual.Protocols, ","), strings.Join(desired.Protocols, ","))) + } } return sslConfigurationChanges diff --git a/pkg/cloudprovider/providers/oci/load_balancer_util_test.go b/pkg/cloudprovider/providers/oci/load_balancer_util_test.go index 16a1cf86ba..c317668c67 100644 --- a/pkg/cloudprovider/providers/oci/load_balancer_util_test.go +++ b/pkg/cloudprovider/providers/oci/load_balancer_util_test.go @@ -1555,6 +1555,22 @@ func TestHasListenerChanged(t *testing.T) { expected: false, }, + { + name: "GRPC protocol", + desired: client.GenericListener{ + Name: common.String("TCP-443"), + DefaultBackendSetName: common.String("TCP-443"), + Port: common.Int(443), + Protocol: common.String("TCP"), + }, + actual: client.GenericListener{ + Name: common.String("GRPC-443"), + DefaultBackendSetName: common.String("TCP-443"), + Port: common.Int(443), + Protocol: common.String("GRPC"), + }, + expected: true, + }, } for _, tt := range testCases { @@ -2139,9 +2155,12 @@ func TestGetSSLConfigurationChanges(t *testing.T) { { name: "Protocol Changed", desired: client.GenericSslConfigurationDetails{ - Protocols: []string{"TLSv1.2"}, + CipherSuiteName: common.String("value"), + Protocols: []string{"TLSv1.2"}, + }, + actual: client.GenericSslConfigurationDetails{ + CipherSuiteName: common.String("value"), }, - actual: client.GenericSslConfigurationDetails{}, expected: []string{ fmt.Sprintf(changeFmtStr, "Listener:SSLConfiguration:Protocols", "", "TLSv1.2"), }, @@ -2149,15 +2168,41 @@ func TestGetSSLConfigurationChanges(t *testing.T) { { name: "TLS Protocol Changed", desired: client.GenericSslConfigurationDetails{ - Protocols: []string{"TLSv1.1", "TLSv1.2"}, + CipherSuiteName: common.String("value"), + Protocols: []string{"TLSv1.1", "TLSv1.2"}, }, actual: client.GenericSslConfigurationDetails{ - Protocols: []string{"TLSv1.1", "TLSv1.2", "TLSv1.3"}, + CipherSuiteName: common.String("value"), + Protocols: []string{"TLSv1.1", "TLSv1.2", 
"TLSv1.3"}, }, expected: []string{ fmt.Sprintf(changeFmtStr, "Listener:SSLConfiguration:Protocols", "", "TLSv1.2"), }, }, + { + name: "Empty ciphersuite test", + desired: client.GenericSslConfigurationDetails{ + CipherSuiteName: common.String(""), + Protocols: []string{"TLSv1.1", "TLSv1.2"}, + }, + actual: client.GenericSslConfigurationDetails{ + CipherSuiteName: common.String("value"), + Protocols: []string{"TLSv1.1", "TLSv1.2", "TLSv1.3"}, + }, + expected: []string{}, + }, + { + name: "Default scenario, nil value", + desired: client.GenericSslConfigurationDetails{ + CipherSuiteName: nil, + Protocols: []string{"TLSv1.1", "TLSv1.2"}, + }, + actual: client.GenericSslConfigurationDetails{ + CipherSuiteName: common.String("value"), + Protocols: []string{"TLSv1.1", "TLSv1.2", "TLSv1.3"}, + }, + expected: []string{}, + }, } for _, tt := range testCases { diff --git a/pkg/cloudprovider/providers/oci/node_info_controller.go b/pkg/cloudprovider/providers/oci/node_info_controller.go index 1c64698585..f28e83dfb5 100644 --- a/pkg/cloudprovider/providers/oci/node_info_controller.go +++ b/pkg/cloudprovider/providers/oci/node_info_controller.go @@ -17,6 +17,7 @@ package oci import ( "context" "fmt" + "strings" "time" "go.uber.org/zap" @@ -43,6 +44,7 @@ import ( const ( FaultDomainLabel = "oci.oraclecloud.com/fault-domain" CompartmentIDAnnotation = "oci.oraclecloud.com/compartment-id" + AvailabilityDomainLabel = "csi-ipv6-full-ad-name" timeout = 10 * time.Second ) @@ -198,10 +200,14 @@ func getNodePatchBytes(cacheNode *v1.Node, instance *core.Instance, logger *zap. 
return nil } _, isFaultDomainLabelPresent := cacheNode.ObjectMeta.Labels[FaultDomainLabel] + _, isAvailabilityDomainLabelPresent := cacheNode.ObjectMeta.Labels[AvailabilityDomainLabel] _, isCompartmentIDAnnotationPresent := cacheNode.ObjectMeta.Annotations[CompartmentIDAnnotation] + //labels only allow ., -, _ special characters + availabilityDomainLabelValue := strings.ReplaceAll(*instance.AvailabilityDomain, ":", ".") + var nodePatchBytes []byte - if isFaultDomainLabelPresent { + if isFaultDomainLabelPresent && (!client.IsIpv6SingleStackCluster() || isAvailabilityDomainLabelPresent) { //In this case CompartmentIDAnnotation not present but FaultDomainLabel present logger.Infof("Adding node annotation from cloud provider: %s=%s", CompartmentIDAnnotation, *instance.CompartmentId) nodePatchBytes = []byte(fmt.Sprintf("{\"metadata\": {\"annotations\": {\"%s\":\"%s\"}}}", @@ -209,13 +215,27 @@ func getNodePatchBytes(cacheNode *v1.Node, instance *core.Instance, logger *zap. } else if isCompartmentIDAnnotationPresent { //In this case FaultDomainLabel not present but CompartmentIDAnnotation present logger.Infof("Adding node label from cloud provider: %s=%s", FaultDomainLabel, *instance.FaultDomain) - nodePatchBytes = []byte(fmt.Sprintf("{\"metadata\": {\"labels\": {\"%s\":\"%s\"}}}", FaultDomainLabel, *instance.FaultDomain)) + if client.IsIpv6SingleStackCluster() { + logger.Infof("Adding node label from cloud provider: %s=%s", AvailabilityDomainLabel, availabilityDomainLabelValue) + nodePatchBytes = []byte(fmt.Sprintf("{\"metadata\": {\"labels\": {\"%s\":\"%s\",\"%s\":\"%s\"}}}", FaultDomainLabel, *instance.FaultDomain, + AvailabilityDomainLabel, availabilityDomainLabelValue)) + } else { + nodePatchBytes = []byte(fmt.Sprintf("{\"metadata\": {\"labels\": {\"%s\":\"%s\"}}}", FaultDomainLabel, *instance.FaultDomain)) + } + } else { //In this case none of FaultDomainLabel or CompartmentIDAnnotation present logger.Infof("Adding node label from cloud provider: %s=%s", 
FaultDomainLabel, *instance.FaultDomain) logger.Infof("Adding node annotation from cloud provider: %s=%s", CompartmentIDAnnotation, *instance.CompartmentId) - nodePatchBytes = []byte(fmt.Sprintf("{\"metadata\": {\"labels\": {\"%s\":\"%s\"},\"annotations\": {\"%s\":\"%s\"}}}", - FaultDomainLabel, *instance.FaultDomain, CompartmentIDAnnotation, *instance.CompartmentId)) + + if client.IsIpv6SingleStackCluster() { + logger.Infof("Adding node label from cloud provider: %s=%s", AvailabilityDomainLabel, availabilityDomainLabelValue) + nodePatchBytes = []byte(fmt.Sprintf("{\"metadata\": {\"labels\": {\"%s\":\"%s\",\"%s\":\"%s\"},\"annotations\": {\"%s\":\"%s\"}}}", + FaultDomainLabel, *instance.FaultDomain, AvailabilityDomainLabel, availabilityDomainLabelValue, CompartmentIDAnnotation, *instance.CompartmentId)) + } else { + nodePatchBytes = []byte(fmt.Sprintf("{\"metadata\": {\"labels\": {\"%s\":\"%s\"},\"annotations\": {\"%s\":\"%s\"}}}", + FaultDomainLabel, *instance.FaultDomain, CompartmentIDAnnotation, *instance.CompartmentId)) + } } return nodePatchBytes } @@ -249,8 +269,9 @@ func getInstanceByNode(cacheNode *v1.Node, nic *NodeInfoController, logger *zap. 
func validateNodeHasRequiredLabels(node *v1.Node) bool { _, isFaultDomainLabelPresent := node.ObjectMeta.Labels[FaultDomainLabel] + _, isAvilabilityDomainNameLabelPresent := node.ObjectMeta.Labels[AvailabilityDomainLabel] _, isCompartmentIDAnnotationPresent := node.ObjectMeta.Annotations[CompartmentIDAnnotation] - if isFaultDomainLabelPresent && isCompartmentIDAnnotationPresent { + if isFaultDomainLabelPresent && isCompartmentIDAnnotationPresent && (!client.IsIpv6SingleStackCluster() || isAvilabilityDomainNameLabelPresent) { return true } return false diff --git a/pkg/cloudprovider/providers/oci/node_info_controller_test.go b/pkg/cloudprovider/providers/oci/node_info_controller_test.go index fde29ebf9f..a89d4e7fa0 100644 --- a/pkg/cloudprovider/providers/oci/node_info_controller_test.go +++ b/pkg/cloudprovider/providers/oci/node_info_controller_test.go @@ -15,6 +15,7 @@ package oci import ( + "os" "reflect" "testing" @@ -29,6 +30,7 @@ import ( var ( instanceCompID = "instanceCompID" instanceFD = "instanceFD" + instanceAD = "prefix:instanceAD" instanceID = "ocid1.instanceID" ) @@ -36,6 +38,7 @@ func TestGetNodePatchBytes(t *testing.T) { testCases := map[string]struct { node *v1.Node instance *core.Instance + clusterIpFamily string expectedPatchBytes []byte }{ "FD label and CompartmentID annotation already present": { @@ -52,24 +55,49 @@ func TestGetNodePatchBytes(t *testing.T) { instance: &core.Instance{ CompartmentId: &instanceCompID, FaultDomain: &instanceFD, + AvailabilityDomain: &instanceAD, }, expectedPatchBytes: nil, + clusterIpFamily: "IPv4", }, - "Only FD label present": { + "FD label, AD label and CompartmentID annotation already present for IPv6": { + node: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + CompartmentIDAnnotation: "compID", + }, + Labels: map[string]string{ + FaultDomainLabel: "FD", + AvailabilityDomainLabel: "AD", + }, + }, + }, + instance: &core.Instance{ + CompartmentId: &instanceCompID, + FaultDomain: 
&instanceFD, + AvailabilityDomain: &instanceAD, + }, + expectedPatchBytes: nil, + clusterIpFamily: "IPv6", + }, + "Only FD label and AD label present": { node: &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ FaultDomainLabel: "FD", + AvailabilityDomainLabel: "AD", }, }, }, instance: &core.Instance{ CompartmentId: &instanceCompID, FaultDomain: &instanceFD, + AvailabilityDomain: &instanceAD, }, + clusterIpFamily: "IPv4", expectedPatchBytes: []byte("{\"metadata\": {\"annotations\": {\"oci.oraclecloud.com/compartment-id\":\"instanceCompID\"}}}"), }, - "Only CompartmentID annotation present": { + "Only CompartmentID annotation present Ipv4": { node: &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ @@ -80,23 +108,122 @@ func TestGetNodePatchBytes(t *testing.T) { instance: &core.Instance{ CompartmentId: &instanceCompID, FaultDomain: &instanceFD, + AvailabilityDomain: &instanceAD, }, + clusterIpFamily: "IPv4", expectedPatchBytes: []byte("{\"metadata\": {\"labels\": {\"oci.oraclecloud.com/fault-domain\":\"instanceFD\"}}}"), }, - "none present": { + "Only CompartmentID annotation present Ipv6": { + node: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + CompartmentIDAnnotation: "compID", + }, + }, + }, + instance: &core.Instance{ + CompartmentId: &instanceCompID, + FaultDomain: &instanceFD, + AvailabilityDomain: &instanceAD, + }, + clusterIpFamily: "IPv6", + expectedPatchBytes: []byte("{\"metadata\": {\"labels\": {\"oci.oraclecloud.com/fault-domain\":\"instanceFD\",\"csi-ipv6-full-ad-name\":\"prefix.instanceAD\"}}}"), + }, + "Only FD label is present IPv4 dual stack": { + node: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + FaultDomainLabel: "FD", + }, + }, + }, + instance: &core.Instance{ + CompartmentId: &instanceCompID, + FaultDomain: &instanceFD, + AvailabilityDomain: &instanceAD, + }, + clusterIpFamily: "IPv4,IPv6", + expectedPatchBytes: []byte("{\"metadata\": 
{\"annotations\": {\"oci.oraclecloud.com/compartment-id\":\"instanceCompID\"}}}"), + }, + "Only FD label is present IPv6": { + node: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + FaultDomainLabel: "FD", + }, + }, + }, + instance: &core.Instance{ + CompartmentId: &instanceCompID, + FaultDomain: &instanceFD, + AvailabilityDomain: &instanceAD, + }, + clusterIpFamily: "IPv6", + expectedPatchBytes: []byte("{\"metadata\": {\"labels\": {\"oci.oraclecloud.com/fault-domain\":\"instanceFD\",\"csi-ipv6-full-ad-name\":\"prefix.instanceAD\"},\"annotations\": {\"oci.oraclecloud.com/compartment-id\":\"instanceCompID\"}}}"), + }, + "Only AD label present Ipv4": { + node: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + AvailabilityDomainLabel: "AD", + }, + }, + }, + instance: &core.Instance{ + CompartmentId: &instanceCompID, + FaultDomain: &instanceFD, + AvailabilityDomain: &instanceAD, + }, + clusterIpFamily: "IPv4", + expectedPatchBytes: []byte("{\"metadata\": {\"labels\": {\"oci.oraclecloud.com/fault-domain\":\"instanceFD\"},\"annotations\": {\"oci.oraclecloud.com/compartment-id\":\"instanceCompID\"}}}"), + }, + "Only AD label present Ipv6": { + node: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + AvailabilityDomainLabel: "AD", + }, + }, + }, + instance: &core.Instance{ + CompartmentId: &instanceCompID, + FaultDomain: &instanceFD, + AvailabilityDomain: &instanceAD, + }, + clusterIpFamily: "IPv6", + expectedPatchBytes: []byte("{\"metadata\": {\"labels\": {\"oci.oraclecloud.com/fault-domain\":\"instanceFD\",\"csi-ipv6-full-ad-name\":\"prefix.instanceAD\"},\"annotations\": {\"oci.oraclecloud.com/compartment-id\":\"instanceCompID\"}}}"), + }, + "none present Ipv4": { node: &v1.Node{ ObjectMeta: metav1.ObjectMeta{}, }, instance: &core.Instance{ CompartmentId: &instanceCompID, FaultDomain: &instanceFD, + AvailabilityDomain: &instanceAD, + }, + clusterIpFamily: "IPv4", expectedPatchBytes: 
[]byte("{\"metadata\": {\"labels\": {\"oci.oraclecloud.com/fault-domain\":\"instanceFD\"},\"annotations\": {\"oci.oraclecloud.com/compartment-id\":\"instanceCompID\"}}}"), }, + "none present Ipv6": { + node: &v1.Node{ + ObjectMeta: metav1.ObjectMeta{}, + }, + instance: &core.Instance{ + CompartmentId: &instanceCompID, + FaultDomain: &instanceFD, + AvailabilityDomain: &instanceAD, + + }, + clusterIpFamily: "IPv6", + expectedPatchBytes: []byte("{\"metadata\": {\"labels\": {\"oci.oraclecloud.com/fault-domain\":\"instanceFD\",\"csi-ipv6-full-ad-name\":\"prefix.instanceAD\"},\"annotations\": {\"oci.oraclecloud.com/compartment-id\":\"instanceCompID\"}}}"), + }, } logger := zap.L() for name, tc := range testCases { t.Run(name, func(t *testing.T) { + os.Setenv("CLUSTER_IP_FAMILY",tc.clusterIpFamily) patchedBytes := getNodePatchBytes(tc.node, tc.instance, logger.Sugar()) if !reflect.DeepEqual(patchedBytes, tc.expectedPatchBytes) { t.Errorf("Expected PatchBytes \n%+v\nbut got\n%+v", tc.expectedPatchBytes, patchedBytes) diff --git a/pkg/csi-util/utils.go b/pkg/csi-util/utils.go index 32f13b4d2b..ced31b6d53 100644 --- a/pkg/csi-util/utils.go +++ b/pkg/csi-util/utils.go @@ -82,6 +82,9 @@ const ( // For Raw Block Volumes, the name of the bind-mounted file inside StagingTargetPath RawBlockStagingFile = "mountfile" + + AvailabilityDomainLabel = "csi-ipv6-full-ad-name" + ) // Util interface @@ -120,11 +123,11 @@ func (u *Util) LookupNodeID(k kubernetes.Interface, nodeName string) (string, er return n.Spec.ProviderID, nil } -func (u *Util) LookupNodeAvailableDomain(k kubernetes.Interface, nodeID string) (string, error) { +func (u *Util) LookupNodeAvailableDomain(k kubernetes.Interface, nodeID string) (string, string, error) { n, err := k.CoreV1().Nodes().Get(context.Background(), nodeID, metav1.GetOptions{}) if err != nil { u.Logger.With(zap.Error(err)).With("nodeId", nodeID).Error("Failed to get Node by name.") - return "", fmt.Errorf("failed to get node %s", nodeID) + return "", 
"",fmt.Errorf("failed to get node %s", nodeID) } if n.Labels != nil { ad, ok := n.Labels[kubeAPI.LabelTopologyZone] @@ -132,12 +135,13 @@ func (u *Util) LookupNodeAvailableDomain(k kubernetes.Interface, nodeID string) ad, ok = n.Labels[kubeAPI.LabelZoneFailureDomain] } if ok { - return ad, nil + fullAdName, _ := n.Labels[AvailabilityDomainLabel] + return ad, fullAdName, nil } } errMsg := fmt.Sprintf("Did not find the label for the fault domain. Checked Topology Labels: %s, %s", kubeAPI.LabelTopologyZone, kubeAPI.LabelZoneFailureDomain) u.Logger.With("nodeId", nodeID).Error(errMsg) - return "", fmt.Errorf(errMsg) + return "","", fmt.Errorf(errMsg) } // waitForPathToExist waits for for a given filesystem path to exist. @@ -591,3 +595,10 @@ func IsDualStackSubnet(subnet *core.Subnet) bool { func IsValidIpFamilyPresentInClusterIpFamily(clusterIpFamily string) bool { return len(clusterIpFamily) > 0 && (strings.Contains(clusterIpFamily, Ipv4Stack) || strings.Contains(clusterIpFamily, Ipv6Stack)) } + +func IsIpv6SingleStackNode(nodeIpFamily *NodeIpFamily) bool { + if nodeIpFamily == nil { + return false + } + return nodeIpFamily.Ipv6Enabled == true && nodeIpFamily.Ipv4Enabled == false +} diff --git a/pkg/csi/driver/bv_controller.go b/pkg/csi/driver/bv_controller.go index 9d8d4a3b9f..272cbb68e3 100644 --- a/pkg/csi/driver/bv_controller.go +++ b/pkg/csi/driver/bv_controller.go @@ -243,6 +243,7 @@ func (d *BlockVolumeControllerDriver) CreateVolume(ctx context.Context, req *csi } availableDomainShortName := "" + fullAvailabilityDomainName := "" volumeName := req.Name dimensionsMap := make(map[string]string) @@ -331,6 +332,9 @@ func (d *BlockVolumeControllerDriver) CreateVolume(ctx context.Context, req *csi } srcVolumeId = id + if client.IsIpv6SingleStackCluster() { + fullAvailabilityDomainName = availableDomainShortName + } } } @@ -342,6 +346,13 @@ func (d *BlockVolumeControllerDriver) CreateVolume(ctx context.Context, req *csi availableDomainShortName, _ = 
t.Segments[kubeAPI.LabelZoneFailureDomain] } log.With("AD", availableDomainShortName).Info("Using preferred topology for AD.") + + fullAvailabilityDomainName, ok = t.Segments[csi_util.AvailabilityDomainLabel] + if ok { + fullAvailabilityDomainName = strings.ReplaceAll(fullAvailabilityDomainName, ".", ":") + log.With("fullAvailabilityDomainName", fullAvailabilityDomainName).Info("Identified full ad name from topology keys") + } + if len(availableDomainShortName) > 0 { break } @@ -350,6 +361,7 @@ func (d *BlockVolumeControllerDriver) CreateVolume(ctx context.Context, req *csi if availableDomainShortName == "" { if req.AccessibilityRequirements != nil && req.AccessibilityRequirements.Requisite != nil { + for _, t := range req.AccessibilityRequirements.Requisite { var ok bool availableDomainShortName, ok = t.Segments[kubeAPI.LabelTopologyZone] @@ -357,6 +369,13 @@ func (d *BlockVolumeControllerDriver) CreateVolume(ctx context.Context, req *csi availableDomainShortName, _ = t.Segments[kubeAPI.LabelZoneFailureDomain] } log.With("AD", availableDomainShortName).Info("Using requisite topology for AD.") + + fullAvailabilityDomainName, ok = t.Segments[csi_util.AvailabilityDomainLabel] + if ok { + fullAvailabilityDomainName = strings.ReplaceAll(fullAvailabilityDomainName, ".", ":") + log.With("fullAvailabilityDomainName", fullAvailabilityDomainName).Info("Identified full ad name from topology keys") + } + if len(availableDomainShortName) > 0 { break } @@ -413,24 +432,29 @@ func (d *BlockVolumeControllerDriver) CreateVolume(ctx context.Context, req *csi } else { // Creating new volume - ad, err := d.client.Identity(nil).GetAvailabilityDomainByName(ctx, d.config.CompartmentID, availableDomainShortName) - if err != nil { - log.With("Compartment Id", d.config.CompartmentID, "service", "identity", "verb", "get", "resource", "AD", "statusCode", util.GetHttpStatusCode(err)). 
- With(zap.Error(err)).Error("Failed to get available domain.") - errorType = util.GetError(err) - metricDimension = util.GetMetricDimensionForComponent(errorType, metricType) - dimensionsMap[metrics.ComponentDimension] = metricDimension - metrics.SendMetricData(d.metricPusher, metric, time.Since(startTime).Seconds(), dimensionsMap) - return nil, status.Errorf(codes.InvalidArgument, "invalid available domain: %s or compartment ID: %s", availableDomainShortName, d.config.CompartmentID) + if !client.IsIpv6SingleStackCluster() { + ad, err := d.client.Identity(nil).GetAvailabilityDomainByName(ctx, d.config.CompartmentID, availableDomainShortName) + if err != nil { + log.With("Compartment Id", d.config.CompartmentID, + "service", "identity", "verb", "get", "resource", "AD", "statusCode", util.GetHttpStatusCode(err)). + With(zap.Error(err)).Error("Failed to get available domain.") + errorType = util.GetError(err) + metricDimension = util.GetMetricDimensionForComponent(errorType, metricType) + dimensionsMap[metrics.ComponentDimension] = metricDimension + metrics.SendMetricData(d.metricPusher, metric, time.Since(startTime).Seconds(), dimensionsMap) + return nil, status.Errorf(codes.InvalidArgument, "invalid available domain: %s or compartment ID: %s", availableDomainShortName, d.config.CompartmentID) + } + fullAvailabilityDomainName = *ad.Name } + bvTags := getBVTags(log, d.config.Tags, volumeParams) - provisionedVolume, err = provision(ctx, log, d.client, volumeName, size, *ad.Name, d.config.CompartmentID, srcSnapshotId, srcVolumeId, + provisionedVolume, err = provision(ctx, log, d.client, volumeName, size, fullAvailabilityDomainName, d.config.CompartmentID, srcSnapshotId, srcVolumeId, volumeParams.diskEncryptionKey, volumeParams.vpusPerGB, bvTags) if err != nil && client.IsSystemTagNotFoundOrNotAuthorisedError(log, errors.Unwrap(err)) { - log.With("Ad name", *ad.Name, "Compartment Id", d.config.CompartmentID).With(zap.Error(err)).Warn("New volume creation failed due to 
oke system tags error. sending metric & retrying without oke system tags") + log.With("Ad name", fullAvailabilityDomainName, "Compartment Id", d.config.CompartmentID).With(zap.Error(err)).Warn("New volume creation failed due to oke system tags error. sending metric & retrying without oke system tags") errorType = util.SystemTagErrTypePrefix + util.GetError(err) metricDimension = util.GetMetricDimensionForComponent(errorType, metricType) dimensionsMap[metrics.ComponentDimension] = metricDimension @@ -438,11 +462,11 @@ func (d *BlockVolumeControllerDriver) CreateVolume(ctx context.Context, req *csi // retry provision without oke system tags delete(bvTags.DefinedTags, OkeSystemTagNamesapce) - provisionedVolume, err = provision(ctx, log, d.client, volumeName, size, *ad.Name, d.config.CompartmentID, srcSnapshotId, srcVolumeId, + provisionedVolume, err = provision(ctx, log, d.client, volumeName, size, fullAvailabilityDomainName, d.config.CompartmentID, srcSnapshotId, srcVolumeId, volumeParams.diskEncryptionKey, volumeParams.vpusPerGB, bvTags) } if err != nil { - log.With("Ad name", *ad.Name, "Compartment Id", d.config.CompartmentID).With(zap.Error(err)).Error("New volume creation failed.") + log.With("Ad name", fullAvailabilityDomainName, "Compartment Id", d.config.CompartmentID).With(zap.Error(err)).Error("New volume creation failed.") errorType = util.GetError(err) metricDimension = util.GetMetricDimensionForComponent(errorType, metricType) dimensionsMap[metrics.ComponentDimension] = metricDimension diff --git a/pkg/csi/driver/bv_node.go b/pkg/csi/driver/bv_node.go index a2fa5a6f29..8899d0d6b2 100644 --- a/pkg/csi/driver/bv_node.go +++ b/pkg/csi/driver/bv_node.go @@ -774,24 +774,37 @@ func (d BlockVolumeNodeDriver) NodeGetCapabilities(ctx context.Context, req *csi // NodeGetInfo returns the supported capabilities of the node server. // The result of this function will be used by the CO in ControllerPublishVolume. 
func (d BlockVolumeNodeDriver) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) { - ad, err := d.util.LookupNodeAvailableDomain(d.KubeClient, d.nodeID) + ad, fullAvailabilityDomainName, err := d.util.LookupNodeAvailableDomain(d.KubeClient, d.nodeID) if err != nil { d.logger.With(zap.Error(err)).With("nodeId", d.nodeID, "availabilityDomain", ad).Error("Failed to get availability domain of node from kube api server.") return nil, status.Error(codes.Internal, "Failed to get availability domain of node from kube api server.") } + segments := map[string]string { + kubeAPI.LabelZoneFailureDomain: ad, + kubeAPI.LabelTopologyZone: ad, + } + + //set full ad name in segments only for IPv6 single stack + if csi_util.IsIpv6SingleStackNode(d.nodeIpFamily) { + if fullAvailabilityDomainName == "" { + d.logger.With(zap.Error(err)).With("nodeId", d.nodeID, "fullAvailabilityDomainName", fullAvailabilityDomainName).Error("Failed to get full availability domain name of IPv6 single stack node from node labels.") + return nil, status.Error(codes.Internal, "Failed to get full availability domain name of IPv6 single stack node from node labels.") + } + + segments[csi_util.AvailabilityDomainLabel] = fullAvailabilityDomainName + } + d.logger.With("nodeId", d.nodeID, "availabilityDomain", ad).Info("Availability domain of node identified.") + return &csi.NodeGetInfoResponse{ NodeId: d.nodeID, MaxVolumesPerNode: maxVolumesPerNode, // make sure that the driver works on this particular AD only AccessibleTopology: &csi.Topology{ - Segments: map[string]string{ - kubeAPI.LabelZoneFailureDomain: ad, - kubeAPI.LabelTopologyZone: ad, - }, + Segments: segments, }, }, nil } diff --git a/pkg/csi/driver/fss_controller.go b/pkg/csi/driver/fss_controller.go index 027462dd07..5b08ec3b35 100644 --- a/pkg/csi/driver/fss_controller.go +++ b/pkg/csi/driver/fss_controller.go @@ -625,14 +625,25 @@ func extractStorageClassParameters(ctx context.Context, d 
*FSSControllerDriver, return log, nil, nil, status.Errorf(codes.InvalidArgument, "AvailabilityDomain not provided in storage class"), true } - ad, err := identityClient.GetAvailabilityDomainByName(ctx, compartmentId, availabilityDomain) - if err != nil { - log.With(zap.Error(err)).Errorf("invalid available domain: %s or compartmentID: %s", availabilityDomain, compartmentId) - dimensionsMap[metrics.ComponentDimension] = util.GetMetricDimensionForComponent(util.GetError(err), util.CSIStorageType) - metrics.SendMetricData(d.metricPusher, metrics.FssAllProvision, time.Since(startTime).Seconds(), dimensionsMap) - return log, nil, nil, status.Errorf(codes.InvalidArgument, "invalid available domain: %s or compartment ID: %s, error: %s", availabilityDomain, compartmentId, err.Error()), true + if client.IsIpv6SingleStackCluster() { + if !strings.Contains(availabilityDomain,":") { + log.Errorf("Full AvailabilityDomain with prefix not provided in storage class for IPv6 single stack cluster.") + dimensionsMap[metrics.ComponentDimension] = util.GetMetricDimensionForComponent(util.ErrValidation, util.CSIStorageType) + metrics.SendMetricData(d.metricPusher, metrics.FssAllProvision, time.Since(startTime).Seconds(), dimensionsMap) + metrics.SendMetricData(d.metricPusher, metrics.MTProvision, time.Since(startTime).Seconds(), dimensionsMap) + return log, nil, nil, status.Errorf(codes.InvalidArgument, "Full AvailabilityDomain with prefix not provided in storage class for IPv6 single stack cluster."), true + + } + } else { + ad, err := identityClient.GetAvailabilityDomainByName(ctx, compartmentId, availabilityDomain) + if err != nil { + log.With(zap.Error(err)).Errorf("invalid available domain: %s or compartmentID: %s", availabilityDomain, compartmentId) + dimensionsMap[metrics.ComponentDimension] = util.GetMetricDimensionForComponent(util.GetError(err), util.CSIStorageType) + metrics.SendMetricData(d.metricPusher, metrics.FssAllProvision, time.Since(startTime).Seconds(), 
dimensionsMap) + return log, nil, nil, status.Errorf(codes.InvalidArgument, "invalid available domain: %s or compartment ID: %s, error: %s", availabilityDomain, compartmentId, err.Error()), true + } + availabilityDomain = *ad.Name } - availabilityDomain = *ad.Name log = log.With("availabilityDomain", availabilityDomain) storageClassParameters.availabilityDomain = availabilityDomain diff --git a/pkg/csi/driver/fss_controller_test.go b/pkg/csi/driver/fss_controller_test.go index 73c60979c1..4c5cdc31c4 100644 --- a/pkg/csi/driver/fss_controller_test.go +++ b/pkg/csi/driver/fss_controller_test.go @@ -17,6 +17,7 @@ package driver import ( "context" "fmt" + "os" "reflect" "strings" "testing" @@ -449,8 +450,8 @@ func TestFSSControllerDriver_CreateVolume(t *testing.T) { util *csi_util.Util } type args struct { - ctx context.Context - req *csi.CreateVolumeRequest + ctx context.Context + req *csi.CreateVolumeRequest tenancyId string } tests := []struct { @@ -765,8 +766,8 @@ func TestFSSControllerDriver_DeleteVolume(t *testing.T) { util *csi_util.Util } type args struct { - ctx context.Context - req *csi.DeleteVolumeRequest + ctx context.Context + req *csi.DeleteVolumeRequest tenancyId string } tests := []struct { @@ -830,8 +831,8 @@ func TestFSSControllerDriver_DeleteVolume(t *testing.T) { name: "Error while creating fss client", fields: fields{}, args: args{ - ctx: context.Background(), - req: &csi.DeleteVolumeRequest{VolumeId: "oc1.filesystem.xxxx:10.0.10.207:/export-path", Secrets: map[string]string{"serviceAccount": "", "serviceAccountNamespace": "", "parentRptURL": "testurl"}}, + ctx: context.Background(), + req: &csi.DeleteVolumeRequest{VolumeId: "oc1.filesystem.xxxx:10.0.10.207:/export-path", Secrets: map[string]string{"serviceAccount": "", "serviceAccountNamespace": "", "parentRptURL": "testurl"}}, tenancyId: "test2-tenancy", }, want: nil, @@ -867,6 +868,7 @@ func TestExtractStorageClassParameters(t *testing.T) { expectedStorageClassParameters *StorageClassParameters 
wantErr bool wantErrMessage string + clusterIPFamily string }{ "Extract storage class parameters with mountTargetOcid": { parameters: map[string]string{ @@ -884,8 +886,9 @@ func TestExtractStorageClassParameters(t *testing.T) { encryptInTransit: "false", scTags: &config.TagConfig{}, }, - wantErr: false, - wantErrMessage: "", + clusterIPFamily: "IPv4", + wantErr: false, + wantErrMessage: "", }, "Extract storage class parameters with mountTargetSubnetOcid": { parameters: map[string]string{ @@ -903,8 +906,9 @@ func TestExtractStorageClassParameters(t *testing.T) { encryptInTransit: "false", scTags: &config.TagConfig{}, }, - wantErr: false, - wantErrMessage: "", + clusterIPFamily: "IPv4", + wantErr: false, + wantErrMessage: "", }, "Extract storage class parameters with export-path": { parameters: map[string]string{ @@ -923,8 +927,9 @@ func TestExtractStorageClassParameters(t *testing.T) { encryptInTransit: "false", scTags: &config.TagConfig{}, }, - wantErr: false, - wantErrMessage: "", + clusterIPFamily: "IPv4", + wantErr: false, + wantErrMessage: "", }, "Extract storage class parameters with kmskey": { parameters: map[string]string{ @@ -943,8 +948,9 @@ func TestExtractStorageClassParameters(t *testing.T) { encryptInTransit: "false", scTags: &config.TagConfig{}, }, - wantErr: false, - wantErrMessage: "", + clusterIPFamily: "IPv4", + wantErr: false, + wantErrMessage: "", }, "Extract storage class parameters with in-transit encryption": { parameters: map[string]string{ @@ -963,8 +969,9 @@ func TestExtractStorageClassParameters(t *testing.T) { encryptInTransit: "true", scTags: &config.TagConfig{}, }, - wantErr: false, - wantErrMessage: "", + clusterIPFamily: "IPv4", + wantErr: false, + wantErrMessage: "", }, "Extract storage class parameters with different compartment": { parameters: map[string]string{ @@ -983,29 +990,66 @@ func TestExtractStorageClassParameters(t *testing.T) { encryptInTransit: "false", scTags: &config.TagConfig{}, }, - wantErr: false, - wantErrMessage: 
"", + clusterIPFamily: "IPv4", + wantErr: false, + wantErrMessage: "", }, "Error when availabilityDomain is not passed": { parameters: map[string]string{ "mountTargetOcid": "oc1.mounttarget.xxxx", }, expectedStorageClassParameters: &StorageClassParameters{}, + clusterIPFamily: "IPv4", wantErr: true, wantErrMessage: "AvailabilityDomain not provided in storage class", }, + "Error when mountTargetOcid and mountTargetSubnetOcid is not passed": { parameters: map[string]string{ "availabilityDomain": "AD1", }, expectedStorageClassParameters: &StorageClassParameters{}, + clusterIPFamily: "IPv4", wantErr: true, wantErrMessage: "Neither Mount Target Ocid nor Mount Target Subnet Ocid provided in storage class", }, + "Error when full ad name not provided in storage class parameters for IPv6 single stack cluster": { + parameters: map[string]string{ + "availabilityDomain": "AD1", + "mountTargetOcid": "oc1.mounttarget.xxxx", + "compartmentOcid": "oc1.compartment.yyyy", + }, + expectedStorageClassParameters: &StorageClassParameters{}, + clusterIPFamily: "IPv6", + wantErr: true, + wantErrMessage: "Full AvailabilityDomain with prefix not provided in storage class for IPv6 single stack cluster.", + }, + "Extract Storage class parameters when full ad name is provided in storage class parameters for IPv6 single stack cluster": { + parameters: map[string]string{ + "availabilityDomain": "jksl:PHX-AD-2", + "mountTargetOcid": "oc1.mounttarget.xxxx", + "compartmentOcid": "oc1.compartment.yyyy", + }, + expectedStorageClassParameters: &StorageClassParameters{ + availabilityDomain: "jksl:PHX-AD-2", + compartmentOcid: "oc1.compartment.yyyy", + kmsKey: "", + exportPath: "/ut-volume", + exportOptions: []filestorage.ClientOptions{}, + mountTargetOcid: "oc1.mounttarget.xxxx", + mountTargetSubnetOcid: "", + encryptInTransit: "false", + scTags: &config.TagConfig{}, + }, + clusterIPFamily: "IPv6", + wantErr: false, + wantErrMessage: "", + }, } ctx := context.Background() for name, tt := range tests { 
t.Run(name, func(t *testing.T) { + os.Setenv("CLUSTER_IP_FAMILY", tt.clusterIPFamily) d := &FSSControllerDriver{ControllerDriver: ControllerDriver{ KubeClient: nil, logger: zap.S(), diff --git a/pkg/csi/driver/fss_node.go b/pkg/csi/driver/fss_node.go index eedb5a91ae..477a3bc39d 100644 --- a/pkg/csi/driver/fss_node.go +++ b/pkg/csi/driver/fss_node.go @@ -474,7 +474,7 @@ func (d FSSNodeDriver) NodeGetCapabilities(ctx context.Context, req *csi.NodeGet // NodeGetInfo returns the supported capabilities of the node server. // The result of this function will be used by the CO in ControllerPublishVolume. func (d FSSNodeDriver) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) { - ad, err := d.util.LookupNodeAvailableDomain(d.KubeClient, d.nodeID) + ad, _ , err := d.util.LookupNodeAvailableDomain(d.KubeClient, d.nodeID) if err != nil { d.logger.With(zap.Error(err)).With("nodeId", d.nodeID, "availabilityDomain", ad).Error("Failed to get availability domain of node from kube api server.") diff --git a/pkg/csi/driver/lustre_node.go b/pkg/csi/driver/lustre_node.go index 7f54e6c038..fb4760928c 100644 --- a/pkg/csi/driver/lustre_node.go +++ b/pkg/csi/driver/lustre_node.go @@ -361,7 +361,7 @@ func (d LustreNodeDriver) NodeGetCapabilities(ctx context.Context, request *csi. 
} func (d LustreNodeDriver) NodeGetInfo(ctx context.Context, request *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) { - ad, err := d.util.LookupNodeAvailableDomain(d.KubeClient, d.nodeID) + ad, _ , err := d.util.LookupNodeAvailableDomain(d.KubeClient, d.nodeID) if err != nil { d.logger.With(zap.Error(err)).With("nodeId", d.nodeID, "availableDomain", ad).Error("Available domain of node missing.") } diff --git a/pkg/oci/client/client.go b/pkg/oci/client/client.go index 405f96ed1b..65a213c667 100644 --- a/pkg/oci/client/client.go +++ b/pkg/oci/client/client.go @@ -18,6 +18,7 @@ import ( "context" "time" + providercfg "github.com/oracle/oci-cloud-controller-manager/pkg/cloudprovider/providers/oci/config" "github.com/oracle/oci-go-sdk/v65/common" "github.com/oracle/oci-go-sdk/v65/common/auth" "github.com/oracle/oci-go-sdk/v65/core" @@ -194,7 +195,7 @@ type client struct { } // New constructs an OCI API client. -func New(logger *zap.SugaredLogger, cp common.ConfigurationProvider, opRateLimiter *RateLimiter, targetTenancyID string) (Interface, error) { +func New(logger *zap.SugaredLogger, cp common.ConfigurationProvider, opRateLimiter *RateLimiter, cloudProviderConfig *providercfg.Config) (Interface, error) { compute, err := core.NewComputeClientWithConfigurationProvider(cp) if err != nil { @@ -283,23 +284,15 @@ func New(logger *zap.SugaredLogger, cp common.ConfigurationProvider, opRateLimit RetryPolicy: newRetryPolicy(), } - loadbalancer := loadbalancerClientStruct{ - loadbalancer: lb, - requestMetadata: requestMetadata, - rateLimiter: *opRateLimiter, - } - networkloadbalancer := networkLoadbalancer{ - networkloadbalancer: nlb, - requestMetadata: requestMetadata, - rateLimiter: *opRateLimiter, - } + loadbalancer := NewLBClient(lb, requestMetadata, opRateLimiter) + networkloadbalancer := NewNLBClient(nlb, requestMetadata, opRateLimiter) c := &client{ compute: &compute, network: &network, identity: &identity, - loadbalancer: &loadbalancer, - 
networkloadbalancer: &networkloadbalancer, + loadbalancer: loadbalancer, + networkloadbalancer: networkloadbalancer, bs: &bs, filestorage: &fss, //compartment: &compartment, diff --git a/pkg/oci/client/client_factory.go b/pkg/oci/client/client_factory.go index 1d7410e7aa..626917ec9c 100644 --- a/pkg/oci/client/client_factory.go +++ b/pkg/oci/client/client_factory.go @@ -1,4 +1,4 @@ -// Copyright 2019 Oracle and/or its affiliates. All rights reserved. +// Copyright (C) 2019, 2025, Oracle and/or its affiliates. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -29,6 +29,6 @@ func GetClient(logger *zap.SugaredLogger, cfg *config.Config) (Interface, error) rateLimiter := NewRateLimiter(logger, cfg.RateLimiter) - c, err := New(logger, cp, &rateLimiter, cfg.Auth.TenancyID) + c, err := New(logger, cp, &rateLimiter, cfg) return c, err } diff --git a/pkg/oci/client/compute.go b/pkg/oci/client/compute.go index 11bc0804bf..99e3993892 100644 --- a/pkg/oci/client/compute.go +++ b/pkg/oci/client/compute.go @@ -280,7 +280,7 @@ func (c *client) GetInstanceByNodeName(ctx context.Context, compartmentID, vcnID if (vnic.PublicIp != nil && *vnic.PublicIp == nodeName) || (vnic.PrivateIp != nil && *vnic.PrivateIp == nodeName) || - (len(vnic.Ipv6Addresses) > 0 && vnic.Ipv6Addresses[0] == nodeName) || + (len(vnic.Ipv6Addresses) > 0 && vnic.Ipv6Addresses[0] == strings.ReplaceAll(nodeName, "-", ":")) || (vnic.HostnameLabel != nil && (*vnic.HostnameLabel != "" && strings.HasPrefix(nodeName, *vnic.HostnameLabel))) { instance, err := c.GetInstance(ctx, *attachment.InstanceId) if err != nil { diff --git a/pkg/oci/client/load_balancer.go b/pkg/oci/client/load_balancer.go index f72337b197..85b935a87c 100644 --- a/pkg/oci/client/load_balancer.go +++ b/pkg/oci/client/load_balancer.go @@ -1,4 +1,4 @@ -// Copyright 2018 Oracle and/or its affiliates. All rights reserved. 
+// Copyright (C) 2018, 2025, Oracle and/or its affiliates.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -17,6 +17,7 @@ package client
 import (
 	"context"
 	"go.uber.org/zap"
+	"sync"
 	"time"
 
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -33,6 +34,7 @@ const (
 )
 
 type loadbalancerClientStruct struct {
+	nameToOcid      sync.Map
 	loadbalancer    loadBalancerClient
 	requestMetadata common.RequestMetadata
 	rateLimiter     RateLimiter
@@ -64,6 +66,15 @@ type GenericLoadBalancerInterface interface {
 	UpdateLoadBalancer(ctx context.Context, lbID string, details *GenericUpdateLoadBalancerDetails) (string, error)
 }
 
+func NewLBClient(lb loadBalancerClient, rm common.RequestMetadata, lim *RateLimiter) *loadbalancerClientStruct {
+	l := loadbalancerClientStruct{
+		loadbalancer:    lb,
+		requestMetadata: rm,
+		rateLimiter:     *lim,
+	}
+	return &l
+}
+
 func (c *loadbalancerClientStruct) GetLoadBalancer(ctx context.Context, id string) (*GenericLoadBalancer, error) {
 	if !c.rateLimiter.Reader.TryAccept() {
 		return nil, RateLimitError(false, "GetLoadBalancer")
@@ -83,6 +94,29 @@ func (c *loadbalancerClientStruct) GetLoadBalancer(ctx context.Context, id strin
 }
 
 func (c *loadbalancerClientStruct) GetLoadBalancerByName(ctx context.Context, compartmentID, name string) (*GenericLoadBalancer, error) {
+	logger := zap.L().Sugar() // TODO refactor after pull-requests/1389
+	logger = logger.With("lbName", name,
+		"compartment-id", compartmentID,
+		"loadBalancerType", "lb",
+	)
+
+	if ocid, ok := c.nameToOcid.Load(name); ok {
+		var err error
+		ocidStr, isString := ocid.(string)
+		if isString {
+			var lb *GenericLoadBalancer
+			lb, err = c.GetLoadBalancer(ctx, ocidStr)
+			if err == nil && *lb.DisplayName == name {
+				return lb, nil
+			}
+		}
+		if !isString || IsNotFound(err) { // Only remove the cached value on 404, not on a 5XX
+			c.nameToOcid.Delete(name)
+		}
+	} else {
+		logger.Info("LB name to OCID cache miss")
+	}
+
	var page *string
	for {
		if !c.rateLimiter.Reader.TryAccept() {
@@ -101,6 +135,7 @@ func (c *loadbalancerClientStruct) GetLoadBalancerByName(ctx context.Context, co } for _, lb := range resp.Items { if *lb.DisplayName == name { + c.nameToOcid.Store(name, *lb.Id) return c.loadbalancerToGenericLoadbalancer(&lb), nil } } diff --git a/pkg/oci/client/network_load_balancer.go b/pkg/oci/client/network_load_balancer.go index e95dab3bf4..d560b34942 100644 --- a/pkg/oci/client/network_load_balancer.go +++ b/pkg/oci/client/network_load_balancer.go @@ -1,4 +1,4 @@ -// Copyright 2018 Oracle and/or its affiliates. All rights reserved. +// Copyright (C) 2018, 2025, Oracle and/or its affiliates. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,6 +16,7 @@ package client import ( "context" + "sync" "go.uber.org/zap" "k8s.io/apimachinery/pkg/util/wait" @@ -26,6 +27,7 @@ import ( ) type networkLoadbalancer struct { + nameToOcid sync.Map networkloadbalancer networkLoadBalancerClient requestMetadata common.RequestMetadata rateLimiter RateLimiter @@ -33,8 +35,22 @@ type networkLoadbalancer struct { const ( NetworkLoadBalancerEntityType = "NetworkLoadBalancer" + // TODO move to utils? + dns1123LabelFmt = "[a-z0-9]([-a-z0-9]*[a-z0-9])?" 
+	uuidFmt         = "[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
+	// LBNameRegex matches generated LB display names: <namespace>/<service-name>/<service-uid>.
+	LBNameRegex = "^" + dns1123LabelFmt + "/" + dns1123LabelFmt + "/" + uuidFmt + "$"
 )
 
+func NewNLBClient(nlb networkLoadBalancerClient, rm common.RequestMetadata, lim *RateLimiter) *networkLoadbalancer {
+	n := networkLoadbalancer{
+		networkloadbalancer: nlb,
+		requestMetadata:     rm,
+		rateLimiter:         *lim,
+	}
+	return &n
+}
+
 func (c *networkLoadbalancer) GetLoadBalancer(ctx context.Context, id string) (*GenericLoadBalancer, error) {
 	if !c.rateLimiter.Reader.TryAccept() {
 		return nil, RateLimitError(false, "GetLoadBalancer")
@@ -54,6 +70,29 @@ func (c *networkLoadbalancer) GetLoadBalancer(ctx context.Context, id string) (*
 }
 
 func (c *networkLoadbalancer) GetLoadBalancerByName(ctx context.Context, compartmentID string, name string) (*GenericLoadBalancer, error) {
+	logger := zap.L().Sugar() // TODO refactor after pull-requests/1389
+	logger = logger.With("lbName", name,
+		"compartment-id", compartmentID,
+		"loadBalancerType", "nlb",
+	)
+
+	if ocid, ok := c.nameToOcid.Load(name); ok {
+		var err error
+		ocidStr, isString := ocid.(string)
+		if isString {
+			var lb *GenericLoadBalancer
+			lb, err = c.GetLoadBalancer(ctx, ocidStr)
+			if err == nil && *lb.DisplayName == name {
+				return lb, nil
+			}
+		}
+		if !isString || IsNotFound(err) { // Only remove the cached value on 404, not on a 5XX
+			c.nameToOcid.Delete(name)
+		}
+	} else {
+		logger.Info("NLB name to OCID cache miss")
+	}
+
 	var page *string
 	for {
 		if !c.rateLimiter.Reader.TryAccept() {
@@ -72,6 +111,7 @@ func (c *networkLoadbalancer) GetLoadBalancerByName(ctx context.Context, compart
 		}
 		for _, lb := range resp.Items {
 			if *lb.DisplayName == name {
+				c.nameToOcid.Store(name, *lb.Id)
 				return c.networkLoadbalancerSummaryToGenericLoadbalancer(&lb), nil
 			}
 		}
diff --git a/pkg/oci/client/network_load_balancer_test.go b/pkg/oci/client/network_load_balancer_test.go
index 711d33d434..4e60679491 100644
--- a/pkg/oci/client/network_load_balancer_test.go
+++ 
b/pkg/oci/client/network_load_balancer_test.go @@ -17,6 +17,7 @@ package client import ( "context" "errors" + "fmt" errors2 "github.com/pkg/errors" "log" "strings" @@ -98,10 +99,112 @@ func TestNLB_AwaitWorkRequest(t *testing.T) { } } +var ( + fakeNlbOcid1 = "ocid.nlb.fake1" + fakeNlbName1 = "fake display name 1" + fakeNlbOcid2 = "ocid.nlb.fake2" + fakeNlbName2 = "fake display name 2" + fakeSubnetOcid = "ocid.subnet.fake" + + NLBMap = map[string]networkloadbalancer.NetworkLoadBalancer{ + "ocid.nlb.fake1": networkloadbalancer.NetworkLoadBalancer{ + Id: &fakeNlbOcid1, + DisplayName: &fakeNlbName1, + SubnetId: &fakeSubnetOcid, + }, + "ocid.nlb.fake2": networkloadbalancer.NetworkLoadBalancer{ + Id: &fakeNlbOcid2, + DisplayName: &fakeNlbName2, + SubnetId: &fakeSubnetOcid, + }, + } +) + +func TestGetLoadBalancerByName(t *testing.T) { + var totalListCalls int + var loadbalancer = NewNLBClient( + &MockNetworkLoadBalancerClient{debug: true, listCalls: &totalListCalls}, + common.RequestMetadata{}, + &RateLimiter{ + Reader: flowcontrol.NewFakeAlwaysRateLimiter(), + Writer: flowcontrol.NewFakeAlwaysRateLimiter(), + }) + + var tests = []struct { + skip bool // set true to skip a test-case + compartment, name, testname string + want string + wantErr error + wantListCalls int + }{ + { + testname: "getFirstNLBFirstTime", + compartment: "ocid.compartment.fake", + name: fakeNlbName1, + want: fakeNlbOcid1, + wantErr: nil, + wantListCalls: 1, + }, + { + testname: "getFirstNLBSecondTime", + compartment: "ocid.compartment.fake", + name: fakeNlbName1, + want: fakeNlbOcid1, + wantErr: nil, + wantListCalls: 1, // totals, no new list should be performed + }, + { + testname: "getSecondNLBTime", + compartment: "ocid.compartment.fake", + name: fakeNlbName2, + want: fakeNlbOcid2, + wantErr: nil, + wantListCalls: 2, + }, + { + testname: "getFirstNLBThirdTime", + compartment: "ocid.compartment.fake", + name: fakeNlbName1, + want: fakeNlbOcid1, + wantErr: nil, + wantListCalls: 2, + }, + { + 
testname: "getSecondNLBSecondTime", + compartment: "ocid.compartment.fake", + name: fakeNlbName2, + want: fakeNlbOcid2, + wantErr: nil, + wantListCalls: 2, + }, + } + + for _, tt := range tests { + if tt.skip { + continue + } + + t.Run(tt.testname, func(t *testing.T) { + log.Println("running test ", tt.testname) + got, err := loadbalancer.GetLoadBalancerByName(context.Background(), tt.compartment, tt.name) + if got == nil || *got.Id != tt.want { + t.Errorf("Expected %v, but got %v", tt.want, got) + } + if !errors.Is(err, tt.wantErr) { + t.Errorf("Expected error = %v, but got %v", tt.wantErr, err) + } + if totalListCalls != tt.wantListCalls { + t.Errorf("Expected the total number of NLB list calls %d, but got %d", tt.wantListCalls, totalListCalls) + } + }) + } +} + type MockNetworkLoadBalancerClient struct { // MockLoadBalancerClient mocks LoadBalancer client implementation. - counter int - debug bool // set true to run tests with debug logs + counter int + debug bool // set true to run tests with debug logs + listCalls *int // number of list operations performed } type getNetworkLoadBalancerWorkRequestResponse struct { @@ -173,12 +276,27 @@ func (c *MockNetworkLoadBalancerClient) GetWorkRequest(ctx context.Context, requ } func (c *MockNetworkLoadBalancerClient) GetNetworkLoadBalancer(ctx context.Context, request networkloadbalancer.GetNetworkLoadBalancerRequest) (response networkloadbalancer.GetNetworkLoadBalancerResponse, err error) { + if c.debug { + log.Println(fmt.Sprintf("Getting NLB %v", *request.NetworkLoadBalancerId)) + } + + response = networkloadbalancer.GetNetworkLoadBalancerResponse{ + NetworkLoadBalancer: NLBMap[*request.NetworkLoadBalancerId], + } return } func (c *MockNetworkLoadBalancerClient) ListWorkRequests(ctx context.Context, request networkloadbalancer.ListWorkRequestsRequest) (response networkloadbalancer.ListWorkRequestsResponse, err error) { return } func (c *MockNetworkLoadBalancerClient) ListNetworkLoadBalancers(ctx context.Context, 
request networkloadbalancer.ListNetworkLoadBalancersRequest) (response networkloadbalancer.ListNetworkLoadBalancersResponse, err error) {
+	if c.debug {
+		log.Println(fmt.Sprintf("Listing NLBs in compartment %v", *request.CompartmentId))
+	}
+
+	for _, nlb := range NLBMap {
+		response.NetworkLoadBalancerCollection.Items = append(response.NetworkLoadBalancerCollection.Items, networkloadbalancer.NetworkLoadBalancerSummary(nlb))
+	}
+	*c.listCalls += 1
 	return
 }
 
 func (c *MockNetworkLoadBalancerClient) CreateNetworkLoadBalancer(ctx context.Context, request networkloadbalancer.CreateNetworkLoadBalancerRequest) (response networkloadbalancer.CreateNetworkLoadBalancerResponse, err error) {
diff --git a/pkg/volume/provisioner/core/provisioner.go b/pkg/volume/provisioner/core/provisioner.go
index e1bb67872e..3cd8cf2d64 100644
--- a/pkg/volume/provisioner/core/provisioner.go
+++ b/pkg/volume/provisioner/core/provisioner.go
@@ -1,4 +1,4 @@
-// Copyright 2017 Oracle and/or its affiliates. All rights reserved.
+// Copyright (C) 2017, 2025, Oracle and/or its affiliates.
@@ -124,7 +124,7 @@ func NewOCIProvisioner(logger *zap.SugaredLogger, kubeClient kubernetes.Interfac rateLimiter := client.NewRateLimiter(logger, cfg.RateLimiter) - client, err := client.New(logger, cp, &rateLimiter, cfg.Auth.TenancyID) + client, err := client.New(logger, cp, &rateLimiter, cfg) if err != nil { return nil, errors.Wrapf(err, "unable to construct OCI client") } diff --git a/test/e2e/cloud-provider-oci/csi_snapshot_restore.go b/test/e2e/cloud-provider-oci/csi_snapshot_restore.go index 6db6bfeb8c..d0f1897856 100644 --- a/test/e2e/cloud-provider-oci/csi_snapshot_restore.go +++ b/test/e2e/cloud-provider-oci/csi_snapshot_restore.go @@ -39,88 +39,88 @@ const ( var _ = Describe("Snapshot Creation and Restore", func() { f := framework.NewBackupFramework("snapshot-restore") - Context("[cloudprovider][storage][csi][snapshot][restore][test1]", func() { - //tests := []struct{ - // attachmentType string - // backupType string - // fsType string - //}{ - // {framework.AttachmentTypeParavirtualized, framework.BackupTypeIncremental, ""}, - // {framework.AttachmentTypeParavirtualized, framework.BackupTypeFull, ""}, - // {framework.AttachmentTypeISCSI, framework.BackupTypeIncremental, ""}, - // {framework.AttachmentTypeISCSI, framework.BackupTypeFull, ""}, - // {framework.AttachmentTypeISCSI, framework.BackupTypeIncremental, "xfs"}, - // {framework.AttachmentTypeParavirtualized, framework.BackupTypeFull, "ext3"}, - //} - //for _, entry := range tests { - // entry := entry - // testName := "Should be able to create and restore " + entry.backupType + " snapshot from " + entry.attachmentType + " volume " - // if entry.fsType != "" { - // testName += " with " + entry.fsType + " fsType" - // } - // It(testName, func() { - // scParams := map[string]string{framework.AttachmentType: entry.attachmentType} - // vscParams := map[string]string{framework.BackupType: entry.backupType} - // scParams[framework.FstypeKey] = entry.fsType - // testSnapshotAndRestore(f, scParams, 
vscParams) - // }) - //} - //It("FS should get expanded when a PVC is restored with a lesser size backup (iscsi)", func() { - // checkOrInstallCRDs(f) - // scParams := map[string]string{framework.AttachmentType: framework.AttachmentTypeISCSI} - // vscParams := map[string]string{framework.BackupType: framework.BackupTypeFull} - // pvcJig := framework.NewPVCTestJig(f.ClientSet, "csi-snapshot-restore-e2e-tests") - // pvcJig.InitialiseSnapClient(f.SnapClientSet) - // - // scName := f.CreateStorageClassOrFail(f.Namespace.Name, BVDriverName, scParams, pvcJig.Labels, BindingModeWaitForFirstConsumer, true, ReclaimPolicyDelete, nil) - // pvc := pvcJig.CreateAndAwaitPVCOrFailCSI(f.Namespace.Name, framework.MinVolumeBlock, scName, nil, v1.PersistentVolumeFilesystem, v1.ReadWriteOnce, v1.ClaimPending) - // - // _ = pvcJig.NewPodForCSI("pod-original", f.Namespace.Name, pvc.Name, setupF.AdLabel) - // - // time.Sleep(60 * time.Second) //waiting for pod to up and running - // - // vscName := f.CreateVolumeSnapshotClassOrFail(f.Namespace.Name, BVDriverName, vscParams, ReclaimPolicyDelete) - // vs := pvcJig.CreateAndAwaitVolumeSnapshotOrFail(f.Namespace.Name, vscName, pvc.Name, nil) - // - // pvcRestore := pvcJig.CreateAndAwaitPVCOrFailSnapshotSource(f.Namespace.Name, framework.MaxVolumeBlock, scName, vs.Name, v1.ClaimPending, nil) - // podRestoreName := pvcJig.NewPodForCSI("pod-restored", f.Namespace.Name, pvcRestore.Name, setupF.AdLabel) - // - // time.Sleep(60 * time.Second) //waiting for pod to up and running - // - // pvcJig.CheckUsableVolumeSizeInsidePod(f.Namespace.Name, podRestoreName, "99G") - // - // f.VolumeIds = append(f.VolumeIds, pvc.Spec.VolumeName) - // _ = f.DeleteVolumeSnapshotClass(f.Namespace.Name) - // _ = f.DeleteStorageClass(f.Namespace.Name) - //}) - //It("FS should get expanded when a PVC is restored with a lesser size backup (paravirtualized)", func() { - // checkOrInstallCRDs(f) - // scParams := map[string]string{framework.AttachmentType: 
framework.AttachmentTypeParavirtualized} - // vscParams := map[string]string{framework.BackupType: framework.BackupTypeFull} - // pvcJig := framework.NewPVCTestJig(f.ClientSet, "csi-snapshot-restore-e2e-tests") - // pvcJig.InitialiseSnapClient(f.SnapClientSet) - // - // scName := f.CreateStorageClassOrFail(f.Namespace.Name, BVDriverName, scParams, pvcJig.Labels, BindingModeWaitForFirstConsumer, true, ReclaimPolicyDelete, nil) - // pvc := pvcJig.CreateAndAwaitPVCOrFailCSI(f.Namespace.Name, framework.MinVolumeBlock, scName, nil, v1.PersistentVolumeFilesystem, v1.ReadWriteOnce, v1.ClaimPending) - // - // _ = pvcJig.NewPodForCSI("pod-original", f.Namespace.Name, pvc.Name, setupF.AdLabel) - // - // time.Sleep(60 * time.Second) //waiting for pod to up and running - // - // vscName := f.CreateVolumeSnapshotClassOrFail(f.Namespace.Name, BVDriverName, vscParams, ReclaimPolicyDelete) - // vs := pvcJig.CreateAndAwaitVolumeSnapshotOrFail(f.Namespace.Name, vscName, pvc.Name, nil) - // - // pvcRestore := pvcJig.CreateAndAwaitPVCOrFailSnapshotSource(f.Namespace.Name, framework.MaxVolumeBlock, scName, vs.Name, v1.ClaimPending, nil) - // podRestoreName := pvcJig.NewPodForCSI("pod-restored", f.Namespace.Name, pvcRestore.Name, setupF.AdLabel) - // - // time.Sleep(60 * time.Second) //waiting for pod to up and running - // - // pvcJig.CheckUsableVolumeSizeInsidePod(f.Namespace.Name, podRestoreName, "99G") - // - // f.VolumeIds = append(f.VolumeIds, pvc.Spec.VolumeName) - // _ = f.DeleteVolumeSnapshotClass(f.Namespace.Name) - // _ = f.DeleteStorageClass(f.Namespace.Name) - //}) + Context("[cloudprovider][storage][csi][snapshot][restore]", func() { + tests := []struct{ + attachmentType string + backupType string + fsType string + }{ + {framework.AttachmentTypeParavirtualized, framework.BackupTypeIncremental, ""}, + {framework.AttachmentTypeParavirtualized, framework.BackupTypeFull, ""}, + {framework.AttachmentTypeISCSI, framework.BackupTypeIncremental, ""}, + 
{framework.AttachmentTypeISCSI, framework.BackupTypeFull, ""}, + {framework.AttachmentTypeISCSI, framework.BackupTypeIncremental, "xfs"}, + {framework.AttachmentTypeParavirtualized, framework.BackupTypeFull, "ext3"}, + } + for _, entry := range tests { + entry := entry + testName := "Should be able to create and restore " + entry.backupType + " snapshot from " + entry.attachmentType + " volume " + if entry.fsType != "" { + testName += " with " + entry.fsType + " fsType" + } + It(testName, func() { + scParams := map[string]string{framework.AttachmentType: entry.attachmentType} + vscParams := map[string]string{framework.BackupType: entry.backupType} + scParams[framework.FstypeKey] = entry.fsType + testSnapshotAndRestore(f, scParams, vscParams, v1.PersistentVolumeBlock) + }) + } + It("FS should get expanded when a PVC is restored with a lesser size backup (iscsi)", func() { + checkOrInstallCRDs(f) + scParams := map[string]string{framework.AttachmentType: framework.AttachmentTypeISCSI} + vscParams := map[string]string{framework.BackupType: framework.BackupTypeFull} + pvcJig := framework.NewPVCTestJig(f.ClientSet, "csi-snapshot-restore-e2e-tests") + pvcJig.InitialiseSnapClient(f.SnapClientSet) + + scName := f.CreateStorageClassOrFail(f.Namespace.Name, BVDriverName, scParams, pvcJig.Labels, BindingModeWaitForFirstConsumer, true, ReclaimPolicyDelete, nil) + pvc := pvcJig.CreateAndAwaitPVCOrFailCSI(f.Namespace.Name, framework.MinVolumeBlock, scName, nil, v1.PersistentVolumeFilesystem, v1.ReadWriteOnce, v1.ClaimPending) + + _ = pvcJig.NewPodForCSI("pod-original", f.Namespace.Name, pvc.Name, setupF.AdLabel, v1.PersistentVolumeBlock) + + time.Sleep(60 * time.Second) //waiting for pod to up and running + + vscName := f.CreateVolumeSnapshotClassOrFail(f.Namespace.Name, BVDriverName, vscParams, ReclaimPolicyDelete) + vs := pvcJig.CreateAndAwaitVolumeSnapshotOrFail(f.Namespace.Name, vscName, pvc.Name, nil) + + pvcRestore := 
pvcJig.CreateAndAwaitPVCOrFailSnapshotSource(f.Namespace.Name, framework.MaxVolumeBlock, scName, vs.Name, v1.ClaimPending, false, nil) + podRestoreName := pvcJig.NewPodForCSI("pod-restored", f.Namespace.Name, pvcRestore.Name, setupF.AdLabel, v1.PersistentVolumeBlock) + + time.Sleep(60 * time.Second) //waiting for pod to up and running + + pvcJig.CheckUsableVolumeSizeInsidePod(f.Namespace.Name, podRestoreName, "99G") + + f.VolumeIds = append(f.VolumeIds, pvc.Spec.VolumeName) + _ = f.DeleteVolumeSnapshotClass(f.Namespace.Name) + _ = f.DeleteStorageClass(f.Namespace.Name) + }) + It("FS should get expanded when a PVC is restored with a lesser size backup (paravirtualized)", func() { + checkOrInstallCRDs(f) + scParams := map[string]string{framework.AttachmentType: framework.AttachmentTypeParavirtualized} + vscParams := map[string]string{framework.BackupType: framework.BackupTypeFull} + pvcJig := framework.NewPVCTestJig(f.ClientSet, "csi-snapshot-restore-e2e-tests") + pvcJig.InitialiseSnapClient(f.SnapClientSet) + + scName := f.CreateStorageClassOrFail(f.Namespace.Name, BVDriverName, scParams, pvcJig.Labels, BindingModeWaitForFirstConsumer, true, ReclaimPolicyDelete, nil) + pvc := pvcJig.CreateAndAwaitPVCOrFailCSI(f.Namespace.Name, framework.MinVolumeBlock, scName, nil, v1.PersistentVolumeFilesystem, v1.ReadWriteOnce, v1.ClaimPending) + + _ = pvcJig.NewPodForCSI("pod-original", f.Namespace.Name, pvc.Name, setupF.AdLabel, v1.PersistentVolumeBlock) + + time.Sleep(60 * time.Second) //waiting for pod to up and running + + vscName := f.CreateVolumeSnapshotClassOrFail(f.Namespace.Name, BVDriverName, vscParams, ReclaimPolicyDelete) + vs := pvcJig.CreateAndAwaitVolumeSnapshotOrFail(f.Namespace.Name, vscName, pvc.Name, nil) + + pvcRestore := pvcJig.CreateAndAwaitPVCOrFailSnapshotSource(f.Namespace.Name, framework.MaxVolumeBlock, scName, vs.Name, v1.ClaimPending, false, nil) + podRestoreName := pvcJig.NewPodForCSI("pod-restored", f.Namespace.Name, pvcRestore.Name, setupF.AdLabel, 
v1.PersistentVolumeBlock) + + time.Sleep(60 * time.Second) //waiting for pod to up and running + + pvcJig.CheckUsableVolumeSizeInsidePod(f.Namespace.Name, podRestoreName, "99G") + + f.VolumeIds = append(f.VolumeIds, pvc.Spec.VolumeName) + _ = f.DeleteVolumeSnapshotClass(f.Namespace.Name) + _ = f.DeleteStorageClass(f.Namespace.Name) + }) It("Should be able to create and restore a snapshot from a backup(static case)", func() { checkOrInstallCRDs(f) scParams := map[string]string{framework.AttachmentType: framework.AttachmentTypeISCSI} diff --git a/test/e2e/cloud-provider-oci/load_balancer.go b/test/e2e/cloud-provider-oci/load_balancer.go index dd94d329f9..fc5c6bfbda 100644 --- a/test/e2e/cloud-provider-oci/load_balancer.go +++ b/test/e2e/cloud-provider-oci/load_balancer.go @@ -22,7 +22,6 @@ import ( "reflect" "strconv" "strings" - "sync" "time" . "github.com/onsi/ginkgo" @@ -40,24 +39,11 @@ import ( clientset "k8s.io/client-go/kubernetes" ) -var once sync.Once var _ = Describe("Service [Slow]", func() { baseName := "service" - var tcpService *v1.Service - var jig *sharedfw.ServiceTestJig f := sharedfw.NewDefaultFramework(baseName) - JustAfterEach(func() { - if tcpService == nil || jig == nil { - return - } - dp := metav1.DeletePropagationBackground // Default after k8s v1.20 - jig.Client.CoreV1().Services(f.Namespace.Name).Delete(context.Background(), tcpService.Name, metav1.DeleteOptions{ - PropagationPolicy: &dp, - }) - }) - testDefinedTags := map[string]map[string]interface{}{"oke-tag": {"oke-tagging": "ccm-test-integ"}} testDefinedTagsByteArray, _ := json.Marshal(testDefinedTags) @@ -100,206 +86,214 @@ var _ = Describe("Service [Slow]", func() { sharedfw.Logf("Skipping Workload Identity Principal test for LB Type (%s) because the cluster is not an OKE ENHANCED_CLUSTER", test.lbType) continue } + func() { + By("Running test for: " + test.lbType) + serviceName := "basic-" + test.lbType + "-test" + ns := f.Namespace.Name - By("Running test for: " + test.lbType) - 
serviceName := "basic-" + test.lbType + "-test" - ns := f.Namespace.Name + jig := sharedfw.NewServiceTestJig(f.ClientSet, serviceName) - jig = sharedfw.NewServiceTestJig(f.ClientSet, serviceName) + nodeIP := sharedfw.PickNodeIP(jig.Client) // for later - nodeIP := sharedfw.PickNodeIP(jig.Client) // for later + loadBalancerLagTimeout := sharedfw.LoadBalancerLagTimeoutDefault + loadBalancerCreateTimeout := sharedfw.LoadBalancerCreateTimeoutDefault + if nodes := sharedfw.GetReadySchedulableNodesOrDie(f.ClientSet); len(nodes.Items) > sharedfw.LargeClusterMinNodesNumber { + loadBalancerCreateTimeout = sharedfw.LoadBalancerCreateTimeoutLarge + } + var serviceAccount *v1.ServiceAccount + if sa, exists := test.CreationAnnotations[cloudprovider.ServiceAnnotationServiceAccountName]; exists { + // Create a service account in the same namespace as the service + By("creating service account \"sa\" in namespace " + ns) + serviceAccount = jig.CreateServiceAccountOrFail(ns, sa, nil) + } - loadBalancerLagTimeout := sharedfw.LoadBalancerLagTimeoutDefault - loadBalancerCreateTimeout := sharedfw.LoadBalancerCreateTimeoutDefault - if nodes := sharedfw.GetReadySchedulableNodesOrDie(f.ClientSet); len(nodes.Items) > sharedfw.LargeClusterMinNodesNumber { - loadBalancerCreateTimeout = sharedfw.LoadBalancerCreateTimeoutLarge - } - var serviceAccount *v1.ServiceAccount - if sa, exists := test.CreationAnnotations[cloudprovider.ServiceAnnotationServiceAccountName]; exists { - // Create a service account in the same namespace as the service - By("creating service account \"sa\" in namespace " + ns) - serviceAccount = jig.CreateServiceAccountOrFail(ns, sa, nil) - } + // TODO(apryde): Test that LoadBalancers can receive static IP addresses + // (in a provider agnostic manner?). OCI does not currently + // support this. + requestedIP := "" - // TODO(apryde): Test that LoadBalancers can receive static IP addresses - // (in a provider agnostic manner?). OCI does not currently - // support this. 
- requestedIP := "" + tcpService := jig.CreateTCPServiceOrFail(ns, func(s *v1.Service) { + s.Spec.Type = v1.ServiceTypeLoadBalancer + s.Spec.LoadBalancerIP = requestedIP // will be "" if not applicable + s.ObjectMeta.Annotations = test.CreationAnnotations + }) - tcpService = jig.CreateTCPServiceOrFail(ns, func(s *v1.Service) { - s.Spec.Type = v1.ServiceTypeLoadBalancer - s.Spec.LoadBalancerIP = requestedIP // will be "" if not applicable - s.ObjectMeta.Annotations = test.CreationAnnotations - }) + if _, exists := test.CreationAnnotations[cloudprovider.ServiceAnnotationServiceAccountName]; exists { + By("setting service account \"sa\" owner reference as the TCP service " + serviceName) + // Set SA owner reference as the service to prevent deletion of service account before the service + jig.SetServiceOwnerReferenceOnServiceAccountOrFail(ns, serviceAccount, tcpService) + defer func() { + dp := metav1.DeletePropagationForeground + jig.Client.CoreV1().Services(f.Namespace.Name).Delete(context.Background(), tcpService.Name, metav1.DeleteOptions{ + PropagationPolicy: &dp, + }) + time.Sleep(time.Second) + }() + } - if _, exists := test.CreationAnnotations[cloudprovider.ServiceAnnotationServiceAccountName]; exists { - By("setting service account \"sa\" owner reference as the TCP service " + serviceName) - // Set SA owner reference as the service to prevent deletion of service account before the service - jig.SetServiceOwnerReferenceOnServiceAccountOrFail(ns, serviceAccount, tcpService) - } + svcPort := int(tcpService.Spec.Ports[0].Port) - svcPort := int(tcpService.Spec.Ports[0].Port) + By("creating a pod to be part of the TCP service " + serviceName) + jig.RunOrFail(ns, nil) - By("creating a pod to be part of the TCP service " + serviceName) - jig.RunOrFail(ns, nil) + // TODO(apryde): Test UDP service. OCI does not currently support this. - // TODO(apryde): Test UDP service. OCI does not currently support this. 
+ By("waiting for the TCP service to have a load balancer") + // Wait for the load balancer to be created asynchronously + tcpService = jig.WaitForLoadBalancerOrFail(ns, tcpService.Name, loadBalancerCreateTimeout) + jig.SanityCheckService(tcpService, v1.ServiceTypeLoadBalancer) - By("waiting for the TCP service to have a load balancer") - // Wait for the load balancer to be created asynchronously - tcpService = jig.WaitForLoadBalancerOrFail(ns, tcpService.Name, loadBalancerCreateTimeout) - jig.SanityCheckService(tcpService, v1.ServiceTypeLoadBalancer) - - if strings.HasSuffix(test.lbType, "-wris") { - lbName := cloudprovider.GetLoadBalancerName(tcpService) - sharedfw.Logf("LB Name is %s", lbName) - ctx := context.TODO() - compartmentId := "" - if setupF.Compartment1 != "" { - compartmentId = setupF.Compartment1 - } else if f.CloudProviderConfig.CompartmentID != "" { - compartmentId = f.CloudProviderConfig.CompartmentID - } else if f.CloudProviderConfig.Auth.CompartmentID != "" { - compartmentId = f.CloudProviderConfig.Auth.CompartmentID - } else { - sharedfw.Failf("Compartment Id undefined.") - } - lbType := strings.TrimSuffix(test.lbType, "-wris") - loadBalancer, err := f.Client.LoadBalancer(zap.L().Sugar(), lbType, "", nil).GetLoadBalancerByName(ctx, compartmentId, lbName) - sharedfw.ExpectNoError(err) + if strings.HasSuffix(test.lbType, "-wris") { + lbName := cloudprovider.GetLoadBalancerName(tcpService) + sharedfw.Logf("LB Name is %s", lbName) + ctx := context.TODO() + compartmentId := "" + if setupF.Compartment1 != "" { + compartmentId = setupF.Compartment1 + } else if f.CloudProviderConfig.CompartmentID != "" { + compartmentId = f.CloudProviderConfig.CompartmentID + } else if f.CloudProviderConfig.Auth.CompartmentID != "" { + compartmentId = f.CloudProviderConfig.Auth.CompartmentID + } else { + sharedfw.Failf("Compartment Id undefined.") + } + lbType := strings.TrimSuffix(test.lbType, "-wris") + loadBalancer, err := f.Client.LoadBalancer(zap.L().Sugar(), 
lbType, "", nil).GetLoadBalancerByName(ctx, compartmentId, lbName) + sharedfw.ExpectNoError(err) - if !reflect.DeepEqual(loadBalancer.DefinedTags, testDefinedTags) { - sharedfw.Failf("Defined tag mismatch! Expected: %v, Got: %v", testDefinedTags, loadBalancer.DefinedTags) + if !reflect.DeepEqual(loadBalancer.DefinedTags, testDefinedTags) { + sharedfw.Failf("Defined tag mismatch! Expected: %v, Got: %v", testDefinedTags, loadBalancer.DefinedTags) + } } - } - if strings.HasSuffix(test.lbType, "-wris") { - sharedfw.Logf("skip evaluating system tag when the principal type is Workload identity") - } else { - By("validating system tags on the loadbalancer") - lbName := cloudprovider.GetLoadBalancerName(tcpService) - sharedfw.Logf("LB Name is %s", lbName) - ctx := context.TODO() - compartmentId := "" - if setupF.Compartment1 != "" { - compartmentId = setupF.Compartment1 - } else if f.CloudProviderConfig.CompartmentID != "" { - compartmentId = f.CloudProviderConfig.CompartmentID - } else if f.CloudProviderConfig.Auth.CompartmentID != "" { - compartmentId = f.CloudProviderConfig.Auth.CompartmentID - } else { - sharedfw.Failf("Compartment Id undefined.") - } - lbType := test.lbType if strings.HasSuffix(test.lbType, "-wris") { - lbType = strings.TrimSuffix(test.lbType, "-wris") - } - loadBalancer, err := f.Client.LoadBalancer(zap.L().Sugar(), lbType, "", nil).GetLoadBalancerByName(ctx, compartmentId, lbName) - sharedfw.ExpectNoError(err) - sharedfw.Logf("Loadbalancer details %v:", loadBalancer) - if setupF.AddOkeSystemTags && !sharedfw.HasOkeSystemTags(loadBalancer.SystemTags) { - sharedfw.Failf("Loadbalancer is expected to have the system tags") + sharedfw.Logf("skip evaluating system tag when the principal type is Workload identity") + } else { + By("validating system tags on the loadbalancer") + lbName := cloudprovider.GetLoadBalancerName(tcpService) + sharedfw.Logf("LB Name is %s", lbName) + ctx := context.TODO() + compartmentId := "" + if setupF.Compartment1 != "" { + 
compartmentId = setupF.Compartment1 + } else if f.CloudProviderConfig.CompartmentID != "" { + compartmentId = f.CloudProviderConfig.CompartmentID + } else if f.CloudProviderConfig.Auth.CompartmentID != "" { + compartmentId = f.CloudProviderConfig.Auth.CompartmentID + } else { + sharedfw.Failf("Compartment Id undefined.") + } + lbType := test.lbType + if strings.HasSuffix(test.lbType, "-wris") { + lbType = strings.TrimSuffix(test.lbType, "-wris") + } + loadBalancer, err := f.Client.LoadBalancer(zap.L().Sugar(), lbType, "", nil).GetLoadBalancerByName(ctx, compartmentId, lbName) + sharedfw.ExpectNoError(err) + sharedfw.Logf("Loadbalancer details %v:", loadBalancer) + if setupF.AddOkeSystemTags && !sharedfw.HasOkeSystemTags(loadBalancer.SystemTags) { + sharedfw.Failf("Loadbalancer is expected to have the system tags") + } } - } - tcpNodePort := int(tcpService.Spec.Ports[0].NodePort) - sharedfw.Logf("TCP node port: %d", tcpNodePort) + tcpNodePort := int(tcpService.Spec.Ports[0].NodePort) + sharedfw.Logf("TCP node port: %d", tcpNodePort) - if requestedIP != "" && sharedfw.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != requestedIP { - sharedfw.Failf("unexpected TCP Status.LoadBalancer.Ingress (expected %s, got %s)", requestedIP, sharedfw.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) - } - tcpIngressIP := sharedfw.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) - sharedfw.Logf("TCP load balancer: %s", tcpIngressIP) + if requestedIP != "" && sharedfw.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != requestedIP { + sharedfw.Failf("unexpected TCP Status.LoadBalancer.Ingress (expected %s, got %s)", requestedIP, sharedfw.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) + } + tcpIngressIP := sharedfw.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) + sharedfw.Logf("TCP load balancer: %s", tcpIngressIP) - if f.NodePortTest { - By("hitting the TCP service's NodePort") - jig.TestReachableHTTP(false, 
nodeIP, tcpNodePort, sharedfw.KubeProxyLagTimeout) - } + if f.NodePortTest { + By("hitting the TCP service's NodePort") + jig.TestReachableHTTP(false, nodeIP, tcpNodePort, sharedfw.KubeProxyLagTimeout) + } - By("hitting the TCP service's LoadBalancer") - jig.TestReachableHTTP(false, tcpIngressIP, svcPort, loadBalancerLagTimeout) + By("hitting the TCP service's LoadBalancer") + jig.TestReachableHTTP(false, tcpIngressIP, svcPort, loadBalancerLagTimeout) - // Change the services' node ports. + // Change the services' node ports. - By("changing the TCP service's NodePort") - // Count the number of ingress/egress rules with the original port so - // we can check the correct number are updated. - numEgressRules, numIngressRules := sharedfw.CountSinglePortSecListRules(f.Client, f.CCMSecListID, f.K8SSecListID, tcpNodePort) - tcpService = jig.ChangeServiceNodePortOrFail(ns, tcpService.Name, tcpNodePort) - jig.SanityCheckService(tcpService, v1.ServiceTypeLoadBalancer) + By("changing the TCP service's NodePort") + // Count the number of ingress/egress rules with the original port so + // we can check the correct number are updated. 
+ numEgressRules, numIngressRules := sharedfw.CountSinglePortSecListRules(f.Client, f.CCMSecListID, f.K8SSecListID, tcpNodePort) + tcpService = jig.ChangeServiceNodePortOrFail(ns, tcpService.Name, tcpNodePort) + jig.SanityCheckService(tcpService, v1.ServiceTypeLoadBalancer) - tcpNodePortOld := tcpNodePort - tcpNodePort = int(tcpService.Spec.Ports[0].NodePort) - if tcpNodePort == tcpNodePortOld { - sharedfw.Failf("TCP Spec.Ports[0].NodePort (%d) did not change", tcpNodePort) - } - if sharedfw.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP { - sharedfw.Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, sharedfw.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) - } + tcpNodePortOld := tcpNodePort + tcpNodePort = int(tcpService.Spec.Ports[0].NodePort) + if tcpNodePort == tcpNodePortOld { + sharedfw.Failf("TCP Spec.Ports[0].NodePort (%d) did not change", tcpNodePort) + } + if sharedfw.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP { + sharedfw.Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, sharedfw.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) + } - // Check the correct number of rules are present. - sharedfw.WaitForSinglePortEgressRulesAfterPortChangeOrFail(f.Client, f.CCMSecListID, numEgressRules, tcpNodePortOld, tcpNodePort) - sharedfw.WaitForSinglePortIngressRulesAfterPortChangeOrFail(f.Client, f.K8SSecListID, numIngressRules, tcpNodePortOld, tcpNodePort) + // Check the correct number of rules are present. 
+ sharedfw.WaitForSinglePortEgressRulesAfterPortChangeOrFail(f.Client, f.CCMSecListID, numEgressRules, tcpNodePortOld, tcpNodePort) + sharedfw.WaitForSinglePortIngressRulesAfterPortChangeOrFail(f.Client, f.K8SSecListID, numIngressRules, tcpNodePortOld, tcpNodePort) - sharedfw.Logf("TCP node port: %d", tcpNodePort) + sharedfw.Logf("TCP node port: %d", tcpNodePort) - if f.NodePortTest { - By("hitting the TCP service's new NodePort") - jig.TestReachableHTTP(false, nodeIP, tcpNodePort, sharedfw.KubeProxyLagTimeout) + if f.NodePortTest { + By("hitting the TCP service's new NodePort") + jig.TestReachableHTTP(false, nodeIP, tcpNodePort, sharedfw.KubeProxyLagTimeout) - By("checking the old TCP NodePort is closed") - jig.TestNotReachableHTTP(nodeIP, tcpNodePortOld, sharedfw.KubeProxyLagTimeout) - } + By("checking the old TCP NodePort is closed") + jig.TestNotReachableHTTP(nodeIP, tcpNodePortOld, sharedfw.KubeProxyLagTimeout) + } - By("hitting the TCP service's LoadBalancer") - jig.TestReachableHTTP(false, tcpIngressIP, svcPort, loadBalancerLagTimeout) + By("hitting the TCP service's LoadBalancer") + jig.TestReachableHTTP(false, tcpIngressIP, svcPort, loadBalancerLagTimeout) - // Change the services' main ports. + // Change the services' main ports. 
- By("changing the TCP service's port") - tcpService = jig.UpdateServiceOrFail(ns, tcpService.Name, func(s *v1.Service) { - s.Spec.Ports[0].Port++ - }) - jig.SanityCheckService(tcpService, v1.ServiceTypeLoadBalancer) - svcPortOld := svcPort - svcPort = int(tcpService.Spec.Ports[0].Port) - if svcPort == svcPortOld { - sharedfw.Failf("TCP Spec.Ports[0].Port (%d) did not change", svcPort) - } - if int(tcpService.Spec.Ports[0].NodePort) != tcpNodePort { - sharedfw.Failf("TCP Spec.Ports[0].NodePort (%d) changed", tcpService.Spec.Ports[0].NodePort) - } - if sharedfw.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP { - sharedfw.Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, sharedfw.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) - } + By("changing the TCP service's port") + tcpService = jig.UpdateServiceOrFail(ns, tcpService.Name, func(s *v1.Service) { + s.Spec.Ports[0].Port++ + }) + jig.SanityCheckService(tcpService, v1.ServiceTypeLoadBalancer) + svcPortOld := svcPort + svcPort = int(tcpService.Spec.Ports[0].Port) + if svcPort == svcPortOld { + sharedfw.Failf("TCP Spec.Ports[0].Port (%d) did not change", svcPort) + } + if int(tcpService.Spec.Ports[0].NodePort) != tcpNodePort { + sharedfw.Failf("TCP Spec.Ports[0].NodePort (%d) changed", tcpService.Spec.Ports[0].NodePort) + } + if sharedfw.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP { + sharedfw.Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, sharedfw.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) + } - sharedfw.Logf("service port (TCP): %d", svcPort) - if f.NodePortTest { - By("hitting the TCP service's NodePort") - jig.TestReachableHTTP(false, nodeIP, tcpNodePort, sharedfw.KubeProxyLagTimeout) - } + sharedfw.Logf("service port (TCP): %d", svcPort) + if f.NodePortTest { + By("hitting the TCP service's NodePort") + jig.TestReachableHTTP(false, 
nodeIP, tcpNodePort, sharedfw.KubeProxyLagTimeout) + } - By("hitting the TCP service's LoadBalancer") - jig.TestReachableHTTP(false, tcpIngressIP, svcPort, loadBalancerCreateTimeout) // this may actually recreate the LB + By("hitting the TCP service's LoadBalancer") + jig.TestReachableHTTP(false, tcpIngressIP, svcPort, loadBalancerCreateTimeout) // this may actually recreate the LB - // Change the services back to ClusterIP. + // Change the services back to ClusterIP. - By("changing TCP service back to type=ClusterIP") - tcpService = jig.UpdateServiceOrFail(ns, tcpService.Name, func(s *v1.Service) { - s.Spec.Type = v1.ServiceTypeClusterIP - s.Spec.Ports[0].NodePort = 0 - }) - // Wait for the load balancer to be destroyed asynchronously - tcpService = jig.WaitForLoadBalancerDestroyOrFail(ns, tcpService.Name, tcpIngressIP, svcPort, loadBalancerCreateTimeout) - jig.SanityCheckService(tcpService, v1.ServiceTypeClusterIP) + By("changing TCP service back to type=ClusterIP") + tcpService = jig.UpdateServiceOrFail(ns, tcpService.Name, func(s *v1.Service) { + s.Spec.Type = v1.ServiceTypeClusterIP + s.Spec.Ports[0].NodePort = 0 + }) + // Wait for the load balancer to be destroyed asynchronously + tcpService = jig.WaitForLoadBalancerDestroyOrFail(ns, tcpService.Name, tcpIngressIP, svcPort, loadBalancerCreateTimeout) + jig.SanityCheckService(tcpService, v1.ServiceTypeClusterIP) - if f.NodePortTest { - By("checking the TCP NodePort is closed") - jig.TestNotReachableHTTP(nodeIP, tcpNodePort, sharedfw.KubeProxyLagTimeout) - } + if f.NodePortTest { + By("checking the TCP NodePort is closed") + jig.TestNotReachableHTTP(nodeIP, tcpNodePort, sharedfw.KubeProxyLagTimeout) + } - By("checking the TCP LoadBalancer is closed") - jig.TestNotReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout) + By("checking the TCP LoadBalancer is closed") + jig.TestNotReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout) + }() } }) }) @@ -1223,6 +1217,104 @@ var _ = Describe("Listener only 
enabled TLS", func() { }) }) +var _ = Describe("GRPC Listeners only enabled TLS", func() { + + baseName := "listener-service" + f := sharedfw.NewDefaultFramework(baseName) + + Context("[cloudprovider][ccm][lb][grpc]", func() { + It("should be possible to create a GRPC listener for LB [Canary]", func() { + serviceName := "listener-grpc-lb-test" + ns := f.Namespace.Name + + jig := sharedfw.NewServiceTestJig(f.ClientSet, serviceName) + + sslSecretName := "ssl-certificate-secret" + _, err := f.ClientSet.CoreV1().Secrets(ns).Create(context.Background(), &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: ns, + Name: sslSecretName, + }, + Data: map[string][]byte{ + cloudprovider.SSLCAFileName: []byte(sharedfw.SSLCAData), + cloudprovider.SSLCertificateFileName: []byte(sharedfw.SSLCertificateData), + cloudprovider.SSLPrivateKeyFileName: []byte(sharedfw.SSLPrivateData), + cloudprovider.SSLPassphrase: []byte(sharedfw.SSLPassphrase), + }, + }, metav1.CreateOptions{}) + sharedfw.ExpectNoError(err) + loadBalancerCreateTimeout := sharedfw.LoadBalancerCreateTimeoutDefault + if nodes := sharedfw.GetReadySchedulableNodesOrDie(f.ClientSet); len(nodes.Items) > sharedfw.LargeClusterMinNodesNumber { + loadBalancerCreateTimeout = sharedfw.LoadBalancerCreateTimeoutLarge + } + + requestedIP := "" + + tcpService := jig.CreateTCPServiceOrFail(ns, func(s *v1.Service) { + s.Spec.Type = v1.ServiceTypeLoadBalancer + s.Spec.LoadBalancerIP = requestedIP + s.Spec.Ports = []v1.ServicePort{v1.ServicePort{Name: "http", Port: 80, TargetPort: intstr.FromInt(80)}, + v1.ServicePort{Name: "https", Port: 443, TargetPort: intstr.FromInt(80)}} + s.ObjectMeta.Annotations = map[string]string{ + cloudprovider.ServiceAnnotationLoadBalancerSSLPorts: "80,443", + cloudprovider.ServiceAnnotationLoadBalancerTLSSecret: sslSecretName, + cloudprovider.ServiceAnnotationLoadBalancerInternal: "true", + cloudprovider.ServiceAnnotationLoadBalancerBEProtocol: "GRPC", + } + }) + + svcPort := 
int(tcpService.Spec.Ports[0].Port) + + By("waiting for the TCP service to have a load balancer") + // Wait for the load balancer to be created asynchronously + tcpService = jig.WaitForLoadBalancerOrFail(ns, tcpService.Name, loadBalancerCreateTimeout) + jig.SanityCheckService(tcpService, v1.ServiceTypeLoadBalancer) + + tcpNodePort := int(tcpService.Spec.Ports[0].NodePort) + sharedfw.Logf("TCP node port: %d", tcpNodePort) + + if requestedIP != "" && sharedfw.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != requestedIP { + sharedfw.Failf("unexpected TCP Status.LoadBalancer.Ingress (expected %s, got %s)", requestedIP, sharedfw.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) + } + tcpIngressIP := sharedfw.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) + sharedfw.Logf("TCP load balancer: %s", tcpIngressIP) + + By("Validate that GRPC Listener is created") + lbName := cloudprovider.GetLoadBalancerName(tcpService) + sharedfw.Logf("LB Name is %s", lbName) + ctx := context.TODO() + compartmentId := "" + if setupF.Compartment1 != "" { + compartmentId = setupF.Compartment1 + } else if f.CloudProviderConfig.CompartmentID != "" { + compartmentId = f.CloudProviderConfig.CompartmentID + } else if f.CloudProviderConfig.Auth.CompartmentID != "" { + compartmentId = f.CloudProviderConfig.Auth.CompartmentID + } else { + sharedfw.Failf("Compartment Id undefined.") + } + loadBalancer, err := f.Client.LoadBalancer(zap.L().Sugar(), cloudprovider.LB, "", nil).GetLoadBalancerByName(ctx, compartmentId, lbName) + sharedfw.ExpectNoError(err) + protocol := *loadBalancer.Listeners["GRPC-443"].Protocol + Expect(protocol == cloudprovider.ProtocolGrpc).To(BeTrue()) + + By("changing TCP service back to type=ClusterIP") + tcpService = jig.UpdateServiceOrFail(ns, tcpService.Name, func(s *v1.Service) { + s.Spec.Type = v1.ServiceTypeClusterIP + s.Spec.Ports[0].NodePort = 0 + s.Spec.Ports[1].NodePort = 0 + }) + + // Wait for the load balancer to be destroyed 
asynchronously + tcpService = jig.WaitForLoadBalancerDestroyOrFail(ns, tcpService.Name, tcpIngressIP, svcPort, loadBalancerCreateTimeout) + jig.SanityCheckService(tcpService, v1.ServiceTypeClusterIP) + + err = f.ClientSet.CoreV1().Secrets(ns).Delete(context.Background(), sslSecretName, metav1.DeleteOptions{}) + sharedfw.ExpectNoError(err) + }) + }) +}) + var _ = Describe("End to end enabled TLS - different certificates", func() { baseName := "e2e-diff-certs" f := sharedfw.NewDefaultFramework(baseName) @@ -2058,7 +2150,7 @@ var _ = Describe("LB Properties", func() { }) reservedIpTestArray := []struct { - lbtype string + lbType string CreationAnnotations map[string]string }{ { @@ -2075,12 +2167,33 @@ var _ = Describe("LB Properties", func() { cloudprovider.ServiceAnnotationLoadBalancerType: "nlb", }, }, + { + "lb-wris", + map[string]string{ + cloudprovider.ServiceAnnotationServiceAccountName: "sa", + cloudprovider.ServiceAnnotationLoadBalancerShape: "flexible", + cloudprovider.ServiceAnnotationLoadBalancerShapeFlexMin: "10", + cloudprovider.ServiceAnnotationLoadBalancerShapeFlexMax: "10", + }, + }, + { + "nlb-wris", + map[string]string{ + cloudprovider.ServiceAnnotationServiceAccountName: "sa", + cloudprovider.ServiceAnnotationLoadBalancerType: "nlb", + }, + }, } - //Test Reserved IP feature + + // Test Reserved IP feature It("should be possible to create Service type:LoadbBalancer with public reservedIP", func() { for _, test := range reservedIpTestArray { - By("Running test for: " + test.lbtype) - serviceName := "e2e-" + test.lbtype + "-reserved-ip" + if strings.HasSuffix(test.lbType, "-wris") && f.ClusterType != containerengine.ClusterTypeEnhancedCluster { + sharedfw.Logf("Skipping Workload Identity Principal test for LB Type (%s) because the cluster is not an OKE ENHANCED_CLUSTER", test.lbType) + continue + } + By("Running test for: " + test.lbType) + serviceName := "e2e-" + test.lbType + "-reserved-ip" ns := f.Namespace.Name jig := 
sharedfw.NewServiceTestJig(f.ClientSet, serviceName) @@ -2129,7 +2242,7 @@ var _ = Describe("LB Properties", func() { sharedfw.Failf("Compartment Id undefined.") } - loadBalancer, err := f.Client.LoadBalancer(zap.L().Sugar(), test.lbtype, "", nil).GetLoadBalancerByName(ctx, compartmentId, lbName) + loadBalancer, err := f.Client.LoadBalancer(zap.L().Sugar(), test.lbType, "", nil).GetLoadBalancerByName(ctx, compartmentId, lbName) sharedfw.ExpectNoError(err) By("waiting upto 5m0s to verify whether LB has been created with public reservedIP") diff --git a/test/e2e/framework/cloud_provider_framework.go b/test/e2e/framework/cloud_provider_framework.go index 1d12f5215f..31eb3adc59 100644 --- a/test/e2e/framework/cloud_provider_framework.go +++ b/test/e2e/framework/cloud_provider_framework.go @@ -370,7 +370,7 @@ func createOCIClient(cloudProviderConfig *providercfg.Config) (client.Interface, ociClientConfig := common.NewRawConfigurationProvider(cpc.TenancyID, cpc.UserID, cpc.Region, cpc.Fingerprint, cpc.PrivateKey, &cpc.PrivateKeyPassphrase) logger := zap.L() rateLimiter := client.NewRateLimiter(logger.Sugar(), cloudProviderConfig.RateLimiter) - ociClient, err := client.New(logger.Sugar(), ociClientConfig, &rateLimiter, cloudProviderConfig.Auth.TenancyID) + ociClient, err := client.New(logger.Sugar(), ociClientConfig, &rateLimiter, cloudProviderConfig) if err != nil { return nil, errors.Wrapf(err, "Couldn't create oci client from configuration: %s.", cloudConfigFile) } diff --git a/test/e2e/framework/service_util.go b/test/e2e/framework/service_util.go index 5891bad422..84a2fdd780 100644 --- a/test/e2e/framework/service_util.go +++ b/test/e2e/framework/service_util.go @@ -546,7 +546,6 @@ func EnsureLoadBalancerResourcesDeleted(ip, portRange string) error { } func (j *ServiceTestJig) WaitForLoadBalancerDestroyOrFail(namespace, name string, ip string, port int, timeout time.Duration) *v1.Service { - // TODO: once support ticket 21807001 is resolved, reduce this timeout back 
to something reasonable defer func() { if err := EnsureLoadBalancerResourcesDeleted(ip, strconv.Itoa(port)); err != nil { Logf("Failed to delete cloud resources for service: %s %d (%v)", ip, port, err) diff --git a/vendor/golang.org/x/net/html/doctype.go b/vendor/golang.org/x/net/html/doctype.go index c484e5a94f..bca3ae9a0c 100644 --- a/vendor/golang.org/x/net/html/doctype.go +++ b/vendor/golang.org/x/net/html/doctype.go @@ -87,7 +87,7 @@ func parseDoctype(s string) (n *Node, quirks bool) { } } if lastAttr := n.Attr[len(n.Attr)-1]; lastAttr.Key == "system" && - strings.ToLower(lastAttr.Val) == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd" { + strings.EqualFold(lastAttr.Val, "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd") { quirks = true } } diff --git a/vendor/golang.org/x/net/html/foreign.go b/vendor/golang.org/x/net/html/foreign.go index 9da9e9dc42..e8515d8e88 100644 --- a/vendor/golang.org/x/net/html/foreign.go +++ b/vendor/golang.org/x/net/html/foreign.go @@ -40,8 +40,7 @@ func htmlIntegrationPoint(n *Node) bool { if n.Data == "annotation-xml" { for _, a := range n.Attr { if a.Key == "encoding" { - val := strings.ToLower(a.Val) - if val == "text/html" || val == "application/xhtml+xml" { + if strings.EqualFold(a.Val, "text/html") || strings.EqualFold(a.Val, "application/xhtml+xml") { return true } } diff --git a/vendor/golang.org/x/net/html/parse.go b/vendor/golang.org/x/net/html/parse.go index 46a89eda6c..643c674e37 100644 --- a/vendor/golang.org/x/net/html/parse.go +++ b/vendor/golang.org/x/net/html/parse.go @@ -840,6 +840,10 @@ func afterHeadIM(p *parser) bool { p.parseImpliedToken(StartTagToken, a.Body, a.Body.String()) p.framesetOK = true + if p.tok.Type == ErrorToken { + // Stop parsing. 
+ return true + } return false } @@ -1031,7 +1035,7 @@ func inBodyIM(p *parser) bool { if p.tok.DataAtom == a.Input { for _, t := range p.tok.Attr { if t.Key == "type" { - if strings.ToLower(t.Val) == "hidden" { + if strings.EqualFold(t.Val, "hidden") { // Skip setting framesetOK = false return true } @@ -1459,7 +1463,7 @@ func inTableIM(p *parser) bool { return inHeadIM(p) case a.Input: for _, t := range p.tok.Attr { - if t.Key == "type" && strings.ToLower(t.Val) == "hidden" { + if t.Key == "type" && strings.EqualFold(t.Val, "hidden") { p.addElement() p.oe.pop() return true diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index 105c3b279c..81faec7e75 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -1490,7 +1490,7 @@ func (mh *MetaHeadersFrame) checkPseudos() error { pf := mh.PseudoFields() for i, hf := range pf { switch hf.Name { - case ":method", ":path", ":scheme", ":authority": + case ":method", ":path", ":scheme", ":authority", ":protocol": isRequest = true case ":status": isResponse = true @@ -1498,7 +1498,7 @@ func (mh *MetaHeadersFrame) checkPseudos() error { return pseudoHeaderError(hf.Name) } // Check for duplicates. - // This would be a bad algorithm, but N is 4. + // This would be a bad algorithm, but N is 5. // And this doesn't allocate. 
for _, hf2 := range pf[:i] { if hf.Name == hf2.Name { diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index 7688c356b7..c7601c909f 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -34,10 +34,11 @@ import ( ) var ( - VerboseLogs bool - logFrameWrites bool - logFrameReads bool - inTests bool + VerboseLogs bool + logFrameWrites bool + logFrameReads bool + inTests bool + disableExtendedConnectProtocol bool ) func init() { @@ -50,6 +51,9 @@ func init() { logFrameWrites = true logFrameReads = true } + if strings.Contains(e, "http2xconnect=0") { + disableExtendedConnectProtocol = true + } } const ( @@ -141,6 +145,10 @@ func (s Setting) Valid() error { if s.Val < 16384 || s.Val > 1<<24-1 { return ConnectionError(ErrCodeProtocol) } + case SettingEnableConnectProtocol: + if s.Val != 1 && s.Val != 0 { + return ConnectionError(ErrCodeProtocol) + } } return nil } @@ -150,21 +158,23 @@ func (s Setting) Valid() error { type SettingID uint16 const ( - SettingHeaderTableSize SettingID = 0x1 - SettingEnablePush SettingID = 0x2 - SettingMaxConcurrentStreams SettingID = 0x3 - SettingInitialWindowSize SettingID = 0x4 - SettingMaxFrameSize SettingID = 0x5 - SettingMaxHeaderListSize SettingID = 0x6 + SettingHeaderTableSize SettingID = 0x1 + SettingEnablePush SettingID = 0x2 + SettingMaxConcurrentStreams SettingID = 0x3 + SettingInitialWindowSize SettingID = 0x4 + SettingMaxFrameSize SettingID = 0x5 + SettingMaxHeaderListSize SettingID = 0x6 + SettingEnableConnectProtocol SettingID = 0x8 ) var settingName = map[SettingID]string{ - SettingHeaderTableSize: "HEADER_TABLE_SIZE", - SettingEnablePush: "ENABLE_PUSH", - SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", - SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", - SettingMaxFrameSize: "MAX_FRAME_SIZE", - SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", + SettingHeaderTableSize: "HEADER_TABLE_SIZE", + SettingEnablePush: "ENABLE_PUSH", + 
SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", + SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", + SettingMaxFrameSize: "MAX_FRAME_SIZE", + SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", + SettingEnableConnectProtocol: "ENABLE_CONNECT_PROTOCOL", } func (s SettingID) String() string { diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 832414b450..b55547aec6 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -932,14 +932,18 @@ func (sc *serverConn) serve(conf http2Config) { sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) } + settings := writeSettings{ + {SettingMaxFrameSize, conf.MaxReadFrameSize}, + {SettingMaxConcurrentStreams, sc.advMaxStreams}, + {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, + {SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize}, + {SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)}, + } + if !disableExtendedConnectProtocol { + settings = append(settings, Setting{SettingEnableConnectProtocol, 1}) + } sc.writeFrame(FrameWriteRequest{ - write: writeSettings{ - {SettingMaxFrameSize, conf.MaxReadFrameSize}, - {SettingMaxConcurrentStreams, sc.advMaxStreams}, - {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, - {SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize}, - {SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)}, - }, + write: settings, }) sc.unackedSettings++ @@ -1801,6 +1805,9 @@ func (sc *serverConn) processSetting(s Setting) error { sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31 case SettingMaxHeaderListSize: sc.peerMaxHeaderListSize = s.Val + case SettingEnableConnectProtocol: + // Receipt of this parameter by a server does not + // have any impact default: // Unknown setting: "An endpoint that receives a SETTINGS // frame with any unknown or unsupported identifier MUST @@ -2231,11 +2238,17 @@ func (sc *serverConn) 
newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res scheme: f.PseudoValue("scheme"), authority: f.PseudoValue("authority"), path: f.PseudoValue("path"), + protocol: f.PseudoValue("protocol"), + } + + // extended connect is disabled, so we should not see :protocol + if disableExtendedConnectProtocol && rp.protocol != "" { + return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) } isConnect := rp.method == "CONNECT" if isConnect { - if rp.path != "" || rp.scheme != "" || rp.authority == "" { + if rp.protocol == "" && (rp.path != "" || rp.scheme != "" || rp.authority == "") { return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) } } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") { @@ -2259,6 +2272,9 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res if rp.authority == "" { rp.authority = rp.header.Get("Host") } + if rp.protocol != "" { + rp.header.Set(":protocol", rp.protocol) + } rw, req, err := sc.newWriterAndRequestNoBody(st, rp) if err != nil { @@ -2285,6 +2301,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res type requestParam struct { method string scheme, authority, path string + protocol string header http.Header } @@ -2326,7 +2343,7 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r var url_ *url.URL var requestURI string - if rp.method == "CONNECT" { + if rp.method == "CONNECT" && rp.protocol == "" { url_ = &url.URL{Host: rp.authority} requestURI = rp.authority // mimic HTTP/1 server behavior } else { diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index f5968f4407..090d0e1bdb 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -368,25 +368,26 @@ type ClientConn struct { idleTimeout time.Duration // or 0 for never idleTimer timer - mu 
sync.Mutex // guards following - cond *sync.Cond // hold mu; broadcast on flow/closed changes - flow outflow // our conn-level flow control quota (cs.outflow is per stream) - inflow inflow // peer's conn-level flow control - doNotReuse bool // whether conn is marked to not be reused for any future requests - closing bool - closed bool - seenSettings bool // true if we've seen a settings frame, false otherwise - wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back - goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received - goAwayDebug string // goAway frame's debug data, retained as a string - streams map[uint32]*clientStream // client-initiated - streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip - nextStreamID uint32 - pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams - pings map[[8]byte]chan struct{} // in flight ping data to notification channel - br *bufio.Reader - lastActive time.Time - lastIdle time.Time // time last idle + mu sync.Mutex // guards following + cond *sync.Cond // hold mu; broadcast on flow/closed changes + flow outflow // our conn-level flow control quota (cs.outflow is per stream) + inflow inflow // peer's conn-level flow control + doNotReuse bool // whether conn is marked to not be reused for any future requests + closing bool + closed bool + seenSettings bool // true if we've seen a settings frame, false otherwise + seenSettingsChan chan struct{} // closed when seenSettings is true or frame reading fails + wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back + goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received + goAwayDebug string // goAway frame's debug data, retained as a string + streams map[uint32]*clientStream // client-initiated + streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip + nextStreamID uint32 + pendingRequests int // requests blocked and waiting to be sent because len(streams) == 
maxConcurrentStreams + pings map[[8]byte]chan struct{} // in flight ping data to notification channel + br *bufio.Reader + lastActive time.Time + lastIdle time.Time // time last idle // Settings from peer: (also guarded by wmu) maxFrameSize uint32 maxConcurrentStreams uint32 @@ -396,6 +397,17 @@ type ClientConn struct { initialStreamRecvWindowSize int32 readIdleTimeout time.Duration pingTimeout time.Duration + extendedConnectAllowed bool + + // rstStreamPingsBlocked works around an unfortunate gRPC behavior. + // gRPC strictly limits the number of PING frames that it will receive. + // The default is two pings per two hours, but the limit resets every time + // the gRPC endpoint sends a HEADERS or DATA frame. See golang/go#70575. + // + // rstStreamPingsBlocked is set after receiving a response to a PING frame + // bundled with an RST_STREAM (see pendingResets below), and cleared after + // receiving a HEADERS or DATA frame. + rstStreamPingsBlocked bool // pendingResets is the number of RST_STREAM frames we have sent to the peer, // without confirming that the peer has received them. When we send a RST_STREAM, @@ -819,6 +831,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. streams: make(map[uint32]*clientStream), singleUse: singleUse, + seenSettingsChan: make(chan struct{}), wantSettingsAck: true, readIdleTimeout: conf.SendPingTimeout, pingTimeout: conf.PingTimeout, @@ -1466,6 +1479,8 @@ func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream) cs.cleanupWriteRequest(err) } +var errExtendedConnectNotSupported = errors.New("net/http: extended connect not supported by peer") + // writeRequest sends a request. 
// // It returns nil after the request is written, the response read, @@ -1481,12 +1496,31 @@ func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStre return err } + // wait for setting frames to be received, a server can change this value later, + // but we just wait for the first settings frame + var isExtendedConnect bool + if req.Method == "CONNECT" && req.Header.Get(":protocol") != "" { + isExtendedConnect = true + } + // Acquire the new-request lock by writing to reqHeaderMu. // This lock guards the critical section covering allocating a new stream ID // (requires mu) and creating the stream (requires wmu). if cc.reqHeaderMu == nil { panic("RoundTrip on uninitialized ClientConn") // for tests } + if isExtendedConnect { + select { + case <-cs.reqCancel: + return errRequestCanceled + case <-ctx.Done(): + return ctx.Err() + case <-cc.seenSettingsChan: + if !cc.extendedConnectAllowed { + return errExtendedConnectNotSupported + } + } + } select { case cc.reqHeaderMu <- struct{}{}: case <-cs.reqCancel: @@ -1714,10 +1748,14 @@ func (cs *clientStream) cleanupWriteRequest(err error) { ping := false if !closeOnIdle { cc.mu.Lock() - if cc.pendingResets == 0 { - ping = true + // rstStreamPingsBlocked works around a gRPC behavior: + // see comment on the field for details. 
+ if !cc.rstStreamPingsBlocked { + if cc.pendingResets == 0 { + ping = true + } + cc.pendingResets++ } - cc.pendingResets++ cc.mu.Unlock() } cc.writeStreamReset(cs.ID, ErrCodeCancel, ping, err) @@ -2030,7 +2068,7 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) func validateHeaders(hdrs http.Header) string { for k, vv := range hdrs { - if !httpguts.ValidHeaderFieldName(k) { + if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" { return fmt.Sprintf("name %q", k) } for _, v := range vv { @@ -2046,6 +2084,10 @@ func validateHeaders(hdrs http.Header) string { var errNilRequestURL = errors.New("http2: Request.URI is nil") +func isNormalConnect(req *http.Request) bool { + return req.Method == "CONNECT" && req.Header.Get(":protocol") == "" +} + // requires cc.wmu be held. func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { cc.hbuf.Reset() @@ -2066,7 +2108,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail } var path string - if req.Method != "CONNECT" { + if !isNormalConnect(req) { path = req.URL.RequestURI() if !validPseudoPath(path) { orig := path @@ -2103,7 +2145,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail m = http.MethodGet } f(":method", m) - if req.Method != "CONNECT" { + if !isNormalConnect(req) { f(":path", path) f(":scheme", req.URL.Scheme) } @@ -2461,7 +2503,7 @@ func (rl *clientConnReadLoop) run() error { cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) } if se, ok := err.(StreamError); ok { - if cs := rl.streamByID(se.StreamID); cs != nil { + if cs := rl.streamByID(se.StreamID, notHeaderOrDataFrame); cs != nil { if se.Cause == nil { se.Cause = cc.fr.errDetail } @@ -2507,13 +2549,16 @@ func (rl *clientConnReadLoop) run() error { if VerboseLogs { cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, 
summarizeFrame(f), err) } + if !cc.seenSettings { + close(cc.seenSettingsChan) + } return err } } } func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, headerOrDataFrame) if cs == nil { // We'd get here if we canceled a request while the // server had its response still in flight. So if this @@ -2842,7 +2887,7 @@ func (b transportResponseBody) Close() error { func (rl *clientConnReadLoop) processData(f *DataFrame) error { cc := rl.cc - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, headerOrDataFrame) data := f.Data() if cs == nil { cc.mu.Lock() @@ -2977,9 +3022,22 @@ func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { cs.abortStream(err) } -func (rl *clientConnReadLoop) streamByID(id uint32) *clientStream { +// Constants passed to streamByID for documentation purposes. +const ( + headerOrDataFrame = true + notHeaderOrDataFrame = false +) + +// streamByID returns the stream with the given id, or nil if no stream has that id. +// If headerOrData is true, it clears rst.StreamPingsBlocked. +func (rl *clientConnReadLoop) streamByID(id uint32, headerOrData bool) *clientStream { rl.cc.mu.Lock() defer rl.cc.mu.Unlock() + if headerOrData { + // Work around an unfortunate gRPC behavior. + // See comment on ClientConn.rstStreamPingsBlocked for details. + rl.cc.rstStreamPingsBlocked = false + } cs := rl.cc.streams[id] if cs != nil && !cs.readAborted { return cs @@ -3073,6 +3131,21 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { case SettingHeaderTableSize: cc.henc.SetMaxDynamicTableSize(s.Val) cc.peerMaxHeaderTableSize = s.Val + case SettingEnableConnectProtocol: + if err := s.Valid(); err != nil { + return err + } + // If the peer wants to send us SETTINGS_ENABLE_CONNECT_PROTOCOL, + // we require that it do so in the first SETTINGS frame. 
+ // + // When we attempt to use extended CONNECT, we wait for the first + // SETTINGS frame to see if the server supports it. If we let the + // server enable the feature with a later SETTINGS frame, then + // users will see inconsistent results depending on whether we've + // seen that frame or not. + if !cc.seenSettings { + cc.extendedConnectAllowed = s.Val == 1 + } default: cc.vlogf("Unhandled Setting: %v", s) } @@ -3090,6 +3163,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { // connection can establish to our default. cc.maxConcurrentStreams = defaultMaxConcurrentStreams } + close(cc.seenSettingsChan) cc.seenSettings = true } @@ -3098,7 +3172,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { cc := rl.cc - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, notHeaderOrDataFrame) if f.StreamID != 0 && cs == nil { return nil } @@ -3127,7 +3201,7 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { } func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, notHeaderOrDataFrame) if cs == nil { // TODO: return error if server tries to RST_STREAM an idle stream return nil @@ -3205,6 +3279,7 @@ func (rl *clientConnReadLoop) processPing(f *PingFrame) error { if cc.pendingResets > 0 { // See clientStream.cleanupWriteRequest. 
cc.pendingResets = 0 + cc.rstStreamPingsBlocked = true cc.cond.Broadcast() } return nil diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index ccba391c9f..6ebc48b3fe 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -321,6 +321,9 @@ const ( AUDIT_INTEGRITY_STATUS = 0x70a AUDIT_IPC = 0x517 AUDIT_IPC_SET_PERM = 0x51f + AUDIT_IPE_ACCESS = 0x58c + AUDIT_IPE_CONFIG_CHANGE = 0x58d + AUDIT_IPE_POLICY_LOAD = 0x58e AUDIT_KERNEL = 0x7d0 AUDIT_KERNEL_OTHER = 0x524 AUDIT_KERN_MODULE = 0x532 @@ -489,6 +492,7 @@ const ( BPF_F_ID = 0x20 BPF_F_NETFILTER_IP_DEFRAG = 0x1 BPF_F_QUERY_EFFECTIVE = 0x1 + BPF_F_REDIRECT_FLAGS = 0x19 BPF_F_REPLACE = 0x4 BPF_F_SLEEPABLE = 0x10 BPF_F_STRICT_ALIGNMENT = 0x1 @@ -1166,6 +1170,7 @@ const ( EXTA = 0xe EXTB = 0xf F2FS_SUPER_MAGIC = 0xf2f52010 + FALLOC_FL_ALLOCATE_RANGE = 0x0 FALLOC_FL_COLLAPSE_RANGE = 0x8 FALLOC_FL_INSERT_RANGE = 0x20 FALLOC_FL_KEEP_SIZE = 0x1 @@ -1799,6 +1804,8 @@ const ( LANDLOCK_ACCESS_NET_BIND_TCP = 0x1 LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2 LANDLOCK_CREATE_RULESET_VERSION = 0x1 + LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET = 0x1 + LANDLOCK_SCOPE_SIGNAL = 0x2 LINUX_REBOOT_CMD_CAD_OFF = 0x0 LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef LINUX_REBOOT_CMD_HALT = 0xcdef0123 @@ -1924,6 +1931,7 @@ const ( MNT_FORCE = 0x1 MNT_ID_REQ_SIZE_VER0 = 0x18 MNT_ID_REQ_SIZE_VER1 = 0x20 + MNT_NS_INFO_SIZE_VER0 = 0x10 MODULE_INIT_COMPRESSED_FILE = 0x4 MODULE_INIT_IGNORE_MODVERSIONS = 0x1 MODULE_INIT_IGNORE_VERMAGIC = 0x2 @@ -2970,6 +2978,7 @@ const ( RWF_WRITE_LIFE_NOT_SET = 0x0 SCHED_BATCH = 0x3 SCHED_DEADLINE = 0x6 + SCHED_EXT = 0x7 SCHED_FIFO = 0x1 SCHED_FLAG_ALL = 0x7f SCHED_FLAG_DL_OVERRUN = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 0c00cb3f3a..c0d45e3205 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ 
b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -109,6 +109,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -297,6 +298,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -335,6 +338,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index dfb364554d..c731d24f02 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -109,6 +109,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -298,6 +299,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -336,6 +339,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index d46dcf78ab..680018a4a7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 
0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -303,6 +304,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -341,6 +344,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 3af3248a7f..a63909f308 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -112,6 +112,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -205,6 +206,7 @@ const ( PERF_EVENT_IOC_SET_BPF = 0x40042408 PERF_EVENT_IOC_SET_FILTER = 0x40082406 PERF_EVENT_IOC_SET_OUTPUT = 0x2405 + POE_MAGIC = 0x504f4530 PPPIOCATTACH = 0x4004743d PPPIOCATTCHAN = 0x40047438 PPPIOCBRIDGECHAN = 0x40047435 @@ -294,6 +296,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -332,6 +336,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 292bcf0283..9b0a2573fe 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ 
b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -109,6 +109,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -290,6 +291,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -328,6 +331,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 782b7110fa..958e6e0645 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 @@ -296,6 +297,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -334,6 +337,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 84973fd927..50c7f25bd1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 
0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 @@ -296,6 +297,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -334,6 +337,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 6d9cbc3b27..ced21d66d9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 @@ -296,6 +297,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -334,6 +337,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 5f9fedbce0..226c044190 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 
@@ -296,6 +297,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -334,6 +337,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index bb0026ee0c..3122737cd4 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x4000 ICANON = 0x100 IEXTEN = 0x400 @@ -351,6 +352,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -389,6 +392,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 46120db5c9..eb5d3467ed 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x4000 ICANON = 0x100 IEXTEN = 0x400 @@ -355,6 +356,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + 
SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -393,6 +396,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 5c951634fb..e921ebc60b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x4000 ICANON = 0x100 IEXTEN = 0x400 @@ -355,6 +356,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -393,6 +396,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 11a84d5af2..38ba81c55c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -287,6 +288,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ 
-325,6 +328,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index f78c4617ca..71f0400977 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -108,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -359,6 +360,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -397,6 +400,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index aeb777c344..c44a313322 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -112,6 +112,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -350,6 +351,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x58 + SCM_DEVMEM_LINEAR = 0x57 SCM_TIMESTAMPING = 0x23 SCM_TIMESTAMPING_OPT_STATS = 0x38 SCM_TIMESTAMPING_PKTINFO = 0x3c @@ -436,6 +439,9 @@ const ( SO_CNX_ADVICE = 0x37 SO_COOKIE = 0x3b SO_DETACH_REUSEPORT_BPF = 0x47 + SO_DEVMEM_DMABUF = 0x58 + 
SO_DEVMEM_DONTNEED = 0x59 + SO_DEVMEM_LINEAR = 0x57 SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index d003c3d437..17c53bd9b3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -462,11 +462,14 @@ type FdSet struct { const ( SizeofIfMsghdr = 0x70 + SizeofIfMsghdr2 = 0xa0 SizeofIfData = 0x60 + SizeofIfData64 = 0x80 SizeofIfaMsghdr = 0x14 SizeofIfmaMsghdr = 0x10 SizeofIfmaMsghdr2 = 0x14 SizeofRtMsghdr = 0x5c + SizeofRtMsghdr2 = 0x5c SizeofRtMetrics = 0x38 ) @@ -480,6 +483,20 @@ type IfMsghdr struct { Data IfData } +type IfMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Snd_len int32 + Snd_maxlen int32 + Snd_drops int32 + Timer int32 + Data IfData64 +} + type IfData struct { Type uint8 Typelen uint8 @@ -512,6 +529,34 @@ type IfData struct { Reserved2 uint32 } +type IfData64 struct { + Type uint8 + Typelen uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Recvquota uint8 + Xmitquota uint8 + Unused1 uint8 + Mtu uint32 + Metric uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Recvtiming uint32 + Xmittiming uint32 + Lastchange Timeval32 +} + type IfaMsghdr struct { Msglen uint16 Version uint8 @@ -557,6 +602,21 @@ type RtMsghdr struct { Rmx RtMetrics } +type RtMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Flags int32 + Addrs int32 + Refcnt int32 + Parentflags int32 + Reserved int32 + Use int32 + Inits uint32 + Rmx RtMetrics +} + type RtMetrics struct { Locks uint32 Mtu uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 
0d45a941aa..2392226a74 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -462,11 +462,14 @@ type FdSet struct { const ( SizeofIfMsghdr = 0x70 + SizeofIfMsghdr2 = 0xa0 SizeofIfData = 0x60 + SizeofIfData64 = 0x80 SizeofIfaMsghdr = 0x14 SizeofIfmaMsghdr = 0x10 SizeofIfmaMsghdr2 = 0x14 SizeofRtMsghdr = 0x5c + SizeofRtMsghdr2 = 0x5c SizeofRtMetrics = 0x38 ) @@ -480,6 +483,20 @@ type IfMsghdr struct { Data IfData } +type IfMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Snd_len int32 + Snd_maxlen int32 + Snd_drops int32 + Timer int32 + Data IfData64 +} + type IfData struct { Type uint8 Typelen uint8 @@ -512,6 +529,34 @@ type IfData struct { Reserved2 uint32 } +type IfData64 struct { + Type uint8 + Typelen uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Recvquota uint8 + Xmitquota uint8 + Unused1 uint8 + Mtu uint32 + Metric uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Recvtiming uint32 + Xmittiming uint32 + Lastchange Timeval32 +} + type IfaMsghdr struct { Msglen uint16 Version uint8 @@ -557,6 +602,21 @@ type RtMsghdr struct { Rmx RtMetrics } +type RtMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Flags int32 + Addrs int32 + Refcnt int32 + Parentflags int32 + Reserved int32 + Use int32 + Inits uint32 + Rmx RtMetrics +} + type RtMetrics struct { Locks uint32 Mtu uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 8daaf3faf4..5537148dcb 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -2594,8 +2594,8 @@ const ( SOF_TIMESTAMPING_BIND_PHC = 0x8000 SOF_TIMESTAMPING_OPT_ID_TCP = 0x10000 - SOF_TIMESTAMPING_LAST = 
0x10000 - SOF_TIMESTAMPING_MASK = 0x1ffff + SOF_TIMESTAMPING_LAST = 0x20000 + SOF_TIMESTAMPING_MASK = 0x3ffff SCM_TSTAMP_SND = 0x0 SCM_TSTAMP_SCHED = 0x1 @@ -3541,7 +3541,7 @@ type Nhmsg struct { type NexthopGrp struct { Id uint32 Weight uint8 - Resvd1 uint8 + High uint8 Resvd2 uint16 } @@ -3802,7 +3802,7 @@ const ( ETHTOOL_MSG_PSE_GET = 0x24 ETHTOOL_MSG_PSE_SET = 0x25 ETHTOOL_MSG_RSS_GET = 0x26 - ETHTOOL_MSG_USER_MAX = 0x2c + ETHTOOL_MSG_USER_MAX = 0x2d ETHTOOL_MSG_KERNEL_NONE = 0x0 ETHTOOL_MSG_STRSET_GET_REPLY = 0x1 ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2 @@ -3842,7 +3842,7 @@ const ( ETHTOOL_MSG_MODULE_NTF = 0x24 ETHTOOL_MSG_PSE_GET_REPLY = 0x25 ETHTOOL_MSG_RSS_GET_REPLY = 0x26 - ETHTOOL_MSG_KERNEL_MAX = 0x2c + ETHTOOL_MSG_KERNEL_MAX = 0x2e ETHTOOL_FLAG_COMPACT_BITSETS = 0x1 ETHTOOL_FLAG_OMIT_REPLY = 0x2 ETHTOOL_FLAG_STATS = 0x4 @@ -3850,7 +3850,7 @@ const ( ETHTOOL_A_HEADER_DEV_INDEX = 0x1 ETHTOOL_A_HEADER_DEV_NAME = 0x2 ETHTOOL_A_HEADER_FLAGS = 0x3 - ETHTOOL_A_HEADER_MAX = 0x3 + ETHTOOL_A_HEADER_MAX = 0x4 ETHTOOL_A_BITSET_BIT_UNSPEC = 0x0 ETHTOOL_A_BITSET_BIT_INDEX = 0x1 ETHTOOL_A_BITSET_BIT_NAME = 0x2 @@ -4031,11 +4031,11 @@ const ( ETHTOOL_A_CABLE_RESULT_UNSPEC = 0x0 ETHTOOL_A_CABLE_RESULT_PAIR = 0x1 ETHTOOL_A_CABLE_RESULT_CODE = 0x2 - ETHTOOL_A_CABLE_RESULT_MAX = 0x2 + ETHTOOL_A_CABLE_RESULT_MAX = 0x3 ETHTOOL_A_CABLE_FAULT_LENGTH_UNSPEC = 0x0 ETHTOOL_A_CABLE_FAULT_LENGTH_PAIR = 0x1 ETHTOOL_A_CABLE_FAULT_LENGTH_CM = 0x2 - ETHTOOL_A_CABLE_FAULT_LENGTH_MAX = 0x2 + ETHTOOL_A_CABLE_FAULT_LENGTH_MAX = 0x3 ETHTOOL_A_CABLE_TEST_NTF_STATUS_UNSPEC = 0x0 ETHTOOL_A_CABLE_TEST_NTF_STATUS_STARTED = 0x1 ETHTOOL_A_CABLE_TEST_NTF_STATUS_COMPLETED = 0x2 @@ -4200,7 +4200,8 @@ type ( } PtpSysOffsetExtended struct { Samples uint32 - Rsv [3]uint32 + Clockid int32 + Rsv [2]uint32 Ts [25][3]PtpClockTime } PtpSysOffsetPrecise struct { @@ -4399,6 +4400,7 @@ const ( type LandlockRulesetAttr struct { Access_fs uint64 Access_net uint64 + Scoped uint64 } type LandlockPathBeneathAttr struct 
{ diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 4510bfc3f5..4a32543868 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -168,6 +168,8 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) [failretval==InvalidHandle] = CreateNamedPipeW //sys ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) //sys DisconnectNamedPipe(pipe Handle) (err error) +//sys GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err error) +//sys GetNamedPipeServerProcessId(pipe Handle, serverProcessID *uint32) (err error) //sys GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) //sys GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW //sys SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) = SetNamedPipeHandleState diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 51311e205f..9d138de5fe 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -176,6 +176,7 @@ const ( WAIT_FAILED = 0xFFFFFFFF // Access rights for process. 
+ PROCESS_ALL_ACCESS = 0xFFFF PROCESS_CREATE_PROCESS = 0x0080 PROCESS_CREATE_THREAD = 0x0002 PROCESS_DUP_HANDLE = 0x0040 diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 6f5252880c..01c0716c2c 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -280,8 +280,10 @@ var ( procGetMaximumProcessorCount = modkernel32.NewProc("GetMaximumProcessorCount") procGetModuleFileNameW = modkernel32.NewProc("GetModuleFileNameW") procGetModuleHandleExW = modkernel32.NewProc("GetModuleHandleExW") + procGetNamedPipeClientProcessId = modkernel32.NewProc("GetNamedPipeClientProcessId") procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") + procGetNamedPipeServerProcessId = modkernel32.NewProc("GetNamedPipeServerProcessId") procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult") procGetPriorityClass = modkernel32.NewProc("GetPriorityClass") procGetProcAddress = modkernel32.NewProc("GetProcAddress") @@ -1612,7 +1614,7 @@ func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, si } func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) { - r0, _, _ := syscall.SyscallN(procCancelMibChangeNotify2.Addr(), uintptr(notificationHandle)) + r0, _, _ := syscall.Syscall(procCancelMibChangeNotify2.Addr(), 1, uintptr(notificationHandle), 0, 0) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1652,7 +1654,7 @@ func GetIfEntry(pIfRow *MibIfRow) (errcode error) { } func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) { - r0, _, _ := syscall.SyscallN(procGetIfEntry2Ex.Addr(), uintptr(level), uintptr(unsafe.Pointer(row))) + r0, _, _ := syscall.Syscall(procGetIfEntry2Ex.Addr(), 2, uintptr(level), uintptr(unsafe.Pointer(row)), 0) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1660,7 +1662,7 @@ func 
GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) { } func GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) { - r0, _, _ := syscall.SyscallN(procGetUnicastIpAddressEntry.Addr(), uintptr(unsafe.Pointer(row))) + r0, _, _ := syscall.Syscall(procGetUnicastIpAddressEntry.Addr(), 1, uintptr(unsafe.Pointer(row)), 0, 0) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1672,7 +1674,7 @@ func NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsa if initialNotification { _p0 = 1 } - r0, _, _ := syscall.SyscallN(procNotifyIpInterfaceChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle))) + r0, _, _ := syscall.Syscall6(procNotifyIpInterfaceChange.Addr(), 5, uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)), 0) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1684,7 +1686,7 @@ func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext if initialNotification { _p0 = 1 } - r0, _, _ := syscall.SyscallN(procNotifyUnicastIpAddressChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle))) + r0, _, _ := syscall.Syscall6(procNotifyUnicastIpAddressChange.Addr(), 5, uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)), 0) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -2446,6 +2448,14 @@ func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err er return } +func GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetNamedPipeClientProcessId.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(clientProcessID)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, 
maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) if r1 == 0 { @@ -2462,6 +2472,14 @@ func GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint3 return } +func GetNamedPipeServerProcessId(pipe Handle, serverProcessID *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetNamedPipeServerProcessId.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(serverProcessID)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) { var _p0 uint32 if wait { diff --git a/vendor/modules.txt b/vendor/modules.txt index df6c8493aa..4bf44f0656 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -474,7 +474,7 @@ go.uber.org/zap/internal/ztest go.uber.org/zap/zapcore go.uber.org/zap/zapgrpc go.uber.org/zap/zaptest -# golang.org/x/crypto v0.29.0 +# golang.org/x/crypto v0.31.0 ## explicit; go 1.20 golang.org/x/crypto/cryptobyte golang.org/x/crypto/cryptobyte/asn1 @@ -493,7 +493,7 @@ golang.org/x/exp/slog/internal/buffer # golang.org/x/mod v0.17.0 ## explicit; go 1.18 golang.org/x/mod/semver -# golang.org/x/net v0.31.0 +# golang.org/x/net v0.33.0 ## explicit; go 1.18 golang.org/x/net/bpf golang.org/x/net/context @@ -517,22 +517,22 @@ golang.org/x/net/websocket ## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/internal -# golang.org/x/sync v0.9.0 +# golang.org/x/sync v0.10.0 ## explicit; go 1.18 golang.org/x/sync/errgroup golang.org/x/sync/semaphore golang.org/x/sync/singleflight -# golang.org/x/sys v0.27.0 +# golang.org/x/sys v0.28.0 ## explicit; go 1.18 golang.org/x/sys/cpu 
golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/term v0.26.0 +# golang.org/x/term v0.27.0 ## explicit; go 1.18 golang.org/x/term -# golang.org/x/text v0.20.0 +# golang.org/x/text v0.21.0 ## explicit; go 1.18 golang.org/x/text/encoding golang.org/x/text/encoding/charmap