From 24e6597acd448bebb3e10abc0f481fd1cddc4b9a Mon Sep 17 00:00:00 2001 From: Hongyi Wu Date: Fri, 27 Jun 2025 22:22:22 +0000 Subject: [PATCH 1/4] feat(tools): Add script to deploy local CCM with kops This commit introduces a new bash script, `deploy-kops-gcp.sh`, to automate the deployment of a Kubernetes cluster on GCP using kops, specifically configured to run a locally built version of the cloud-controller-manager. This script streamlines the development and testing workflow by handling the entire process: - Building the cloud-controller-manager Docker image. - Pushing the image to the user's Google Container Registry (GCR). - Copying the default CCM manifest and using `sed` to replace the image with the newly built version. - Creating and updating a kops cluster with the `ClusterAddons` feature flag, injecting the custom manifest. The script requires the `GCP_PROJECT` environment variable to be set. It includes a cleanup function to automatically delete the cluster upon completion, which can be disabled by setting `DELETE_CLUSTER=false`. This provides developers with a quick and repeatable way to test their changes on a real cluster. --- tools/kops_local_ccm.sh | 171 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 171 insertions(+) create mode 100755 tools/kops_local_ccm.sh diff --git a/tools/kops_local_ccm.sh b/tools/kops_local_ccm.sh new file mode 100755 index 0000000000..b652da5917 --- /dev/null +++ b/tools/kops_local_ccm.sh @@ -0,0 +1,171 @@ +#!/usr/bin/env bash + +# Copyright 2022 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e +set -x + +# This script brings up a Kubernetes cluster using kops and a local CCM image. +# It is based on the e2e test script in e2e/scenarios/kops-simple. + +REPO_ROOT=$(git rev-parse --show-toplevel) +cd ${REPO_ROOT} +cd .. +WORKSPACE=$(pwd) + +# Create bindir +BINDIR=${WORKSPACE}/bin +export PATH=${BINDIR}:${PATH} +mkdir -p "${BINDIR}" + +# Setup our cleanup function; as we allocate resources we set a variable to indicate they should be cleaned up +function cleanup { + if [[ "${DELETE_CLUSTER:-}" == "true" ]]; then + kops delete cluster --name "${CLUSTER_NAME}" --state "${KOPS_STATE_STORE}" --yes || echo "kops delete cluster failed" + fi +} +trap cleanup EXIT + +# Default cluster name +SCRIPT_NAME=$(basename $0 .sh) +if [[ -z "${CLUSTER_NAME:-}" ]]; then + CLUSTER_NAME="${SCRIPT_NAME}.k8s.local" +fi +echo "CLUSTER_NAME=${CLUSTER_NAME}" + +# Default workdir +if [[ -z "${WORKDIR:-}" ]]; then + WORKDIR="${WORKSPACE}/clusters/${CLUSTER_NAME}" +fi +mkdir -p "${WORKDIR}" + +if [[ -z "${GCP_PROJECT:-}" ]]; then + echo "GCP_PROJECT must be set" + exit 1 +fi +echo "GCP_PROJECT=${GCP_PROJECT}" + +# Ensure we have an SSH key; needed to dump the node information to artifacts/ +if [[ -z "${SSH_PRIVATE_KEY:-}" ]]; then + echo "SSH_PRIVATE_KEY not set, creating one" + + SSH_PRIVATE_KEY="${WORKDIR}/google_compute_engine" + # This will create a new key if one doesn't exist, and add it to the project metadata. 
+  gcloud compute config-ssh --project="${GCP_PROJECT}" --ssh-key-file="${SSH_PRIVATE_KEY}" --quiet
+  export KUBE_SSH_USER="${USER}"
fi
echo "SSH_PRIVATE_KEY=${SSH_PRIVATE_KEY}"
export KUBE_SSH_PUBLIC_KEY_PATH="${SSH_PRIVATE_KEY}.pub"


if [[ -z "${K8S_VERSION:-}" ]]; then
  K8S_VERSION="$(curl -sL https://dl.k8s.io/release/stable.txt)"
fi

# Set cloud provider to gce
CLOUD_PROVIDER="gce"
echo "CLOUD_PROVIDER=${CLOUD_PROVIDER}"

# Set cloud provider location
GCP_LOCATION="${GCP_LOCATION:-us-central1}"
ZONES="${ZONES:-us-central1-a}"

# KOPS_STATE_STORE holds metadata about the clusters we create
if [[ -z "${KOPS_STATE_STORE:-}" ]]; then
  KOPS_STATE_STORE="gs://kops-state-${GCP_PROJECT}"
fi

# Ensure the bucket exists
if ! gsutil ls -p "${GCP_PROJECT}" "${KOPS_STATE_STORE}" >/dev/null 2>&1; then
  gsutil mb -p "${GCP_PROJECT}" -l "${GCP_LOCATION}" "${KOPS_STATE_STORE}"
  # Setting ubla off so that kOps can automatically set ACLs for the default service account
  gsutil ubla set off "${KOPS_STATE_STORE}"
fi

echo "KOPS_STATE_STORE=${KOPS_STATE_STORE}"
export KOPS_STATE_STORE

# IMAGE_REPO is used to upload images
if [[ -z "${IMAGE_REPO:-}" ]]; then
  IMAGE_REPO="gcr.io/${GCP_PROJECT}"
fi
echo "IMAGE_REPO=${IMAGE_REPO}"

cd ${REPO_ROOT}
if [[ -z "${IMAGE_TAG:-}" ]]; then
  IMAGE_TAG=$(git rev-parse --short HEAD)-$(date +%Y%m%dT%H%M%S)
fi
echo "IMAGE_TAG=${IMAGE_TAG}"

# Build and push cloud-controller-manager
cd ${REPO_ROOT}

export KUBE_ROOT=${REPO_ROOT}
source "${REPO_ROOT}/tools/version.sh"
get_version_vars
unset KUBE_ROOT

echo "git status:"
git status

echo "Configuring docker auth with gcloud"
gcloud auth configure-docker

echo "Building and pushing images"
IMAGE_REPO=${IMAGE_REPO} IMAGE_TAG=${IMAGE_TAG} tools/push-images

if [[ -z "${ADMIN_ACCESS:-}" ]]; then
  ADMIN_ACCESS="0.0.0.0/0" # Or use your IPv4 with /32
fi
echo "ADMIN_ACCESS=${ADMIN_ACCESS}"

# Add our manifest
cp "${REPO_ROOT}/deploy/packages/default/manifest.yaml" "${WORKDIR}/cloud-provider-gcp.yaml"
sed -i -e "s@k8scloudprovidergcp/cloud-controller-manager:latest@${IMAGE_REPO}/cloud-controller-manager:${IMAGE_TAG}@g" "${WORKDIR}/cloud-provider-gcp.yaml"

# Enable cluster addons, this enables us to replace the built-in manifest
export KOPS_FEATURE_FLAGS="ClusterAddons,${KOPS_FEATURE_FLAGS:-}"
echo "KOPS_FEATURE_FLAGS=${KOPS_FEATURE_FLAGS}"

# The caller can set DELETE_CLUSTER=false to stop us deleting the cluster
if [[ -z "${DELETE_CLUSTER:-}" ]]; then
  DELETE_CLUSTER="true"
fi

kops create cluster \
    --name "${CLUSTER_NAME}" \
    --state "${KOPS_STATE_STORE}" \
    --zones "${ZONES}" \
    --project "${GCP_PROJECT}" \
    --kubernetes-version="${K8S_VERSION}" \
    --node-count "${NODE_COUNT:-2}" \
    --node-size "${NODE_SIZE:-e2-medium}" \
    --master-size "${MASTER_SIZE:-e2-medium}" \
    --cloud-labels "Owner=${USER},ManagedBy=kops" \
    --networking "gce" \
    --gce-service-account="default" \
    --ssh-public-key="${KUBE_SSH_PUBLIC_KEY_PATH}" \
    --admin-access="${ADMIN_ACCESS}" \
    --add="${WORKDIR}/cloud-provider-gcp.yaml"

kops update cluster "${CLUSTER_NAME}" --state "${KOPS_STATE_STORE}" --yes

echo "Cluster is being created. It may take a few minutes."
+echo "You can check the status with: kops validate cluster --name ${CLUSTER_NAME} --state ${KOPS_STATE_STORE}" + +if [[ "${DELETE_CLUSTER:-}" == "true" ]]; then + # Don't delete again in trap + DELETE_CLUSTER=false +fi From e860770f5f76545701ad6d25bf360ee161d7aed1 Mon Sep 17 00:00:00 2001 From: Hongyi Wu Date: Mon, 30 Jun 2025 05:39:30 +0000 Subject: [PATCH 2/4] Refactor: Rename SSH_PRIVATE_KEY to SSH_PRIVATE_KEY_PATH --- tools/kops_local_ccm.sh | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tools/kops_local_ccm.sh b/tools/kops_local_ccm.sh index b652da5917..17494c4878 100755 --- a/tools/kops_local_ccm.sh +++ b/tools/kops_local_ccm.sh @@ -58,16 +58,16 @@ fi echo "GCP_PROJECT=${GCP_PROJECT}" # Ensure we have an SSH key; needed to dump the node information to artifacts/ -if [[ -z "${SSH_PRIVATE_KEY:-}" ]]; then - echo "SSH_PRIVATE_KEY not set, creating one" +if [[ -z "${SSH_PRIVATE_KEY_PATH:-}" ]]; then + echo "SSH_PRIVATE_KEY_PATH not set, creating one" - SSH_PRIVATE_KEY="${WORKDIR}/google_compute_engine" + SSH_PRIVATE_KEY_PATH="${WORKDIR}/google_compute_engine" # This will create a new key if one doesn't exist, and add it to the project metadata. - gcloud compute config-ssh --project="${GCP_PROJECT}" --ssh-key-file="${SSH_PRIVATE_KEY}" --quiet + gcloud compute config-ssh --project="${GCP_PROJECT}" --ssh-key-file="${SSH_PRIVATE_KEY_PATH}" --quiet export KUBE_SSH_USER="${USER}" fi -echo "SSH_PRIVATE_KEY=${SSH_PRIVATE_KEY}" -export KUBE_SSH_PUBLIC_KEY_PATH="${SSH_PRIVATE_KEY}.pub" +echo "SSH_PRIVATE_KEY_PATH=${SSH_PRIVATE_KEY_PATH}" +export KUBE_SSH_PUBLIC_KEY_PATH="${SSH_PRIVATE_KEY_PATH}.pub" if [[ -z "${K8S_VERSION:-}" ]]; then From 1c61ba75025c359a7fbc7ebac6e0ac6afed60fc3 Mon Sep 17 00:00:00 2001 From: locanon Date: Sun, 29 Jun 2025 22:56:14 -0700 Subject: [PATCH 3/4] Update kops_local_ccm.sh refactor: Move requirement checks to beginning of process --- tools/kops_local_ccm.sh | 40 +++++++++++++++++++++------------------- 1 file changed, 21 insertions(+), 19 deletions(-) diff --git a/tools/kops_local_ccm.sh b/tools/kops_local_ccm.sh index 17494c4878..1299754ef8 100755 --- a/tools/kops_local_ccm.sh +++ b/tools/kops_local_ccm.sh @@ -25,6 +25,27 @@ cd ${REPO_ROOT} cd .. WORKSPACE=$(pwd) +# Default workdir +if [[ -z "${WORKDIR:-}" ]]; then + WORKDIR="${WORKSPACE}/clusters/${CLUSTER_NAME}" +fi +mkdir -p "${WORKDIR}" + +if [[ -z "${GCP_PROJECT:-}" ]]; then + echo "GCP_PROJECT must be set" + exit 1 +fi + +# Ensure we have an SSH key; needed to dump the node information to artifacts/ +if [[ -z "${SSH_PRIVATE_KEY_PATH:-}" ]]; then + echo "SSH_PRIVATE_KEY_PATH not set, creating one" + + SSH_PRIVATE_KEY_PATH="${WORKDIR}/google_compute_engine" + # This will create a new key if one doesn't exist, and add it to the project metadata. 
+ gcloud compute config-ssh --project="${GCP_PROJECT}" --ssh-key-file="${SSH_PRIVATE_KEY_PATH}" --quiet + export KUBE_SSH_USER="${USER}" +fi + # Create bindir BINDIR=${WORKSPACE}/bin export PATH=${BINDIR}:${PATH} @@ -45,27 +66,8 @@ if [[ -z "${CLUSTER_NAME:-}" ]]; then fi echo "CLUSTER_NAME=${CLUSTER_NAME}" -# Default workdir -if [[ -z "${WORKDIR:-}" ]]; then - WORKDIR="${WORKSPACE}/clusters/${CLUSTER_NAME}" -fi -mkdir -p "${WORKDIR}" - -if [[ -z "${GCP_PROJECT:-}" ]]; then - echo "GCP_PROJECT must be set" - exit 1 -fi echo "GCP_PROJECT=${GCP_PROJECT}" -# Ensure we have an SSH key; needed to dump the node information to artifacts/ -if [[ -z "${SSH_PRIVATE_KEY_PATH:-}" ]]; then - echo "SSH_PRIVATE_KEY_PATH not set, creating one" - - SSH_PRIVATE_KEY_PATH="${WORKDIR}/google_compute_engine" - # This will create a new key if one doesn't exist, and add it to the project metadata. - gcloud compute config-ssh --project="${GCP_PROJECT}" --ssh-key-file="${SSH_PRIVATE_KEY_PATH}" --quiet - export KUBE_SSH_USER="${USER}" -fi echo "SSH_PRIVATE_KEY_PATH=${SSH_PRIVATE_KEY_PATH}" export KUBE_SSH_PUBLIC_KEY_PATH="${SSH_PRIVATE_KEY_PATH}.pub" From 0e21dc12f790bdbf4c87e1f0af78ac6e878e76c0 Mon Sep 17 00:00:00 2001 From: locanon Date: Sun, 29 Jun 2025 23:11:44 -0700 Subject: [PATCH 4/4] Require CLUSTER_NAME to be set explicitly --- tools/kops_local_ccm.sh | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/tools/kops_local_ccm.sh b/tools/kops_local_ccm.sh index 1299754ef8..a13f082700 100755 --- a/tools/kops_local_ccm.sh +++ b/tools/kops_local_ccm.sh @@ -14,6 +14,21 @@ # See the License for the specific language governing permissions and # limitations under the License. +# This script brings up a Kubernetes cluster using kops and a local CCM image. +# It is based on the e2e test script in e2e/scenarios/kops-simple. +# +# Usage: +# +# # Set required environment variables +# export GCP_PROJECT="your-gcp-project-id" +# export CLUSTER_NAME="your-cluster-name.k8s.local" +# +# # Optional: To prevent the script from deleting the cluster after creation, set: +# export DELETE_CLUSTER="false" +# +# # Run the script +# ./tools/kops_local_ccm.sh + set -e set -x @@ -60,9 +75,9 @@ function cleanup { trap cleanup EXIT # Default cluster name -SCRIPT_NAME=$(basename $0 .sh) if [[ -z "${CLUSTER_NAME:-}" ]]; then - CLUSTER_NAME="${SCRIPT_NAME}.k8s.local" + echo "CLUSTER_NAME must be set" + exit 1 fi echo "CLUSTER_NAME=${CLUSTER_NAME}"
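
Testing note: once the series is applied and the script has run (with DELETE_CLUSTER=false exported so the cluster is kept), one way to confirm the locally built cloud-controller-manager image is actually in use is to validate the cluster and list the images running in kube-system. This is a minimal sketch, assuming kubectl credentials are exported via kops and that the CCM pods follow the default manifest's naming; it is not part of the patched script.

  # Export kubeconfig and wait for the cluster to become healthy.
  kops export kubecfg "${CLUSTER_NAME}" --state "${KOPS_STATE_STORE}" --admin
  kops validate cluster --name "${CLUSTER_NAME}" --state "${KOPS_STATE_STORE}" --wait 15m

  # List container images in kube-system and look for ${IMAGE_REPO}/cloud-controller-manager:${IMAGE_TAG}.
  kubectl -n kube-system get pods \
    -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.containers[0].image}{"\n"}{end}' \
    | grep cloud-controller-manager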