
Commit 0a8987a

Merge branch 'main' into nap-cgroup-mode
2 parents 6489769 + ad9cbb1 commit 0a8987a

File tree: 61 files changed, +13485 −215 lines


Makefile

Lines changed: 4 additions & 2 deletions
@@ -18,7 +18,7 @@
 # Make will use bash instead of sh
 SHELL := /usr/bin/env bash

-DOCKER_TAG_VERSION_DEVELOPER_TOOLS := 1.23
+DOCKER_TAG_VERSION_DEVELOPER_TOOLS := 1.24
 DOCKER_IMAGE_DEVELOPER_TOOLS := cft/developer-tools
 REGISTRY_URL := gcr.io/cloud-foundation-cicd
 DOCKER_BIN ?= docker

@@ -70,6 +70,7 @@ docker_test_integration:
 docker_test_lint:
     $(DOCKER_BIN) run --rm -it \
        -e ENABLE_PARALLEL=1 \
+       -e ENABLE_BPMETADATA=1 \
        -v "$(CURDIR)":/workspace \
        $(REGISTRY_URL)/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \
        /usr/local/bin/test_lint.sh

@@ -78,9 +79,10 @@ docker_test_lint:
 .PHONY: docker_generate_docs
 docker_generate_docs:
     $(DOCKER_BIN) run --rm -it \
+       -e ENABLE_BPMETADATA=1 \
        -v "$(CURDIR)":/workspace \
        $(REGISTRY_URL)/${DOCKER_IMAGE_DEVELOPER_TOOLS}:${DOCKER_TAG_VERSION_DEVELOPER_TOOLS} \
-       /bin/bash -c 'source /usr/local/bin/task_helper_functions.sh && generate_docs'
+       /bin/bash -c 'source /usr/local/bin/task_helper_functions.sh && generate_docs display'

 # Generate files from autogen
 .PHONY: docker_generate_modules

README.md

Lines changed: 1 addition & 1 deletion
@@ -353,7 +353,7 @@ The node_pools variable takes the following parameters:
 | max_count | Maximum number of nodes in the NodePool. Must be >= min_count. Cannot be used with total limits. | 100 | Optional |
 | total_max_count | Total maximum number of nodes in the NodePool. Must be >= min_count. Cannot be used with per zone limits. | null | Optional |
 | max_pods_per_node | The maximum number of pods per node in this cluster | null | Optional |
-| strategy | The upgrade stragey to be used for upgrading the nodes. Valid values of state are: `SURGE` or `BLUE_GREEN` | "SURGE" | Optional |
+| strategy | The upgrade stragey to be used for upgrading the nodes. Valid values of state are: `SURGE`, `BLUE_GREEN`, or for flex-start and queued provisioning `SHORT_LIVED` | "SURGE" | Optional |
 | threads_per_core | Optional The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed | null | Optional |
 | enable_nested_virtualization | Whether the node should have nested virtualization | null | Optional |
 | max_surge | The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater. Only works with `SURGE` strategy. | 1 | Optional |
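
For orientation, a minimal sketch of a node_pools entry that would exercise the new SHORT_LIVED value, mirroring the queued-provisioning pool added in examples/node_pool/main.tf further down (the pool name and machine type here are illustrative, not part of this commit):

  node_pools = [
    {
      name                = "queued-pool"     # illustrative name
      machine_type        = "n1-standard-2"   # illustrative machine type
      min_count           = 0
      service_account     = var.compute_engine_service_account
      queued_provisioning = true
      strategy            = "SHORT_LIVED"     # only meaningful for flex-start / queued provisioning pools
    },
  ]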

autogen/main/README.md

Lines changed: 1 addition & 1 deletion
@@ -238,7 +238,7 @@ The node_pools variable takes the following parameters:
 | max_count | Maximum number of nodes in the NodePool. Must be >= min_count. Cannot be used with total limits. | 100 | Optional |
 | total_max_count | Total maximum number of nodes in the NodePool. Must be >= min_count. Cannot be used with per zone limits. | null | Optional |
 | max_pods_per_node | The maximum number of pods per node in this cluster | null | Optional |
-| strategy | The upgrade stragey to be used for upgrading the nodes. Valid values of state are: `SURGE` or `BLUE_GREEN` | "SURGE" | Optional |
+| strategy | The upgrade stragey to be used for upgrading the nodes. Valid values of state are: `SURGE`, `BLUE_GREEN`, or for flex-start and queued provisioning `SHORT_LIVED` | "SURGE" | Optional |
 | threads_per_core | Optional The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed | null | Optional |
 | enable_nested_virtualization | Whether the node should have nested virtualization | null | Optional |
 | max_surge | The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater. Only works with `SURGE` strategy. | 1 | Optional |

autogen/main/cluster.tf.tmpl

Lines changed: 15 additions & 12 deletions
@@ -967,20 +967,23 @@ resource "google_container_node_pool" "windows_pools" {
     auto_upgrade = lookup(each.value, "auto_upgrade", local.default_auto_upgrade)
   }

-  upgrade_settings {
-    strategy        = lookup(each.value, "strategy", "SURGE")
-    max_surge       = lookup(each.value, "strategy", "SURGE") == "SURGE" ? lookup(each.value, "max_surge", 1) : null
-    max_unavailable = lookup(each.value, "strategy", "SURGE") == "SURGE" ? lookup(each.value, "max_unavailable", 0) : null
+  dynamic "upgrade_settings" {
+    for_each = contains(["SURGE", "BLUE_GREEN"], lookup(each.value, "strategy", "SURGE")) ? [1] : []
+    content {
+      strategy        = lookup(each.value, "strategy", "SURGE")
+      max_surge       = lookup(each.value, "strategy", "SURGE") == "SURGE" ? lookup(each.value, "max_surge", 1) : null
+      max_unavailable = lookup(each.value, "strategy", "SURGE") == "SURGE" ? lookup(each.value, "max_unavailable", 0) : null

-    dynamic "blue_green_settings" {
-      for_each = lookup(each.value, "strategy", "SURGE") == "BLUE_GREEN" ? [1] : []
-      content {
-        node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", null)
+      dynamic "blue_green_settings" {
+        for_each = lookup(each.value, "strategy", "SURGE") == "BLUE_GREEN" ? [1] : []
+        content {
+          node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", null)

-        standard_rollout_policy {
-          batch_soak_duration = lookup(each.value, "batch_soak_duration", null)
-          batch_percentage    = lookup(each.value, "batch_percentage", null)
-          batch_node_count    = lookup(each.value, "batch_node_count", null)
+          standard_rollout_policy {
+            batch_soak_duration = lookup(each.value, "batch_soak_duration", null)
+            batch_percentage    = lookup(each.value, "batch_percentage", null)
+            batch_node_count    = lookup(each.value, "batch_node_count", null)
+          }
         }
       }
     }

build/int.cloudbuild.yaml

Lines changed: 1 addition & 1 deletion
@@ -486,6 +486,6 @@ tags:
 - 'integration'
 substitutions:
   _DOCKER_IMAGE_DEVELOPER_TOOLS: 'cft/developer-tools'
-  _DOCKER_TAG_VERSION_DEVELOPER_TOOLS: '1.23'
+  _DOCKER_TAG_VERSION_DEVELOPER_TOOLS: '1.24'
 options:
   machineType: 'E2_HIGHCPU_8'

cluster.tf

Lines changed: 30 additions & 24 deletions
@@ -670,20 +670,23 @@ resource "google_container_node_pool" "pools" {
     auto_upgrade = lookup(each.value, "auto_upgrade", local.default_auto_upgrade)
   }

-  upgrade_settings {
-    strategy        = lookup(each.value, "strategy", "SURGE")
-    max_surge       = lookup(each.value, "strategy", "SURGE") == "SURGE" ? lookup(each.value, "max_surge", 1) : null
-    max_unavailable = lookup(each.value, "strategy", "SURGE") == "SURGE" ? lookup(each.value, "max_unavailable", 0) : null
+  dynamic "upgrade_settings" {
+    for_each = contains(["SURGE", "BLUE_GREEN"], lookup(each.value, "strategy", "SURGE")) ? [1] : []
+    content {
+      strategy        = lookup(each.value, "strategy", "SURGE")
+      max_surge       = lookup(each.value, "strategy", "SURGE") == "SURGE" ? lookup(each.value, "max_surge", 1) : null
+      max_unavailable = lookup(each.value, "strategy", "SURGE") == "SURGE" ? lookup(each.value, "max_unavailable", 0) : null

-    dynamic "blue_green_settings" {
-      for_each = lookup(each.value, "strategy", "SURGE") == "BLUE_GREEN" ? [1] : []
-      content {
-        node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", null)
+      dynamic "blue_green_settings" {
+        for_each = lookup(each.value, "strategy", "SURGE") == "BLUE_GREEN" ? [1] : []
+        content {
+          node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", null)

-        standard_rollout_policy {
-          batch_soak_duration = lookup(each.value, "batch_soak_duration", null)
-          batch_percentage    = lookup(each.value, "batch_percentage", null)
-          batch_node_count    = lookup(each.value, "batch_node_count", null)
+          standard_rollout_policy {
+            batch_soak_duration = lookup(each.value, "batch_soak_duration", null)
+            batch_percentage    = lookup(each.value, "batch_percentage", null)
+            batch_node_count    = lookup(each.value, "batch_node_count", null)
+          }
         }
       }
     }

@@ -1001,20 +1004,23 @@ resource "google_container_node_pool" "windows_pools" {
     auto_upgrade = lookup(each.value, "auto_upgrade", local.default_auto_upgrade)
   }

-  upgrade_settings {
-    strategy        = lookup(each.value, "strategy", "SURGE")
-    max_surge       = lookup(each.value, "strategy", "SURGE") == "SURGE" ? lookup(each.value, "max_surge", 1) : null
-    max_unavailable = lookup(each.value, "strategy", "SURGE") == "SURGE" ? lookup(each.value, "max_unavailable", 0) : null
+  dynamic "upgrade_settings" {
+    for_each = contains(["SURGE", "BLUE_GREEN"], lookup(each.value, "strategy", "SURGE")) ? [1] : []
+    content {
+      strategy        = lookup(each.value, "strategy", "SURGE")
+      max_surge       = lookup(each.value, "strategy", "SURGE") == "SURGE" ? lookup(each.value, "max_surge", 1) : null
+      max_unavailable = lookup(each.value, "strategy", "SURGE") == "SURGE" ? lookup(each.value, "max_unavailable", 0) : null

-    dynamic "blue_green_settings" {
-      for_each = lookup(each.value, "strategy", "SURGE") == "BLUE_GREEN" ? [1] : []
-      content {
-        node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", null)
+      dynamic "blue_green_settings" {
+        for_each = lookup(each.value, "strategy", "SURGE") == "BLUE_GREEN" ? [1] : []
+        content {
+          node_pool_soak_duration = lookup(each.value, "node_pool_soak_duration", null)

-        standard_rollout_policy {
-          batch_soak_duration = lookup(each.value, "batch_soak_duration", null)
-          batch_percentage    = lookup(each.value, "batch_percentage", null)
-          batch_node_count    = lookup(each.value, "batch_node_count", null)
+          standard_rollout_policy {
+            batch_soak_duration = lookup(each.value, "batch_soak_duration", null)
+            batch_percentage    = lookup(each.value, "batch_percentage", null)
+            batch_node_count    = lookup(each.value, "batch_node_count", null)
+          }
         }
       }
     }
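
With upgrade_settings now wrapped in a dynamic block, it is only rendered when the pool's strategy is SURGE or BLUE_GREEN, so a SHORT_LIVED pool emits no upgrade_settings at all. As a rough sketch, a node_pools entry taking the blue/green path could look like this (the durations and counts are illustrative, not from this commit):

  {
    name                    = "blue-green-pool"   # illustrative name
    strategy                = "BLUE_GREEN"
    node_pool_soak_duration = "7200s"             # illustrative; read via lookup(each.value, "node_pool_soak_duration", null)
    batch_soak_duration     = "600s"              # illustrative; feeds standard_rollout_policy
    batch_node_count        = 1                   # illustrative; feeds standard_rollout_policy
  }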

examples/node_pool/main.tf

Lines changed: 3 additions & 2 deletions
@@ -92,6 +92,7 @@ module "gke" {
       min_count           = 0
       service_account     = var.compute_engine_service_account
       queued_provisioning = true
+      strategy            = "SHORT_LIVED"
     },
     {
       name = "pool-05"

@@ -102,7 +103,7 @@
     {
       name          = "pool-06"
       node_count    = 1
-      machine_type  = "n1-highmem-96"
+      machine_type  = "c2-standard-30"
       node_affinity = "{\"key\": \"compute.googleapis.com/node-group-name\", \"operator\": \"IN\", \"values\": [\"${google_compute_node_group.soletenant-nodes.name}\"]}"
     },
   ]

@@ -170,7 +171,7 @@ resource "google_compute_node_template" "soletenant-tmpl" {
   name   = "soletenant-tmpl-${var.cluster_name_suffix}"
   region = var.region

-  node_type = "n1-node-96-624"
+  node_type = "c2-node-60-240"
 }

 resource "google_compute_node_group" "soletenant-nodes" {

examples/safer_cluster_iap_bastion/bastion.tf

Lines changed: 1 addition & 1 deletion
@@ -30,7 +30,7 @@ module "bastion" {
   name           = local.bastion_name
   zone           = local.bastion_zone
   image_project  = "debian-cloud"
-  machine_type   = "g1-small"
+  machine_type   = "e2-small"
   startup_script = templatefile("${path.module}/templates/startup-script.tftpl", {})
   members        = var.bastion_members
   shielded_vm    = "false"

examples/simple_autopilot_private/main.tf

Lines changed: 1 addition & 2 deletions
@@ -50,8 +50,7 @@ module "gke" {
   enable_private_endpoint                = true
   enable_private_nodes                   = true
   network_tags                           = [local.cluster_type]
-  # TODO: b/413643369
-  # node_pools_cgroup_mode = "CGROUP_MODE_V2"
+  node_pools_cgroup_mode                 = "CGROUP_MODE_V2"
   deletion_protection                    = false
   insecure_kubelet_readonly_port_enabled = false
 }

examples/simple_autopilot_private_cmek/main.tf

Lines changed: 4 additions & 5 deletions
@@ -71,9 +71,8 @@ module "gke" {
   enable_private_endpoint = true
   enable_private_nodes    = true
   network_tags            = [local.cluster_type]
-  # TODO: b/413643369
-  # node_pools_cgroup_mode = "CGROUP_MODE_V2"
-  deletion_protection = false
-  boot_disk_kms_key   = values(module.kms.keys)[0]
-  depends_on          = [google_kms_crypto_key_iam_member.main]
+  node_pools_cgroup_mode  = "CGROUP_MODE_V2"
+  deletion_protection     = false
+  boot_disk_kms_key       = values(module.kms.keys)[0]
+  depends_on              = [google_kms_crypto_key_iam_member.main]
 }

examples/simple_autopilot_private_non_default_sa/main.tf

Lines changed: 2 additions & 3 deletions
@@ -49,9 +49,8 @@ module "gke" {
   enable_vertical_pod_autoscaling = true
   enable_private_endpoint        = true
   enable_private_nodes           = true
-  # TODO: b/413643369
-  # node_pools_cgroup_mode = "CGROUP_MODE_V2"
-  deletion_protection = false
+  node_pools_cgroup_mode         = "CGROUP_MODE_V2"
+  deletion_protection            = false

   master_authorized_networks = [
     {

examples/simple_autopilot_public/main.tf

Lines changed: 5 additions & 6 deletions
@@ -47,12 +47,11 @@ module "gke" {
   release_channel                 = "RAPID"
   enable_vertical_pod_autoscaling = true
   network_tags                    = [local.cluster_type]
-  # TODO: b/413643369
-  # node_pools_cgroup_mode = "CGROUP_MODE_V2"
-  deletion_protection      = false
-  enable_l4_ilb_subsetting = true
-  stateful_ha              = false
-  gke_backup_agent_config  = false
+  node_pools_cgroup_mode          = "CGROUP_MODE_V2"
+  deletion_protection             = false
+  enable_l4_ilb_subsetting        = true
+  stateful_ha                     = false
+  gke_backup_agent_config         = false
   ray_operator_config = {
     enabled         = true
     logging_enabled = true
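
These Autopilot examples now set the cgroup mode directly instead of leaving it commented out behind the TODO. A minimal sketch of the relevant part of such a module block, assuming a beta Autopilot submodule and the usual project/network variables (the source path and variables are assumptions, not shown in this commit):

  module "gke" {
    source     = "../../modules/beta-autopilot-public-cluster"  # assumed submodule path
    project_id = var.project_id                                 # assumed variables
    name       = "autopilot-cluster"
    region     = var.region
    network    = var.network
    subnetwork = var.subnetwork

    node_pools_cgroup_mode = "CGROUP_MODE_V2"  # run Autopilot nodes with cgroup v2
    deletion_protection    = false
  }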
