diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000..8cce2e1 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,13 @@ +version: 2 +updates: + - package-ecosystem: "terraform" + directory: "/" + schedule: + interval: "daily" + time: "23:00" + timezone: "Europe/Oslo" + open-pull-requests-limit: 3 + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "daily" \ No newline at end of file diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml new file mode 100644 index 0000000..d16a556 --- /dev/null +++ b/.github/workflows/documentation.yml @@ -0,0 +1,19 @@ +name: Generate terraform docs +on: + - pull_request + +jobs: + docs: + runs-on: [ubuntu-latest] + steps: + - uses: actions/checkout@v3 + with: + ref: ${{ github.event.pull_request.head.ref }} + + - name: Render terraform docs and push changes back to PR + uses: terraform-docs/gh-actions@main + with: + working-dir: . + output-file: README.md + output-method: inject + git-push: "true" diff --git a/.terraform.lock.hcl b/.terraform.lock.hcl new file mode 100644 index 0000000..4510869 --- /dev/null +++ b/.terraform.lock.hcl @@ -0,0 +1,22 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. 
+ +provider "registry.terraform.io/hashicorp/azurerm" { + version = "3.94.0" + constraints = ">= 3.0.0" + hashes = [ + "h1:t3fM/PO8PLAA5mK3esAypp01V6Vh75kjPnNqxQeVrV0=", + "zh:20d102bc63096ade82f8da81c91afaffa858aa56fe9a7ad02f24f5ae5618bc53", + "zh:3ddb9d6173a4fdb9b2352a76324ee321976915544ae66cbb863c7a60f0593f05", + "zh:4bc6c62142f67192d2def11f4fd419c54dddd89a5448af036bfc60b15eb0509a", + "zh:4c5120c2101a51524af32c4220c5e376f97a227730dd92ec0b06ac677e4b39f2", + "zh:585fa7ab876d09899cd2d842f12bc28c34556b4d47919eceadefab6fa47f909f", + "zh:59de7ea462470dee7088fc4deeff48e1ffd286eaca1185c219be68dadde745b8", + "zh:8421a46dd3bc4bc2eb56f7eb9b91cc84a66070b72195a805862c6022adee2da0", + "zh:a2fcb5a091d5944dc50f1e51f53fa4d370810a507fbf4122920d756083d8df19", + "zh:beb6b93a2a16942625bb6ac1e52bf26878e35f5562f3173279423ca66553b6d7", + "zh:c6846892ea68f49c838d90b75793d1f3a866871dd701ccb575b1eecccd4e7051", + "zh:ddd59492b6d5ce4c83f06a5b16c520048f3e9bb898bab4f3910042f5c01ffeda", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + ] +} diff --git a/README.md b/README.md index 59fe7c2..685c41f 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,129 @@ # tf-azure-aks + Terraform Module for Azure AKS + + +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 1.0 | +| [azurerm](#requirement\_azurerm) | >= 3.0.0 | + +## Providers + +| Name | Version | +|------|---------| +| [azurerm](#provider\_azurerm) | 3.94.0 | + +## Modules + +No modules. 
+ +## Resources + +| Name | Type | +|------|------| +| [azurerm_kubernetes_cluster.k8s_cluster](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster) | resource | +| [azurerm_kubernetes_cluster_node_pool.aks-node](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster_node_pool) | resource | +| [azurerm_monitor_diagnostic_setting.aks-diagnostics](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/monitor_diagnostic_setting) | resource | +| [azurerm_subnet.k8s_agent_subnet](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/subnet) | resource | +| [azurerm_virtual_network.k8s_agent_network](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/virtual_network) | resource | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [admin\_username](#input\_admin\_username) | user name to add to VMs | `string` | `"azureuser"` | no | +| [agent\_net\_name](#input\_agent\_net\_name) | Optional name of the agent vnet | `string` | `"agent-net"` | no | +| [aks\_dns\_service\_ip](#input\_aks\_dns\_service\_ip) | n/a | `string` | `"10.0.0.10"` | no | +| [aks\_docker\_bridge\_cidr](#input\_aks\_docker\_bridge\_cidr) | n/a | `string` | `"172.26.0.1/16"` | no | +| [aks\_network\_plugin](#input\_aks\_network\_plugin) | n/a | `string` | `"azure"` | no | +| [aks\_network\_policy](#input\_aks\_network\_policy) | n/a | `string` | `"calico"` | no | +| [aks\_pod\_cidr](#input\_aks\_pod\_cidr) | n/a | `any` | `null` | no | +| [aks\_service\_cidr](#input\_aks\_service\_cidr) | n/a | `string` | `"10.0.0.0/16"` | no | +| [aks\_vnet\_subnet\_cidr](#input\_aks\_vnet\_subnet\_cidr) | n/a | `string` | `"10.200.0.0/24"` | no | +| [aks\_vnet\_subnet\_id](#input\_aks\_vnet\_subnet\_id) | n/a | `string` | `""` | no | +| 
[api\_server\_authorized\_ip\_ranges](#input\_api\_server\_authorized\_ip\_ranges) | List of IPs to whitelist for incoming to Kubernetes API | `list(string)` | `[]` | no | +| [automatic\_channel\_upgrade](#input\_automatic\_channel\_upgrade) | n/a | `string` | `null` | no | +| [azure\_policy\_enable](#input\_azure\_policy\_enable) | Turn on Azure Policy in cluster or not | `bool` | `false` | no | +| [client\_id](#input\_client\_id) | azure client id | `any` | `null` | no | +| [client\_secret](#input\_client\_secret) | azure client secret | `any` | `null` | no | +| [cluster\_name](#input\_cluster\_name) | What the k8s cluster should be identified as | `any` | n/a | yes | +| [create\_vnet](#input\_create\_vnet) | n/a | `bool` | `true` | no | +| [day\_of\_week](#input\_day\_of\_week) | The day of the week for the maintenance run. | `string` | `"Tuesday"` | no | +| [default\_pool](#input\_default\_pool) | n/a | `map` | `{}` | no | +| [dns\_prefix](#input\_dns\_prefix) | n/a | `any` | n/a | yes | +| [duration](#input\_duration) | The duration of the window for maintenance to run in hours. | `string` | `"5"` | no | +| [enable\_diagnostics](#input\_enable\_diagnostics) | n/a | `bool` | `false` | no | +| [frequency](#input\_frequency) | Frequency of maintenance. 
| `string` | `"Weekly"` | no | +| [identity\_ids](#input\_identity\_ids) | n/a | `list(string)` | `[]` | no | +| [identity\_type](#input\_identity\_type) | n/a | `string` | `"SystemAssigned"` | no | +| [ingress\_application\_gateway\_enable](#input\_ingress\_application\_gateway\_enable) | Ingress Application Gateway | `bool` | `false` | no | +| [ingress\_application\_gateway\_id](#input\_ingress\_application\_gateway\_id) | n/a | `string` | `null` | no | +| [ingress\_application\_gateway\_name](#input\_ingress\_application\_gateway\_name) | n/a | `string` | `null` | no | +| [ingress\_application\_gateway\_subnet\_cidr](#input\_ingress\_application\_gateway\_subnet\_cidr) | n/a | `string` | `null` | no | +| [ingress\_application\_gateway\_subnet\_id](#input\_ingress\_application\_gateway\_subnet\_id) | n/a | `string` | `null` | no | +| [ingress\_application\_subnet\_cidr](#input\_ingress\_application\_subnet\_cidr) | n/a | `string` | `null` | no | +| [ingress\_application\_subnet\_id](#input\_ingress\_application\_subnet\_id) | n/a | `string` | `null` | no | +| [interval](#input\_interval) | The interval for maintenance runs. | `number` | `1` | no | +| [k8s\_version](#input\_k8s\_version) | What version of k8s to request from provider | `any` | `null` | no | +| [key\_vault\_secrets\_provider](#input\_key\_vault\_secrets\_provider) | n/a |
map(object({| `null` | no | +| [kubelet\_identity](#input\_kubelet\_identity) | Identity / RBAC goes here |
secret_rotation_enabled = string
secret_rotation_interval = string
}))
object({| `null` | no | +| [load\_balancer\_sku](#input\_load\_balancer\_sku) | Networking settings. | `string` | `"standard"` | no | +| [log\_analytics](#input\_log\_analytics) | Diagnostics |
client_id = string
object_id = string
user_assigned_identity_id = string
})
map(object({| `{}` | no | +| [maintenance\_window\_auto\_upgrade](#input\_maintenance\_window\_auto\_upgrade) | - `day_of_month` - (Optional) The day of the month for the maintenance run. Required in combination with RelativeMonthly frequency. Value between 0 and 31 (inclusive).
enabled = bool
retention = object({
enabled = bool
days = number
})
}))
object({| `null` | no | +| [maintenance\_window\_node\_os](#input\_maintenance\_window\_node\_os) | - `day_of_month` - (Optional) The day of the month for the maintenance run. Required in combination with RelativeMonthly frequency. Value between 0 and 31 (inclusive).
day_of_month = optional(number)
day_of_week = optional(string)
duration = number
frequency = string
interval = number
start_date = optional(string)
start_time = optional(string)
utc_offset = optional(string)
week_index = optional(string)
not_allowed = optional(map(object({
end = string
start = string
})))
})
object({| `null` | no | +| [managed\_outbound\_ip\_count](#input\_managed\_outbound\_ip\_count) | n/a | `number` | `1` | no | +| [max\_pods](#input\_max\_pods) | Max pods to support in this cluster pr node | `number` | `30` | no | +| [max\_surge](#input\_max\_surge) | The maximum percentage of nodes which will be added to the Node Pool size during an upgrade | `string` | `"33%"` | no | +| [metrics](#input\_metrics) | n/a |
day_of_month = optional(number)
day_of_week = optional(string)
duration = number
frequency = string
interval = number
start_date = optional(string)
start_time = optional(string)
utc_offset = optional(string)
week_index = optional(string)
not_allowed = optional(map(object({
end = string
start = string
})))
})
map(object({| `{}` | no | +| [msd\_enable](#input\_msd\_enable) | Enable audit logs collected by Microsoft Defender | `bool` | `false` | no | +| [msd\_workspace\_id](#input\_msd\_workspace\_id) | Specifies the ID of the Log Analytics Workspace where the audit logs collected by Microsoft Defender should be sent to | `string` | `""` | no | +| [node\_os\_channel\_upgrade](#input\_node\_os\_channel\_upgrade) | automatically upgrades the node image to the latest version available. | `string` | `"NodeImage"` | no | +| [node\_pools](#input\_node\_pools) | Node pools to use | `list` | `[]` | no | +| [node\_resource\_group](#input\_node\_resource\_group) | n/a | `any` | `null` | no | +| [oidc\_issuer\_enabled](#input\_oidc\_issuer\_enabled) | n/a | `bool` | `false` | no | +| [oms\_agent\_enable](#input\_oms\_agent\_enable) | Enable OMS Agent profile | `bool` | `true` | no | +| [oms\_workspace\_id](#input\_oms\_workspace\_id) | Operations Management Suite Workspace ID | `string` | `""` | no | +| [outbound\_ip\_address\_ids](#input\_outbound\_ip\_address\_ids) | n/a | `list(any)` | `null` | no | +| [outbound\_ip\_prefix\_ids](#input\_outbound\_ip\_prefix\_ids) | n/a | `list(any)` | `null` | no | +| [outbound\_type](#input\_outbound\_type) | n/a | `string` | `"loadBalancer"` | no | +| [private\_cluster\_enabled](#input\_private\_cluster\_enabled) | n/a | `bool` | `false` | no | +| [private\_dns\_zone\_id](#input\_private\_dns\_zone\_id) | n/a | `string` | `null` | no | +| [rbac\_admin\_group\_ids](#input\_rbac\_admin\_group\_ids) | n/a | `list(any)` | `[]` | no | +| [rbac\_client\_app\_id](#input\_rbac\_client\_app\_id) | The Client ID of an Azure Active Directory Application | `any` | `null` | no | +| [rbac\_enable](#input\_rbac\_enable) | Should RBAC be enabled. 
| `bool` | `true` | no | +| [rbac\_managed](#input\_rbac\_managed) | n/a | `bool` | `false` | no | +| [rbac\_server\_app\_id](#input\_rbac\_server\_app\_id) | The Server ID of an Azure Active Directory Application | `any` | `null` | no | +| [rbac\_server\_app\_secret](#input\_rbac\_server\_app\_secret) | The Client Secret of an Azure Active Directory Application | `any` | `null` | no | +| [resource\_group\_location](#input\_resource\_group\_location) | Location of the RG the environment will run inside | `string` | `"West Europe"` | no | +| [resource\_group\_name](#input\_resource\_group\_name) | Name of RG the environment will run inside | `any` | n/a | yes | +| [ssh\_public\_key](#input\_ssh\_public\_key) | public key to add to admin\_user in VMs | `any` | n/a | yes | +| [tags](#input\_tags) | # Metadata ## | `map` | `{}` | no | +| [workload\_identity\_enabled](#input\_workload\_identity\_enabled) | n/a | `bool` | `false` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [id](#output\_id) | n/a | +| [identity](#output\_identity) | n/a | +| [kube\_client\_ca](#output\_kube\_client\_ca) | n/a | +| [kube\_client\_certificate](#output\_kube\_client\_certificate) | n/a | +| [kube\_client\_key](#output\_kube\_client\_key) | n/a | +| [kube\_cluster\_ca](#output\_kube\_cluster\_ca) | n/a | +| [kube\_cluster\_ca\_certificate](#output\_kube\_cluster\_ca\_certificate) | n/a | +| [kube\_config](#output\_kube\_config) | # Outputs ## | +| [kube\_configure](#output\_kube\_configure) | n/a | +| [kube\_host](#output\_kube\_host) | n/a | +| [kube\_password](#output\_kube\_password) | n/a | +| [kube\_username](#output\_kube\_username) | n/a | +| [kubelet\_identity](#output\_kubelet\_identity) | n/a | +| [name](#output\_name) | Re-export the AKS name for usage | +| [network\_profile](#output\_network\_profile) | n/a | +| [node\_resource\_group](#output\_node\_resource\_group) | auto-generated resource group which contains the resources for this managed 
kubernetes cluster | +| [node\_resource\_group\_id](#output\_node\_resource\_group\_id) | auto-generated resource group which contains the resources for this managed kubernetes cluster | +| [oidc\_issuer\_url](#output\_oidc\_issuer\_url) | n/a | +| [private\_fqdn](#output\_private\_fqdn) | n/a | + diff --git a/aks.tf b/aks.tf index 6001b4a..c6d7e85 100644 --- a/aks.tf +++ b/aks.tf @@ -29,7 +29,7 @@ locals { os_type = lookup(p, "os_type", local.default_pool_settings.os_type) os_disk_size_gb = lookup(p, "os_disk_size_gb", local.default_pool_settings.os_disk_size_gb) os_disk_type = lookup(p, "os_disk_type", local.default_pool_settings.os_disk_type) - vnet_subnet_id = var.create_vnet ? element(concat(azurerm_subnet.k8s_agent_subnet.*.id, [""]), 0) : var.aks_vnet_subnet_id + vnet_subnet_id = var.create_vnet ? element(concat(azurerm_subnet.k8s_agent_subnet[*].id, [""]), 0) : var.aks_vnet_subnet_id zones = lookup(p, "zones", local.default_pool_settings.zones) mode = lookup(p, "mode", "User") @@ -114,15 +114,68 @@ resource "azurerm_subnet" "k8s_agent_subnet" { } resource "azurerm_kubernetes_cluster" "k8s_cluster" { - name = var.cluster_name - location = var.resource_group_location - resource_group_name = var.resource_group_name - dns_prefix = var.dns_prefix - private_cluster_enabled = var.private_cluster_enabled - private_dns_zone_id = var.private_dns_zone_id - kubernetes_version = var.k8s_version - api_server_authorized_ip_ranges = var.api_server_authorized_ip_ranges - automatic_channel_upgrade = var.automatic_channel_upgrade + name = var.cluster_name + location = var.resource_group_location + resource_group_name = var.resource_group_name + dns_prefix = var.dns_prefix + private_cluster_enabled = var.private_cluster_enabled + private_dns_zone_id = var.private_dns_zone_id + kubernetes_version = var.k8s_version + dynamic "api_server_access_profile" { + for_each = length(var.api_server_authorized_ip_ranges) != 0 ? 
[1] : [] + content { + authorized_ip_ranges = var.api_server_authorized_ip_ranges + } + } + automatic_channel_upgrade = var.automatic_channel_upgrade + + dynamic "maintenance_window_auto_upgrade" { + for_each = var.maintenance_window_auto_upgrade == null ? [] : [var.maintenance_window_auto_upgrade] + content { + duration = maintenance_window_auto_upgrade.value.duration + frequency = maintenance_window_auto_upgrade.value.frequency + interval = maintenance_window_auto_upgrade.value.interval + day_of_month = maintenance_window_auto_upgrade.value.day_of_month + day_of_week = maintenance_window_auto_upgrade.value.day_of_week + start_date = maintenance_window_auto_upgrade.value.start_date + start_time = maintenance_window_auto_upgrade.value.start_time + utc_offset = maintenance_window_auto_upgrade.value.utc_offset + week_index = maintenance_window_auto_upgrade.value.week_index + + dynamic "not_allowed" { + for_each = maintenance_window_auto_upgrade.value.not_allowed == null ? {} : maintenance_window_auto_upgrade.value.not_allowed + content { + end = not_allowed.value.end + start = not_allowed.value.start + } + } + } + } + + node_os_channel_upgrade = var.node_os_channel_upgrade + + dynamic "maintenance_window_node_os" { + for_each = var.maintenance_window_node_os == null ? [] : [var.maintenance_window_node_os] + content { + duration = maintenance_window_node_os.value.duration + frequency = maintenance_window_node_os.value.frequency + interval = maintenance_window_node_os.value.interval + day_of_month = maintenance_window_node_os.value.day_of_month + day_of_week = maintenance_window_node_os.value.day_of_week + start_date = maintenance_window_node_os.value.start_date + start_time = maintenance_window_node_os.value.start_time + utc_offset = maintenance_window_node_os.value.utc_offset + week_index = maintenance_window_node_os.value.week_index + + dynamic "not_allowed" { + for_each = maintenance_window_node_os.value.not_allowed == null ? 
{} : maintenance_window_node_os.value.not_allowed + content { + end = not_allowed.value.end + start = not_allowed.value.start + } + } + } + } linux_profile { admin_username = var.admin_username @@ -135,6 +188,8 @@ resource "azurerm_kubernetes_cluster" "k8s_cluster" { oidc_issuer_enabled = var.oidc_issuer_enabled workload_identity_enabled = var.workload_identity_enabled + role_based_access_control_enabled = true + node_resource_group = var.node_resource_group #if No aks_vnet_subnet_id is passed THEN use newly created subnet id ELSE use PASSED subnet id @@ -144,7 +199,7 @@ resource "azurerm_kubernetes_cluster" "k8s_cluster" { vm_size = lookup(var.default_pool, "vm_size", local.default_pool_settings.vm_size) os_disk_size_gb = lookup(var.default_pool, "os_disk_size_gb", local.default_pool_settings.os_disk_size_gb) os_disk_type = lookup(var.default_pool, "os_disk_type", local.default_pool_settings.os_disk_type) - vnet_subnet_id = var.create_vnet ? element(concat(azurerm_subnet.k8s_agent_subnet.*.id, [""]), 0) : var.aks_vnet_subnet_id + vnet_subnet_id = var.create_vnet ? element(concat(azurerm_subnet.k8s_agent_subnet[*].id, [""]), 0) : var.aks_vnet_subnet_id zones = lookup(var.default_pool, "zones", local.default_pool_settings.zones) type = lookup(var.default_pool, "type", local.default_pool_settings.default_pool_type) enable_auto_scaling = lookup(var.default_pool, "enable_auto_scaling", true) @@ -153,6 +208,14 @@ resource "azurerm_kubernetes_cluster" "k8s_cluster" { tags = lookup(var.default_pool, "tags", var.tags) max_pods = lookup(var.default_pool, "max_pods", local.default_pool_settings.max_pods) orchestrator_version = lookup(var.default_pool, "k8s_version", local.default_pool_settings.k8s_version) + + dynamic "upgrade_settings" { + for_each = var.max_surge == null ? 
[] : ["upgrade_settings"] + + content { + max_surge = var.max_surge + } + } } dynamic "service_principal" { @@ -200,10 +263,9 @@ resource "azurerm_kubernetes_cluster" "k8s_cluster" { network_plugin = var.aks_network_plugin network_policy = var.aks_network_policy - pod_cidr = var.aks_pod_cidr - service_cidr = var.aks_service_cidr - dns_service_ip = var.aks_dns_service_ip - docker_bridge_cidr = var.aks_docker_bridge_cidr + pod_cidr = var.aks_pod_cidr + service_cidr = var.aks_service_cidr + dns_service_ip = var.aks_dns_service_ip dynamic "load_balancer_profile" { for_each = var.outbound_type == "loadBalancer" ? [1] : [] @@ -246,6 +308,14 @@ resource "azurerm_kubernetes_cluster" "k8s_cluster" { } } + dynamic "microsoft_defender" { + for_each = var.msd_enable ? [1] : [] + + content { + log_analytics_workspace_id = var.msd_workspace_id + } + } + dynamic "oms_agent" { for_each = var.oms_agent_enable ? [1] : [] @@ -256,6 +326,21 @@ resource "azurerm_kubernetes_cluster" "k8s_cluster" { tags = var.tags + + # dynamic "lifecycle" { + # for_each = lookup(var.default_pool, "enable_auto_scaling", true) ? [1] : [] + # + # content { + # ignore_changes = [tags,] + # } + # } + # lifecycle { + # ignore_changes = [ + # # Ignore changes to default_node_pools node_count , e.g. because it is managed by enable_auto_scaling + # default_node_pool[0].node_count, + # ] + # } + } resource "azurerm_kubernetes_cluster_node_pool" "aks-node" { @@ -284,6 +369,21 @@ resource "azurerm_kubernetes_cluster_node_pool" "aks-node" { priority = each.value.priority eviction_policy = each.value.eviction_policy spot_max_price = each.value.spot_max_price + + dynamic "upgrade_settings" { + for_each = var.max_surge == null || each.value.priority == "Spot" ? [] : ["upgrade_settings"] + + content { + max_surge = var.max_surge + } + } + + lifecycle { + ignore_changes = [ + # Ignore changes to default_node_pools node_count , e.g. 
because it is managed by enable_auto_scaling + node_count, + ] + } } resource "azurerm_monitor_diagnostic_setting" "aks-diagnostics" { @@ -298,11 +398,6 @@ resource "azurerm_monitor_diagnostic_setting" "aks-diagnostics" { content { category = log.key enabled = log.value.enabled - - retention_policy { - enabled = log.value.retention.enabled - days = log.value.retention.days - } } } dynamic "metric" { @@ -312,11 +407,6 @@ resource "azurerm_monitor_diagnostic_setting" "aks-diagnostics" { content { category = metric.key enabled = metric.value.enabled - - retention_policy { - enabled = metric.value.retention.enabled - days = metric.value.retention.days - } } } } diff --git a/outputs.tf b/outputs.tf index 27b525f..cef8b2c 100644 --- a/outputs.tf +++ b/outputs.tf @@ -86,3 +86,13 @@ output "private_fqdn" { output "oidc_issuer_url" { value = azurerm_kubernetes_cluster.k8s_cluster.oidc_issuer_url } + +output "node_resource_group" { + description = "auto-generated resource group which contains the resources for this managed kubernetes cluster" + value = azurerm_kubernetes_cluster.k8s_cluster.node_resource_group +} + +output "node_resource_group_id" { + description = "auto-generated resource group which contains the resources for this managed kubernetes cluster" + value = azurerm_kubernetes_cluster.k8s_cluster.node_resource_group_id +} \ No newline at end of file diff --git a/variables.tf b/variables.tf index 33625e7..0be7b4a 100644 --- a/variables.tf +++ b/variables.tf @@ -22,7 +22,7 @@ variable "agent_net_name" { variable "k8s_version" { description = "What version of k8s to request from provider" - default = "1.11.4" + default = null } variable "cluster_name" { @@ -209,6 +209,17 @@ variable "oms_agent_enable" { default = true } +variable "msd_workspace_id" { + description = "Specifies the ID of the Log Analytics Workspace where the audit logs collected by Microsoft Defender should be sent to" + default = "" +} + +variable "msd_enable" { + type = bool + description = "Enable 
audit logs collected by Microsoft Defender" + default = false +} + variable "enable_diagnostics" { + default = false + type = bool @@ -263,6 +274,113 @@ variable "automatic_channel_upgrade" { + default = null + } + +variable "node_os_channel_upgrade" { + type = string + default = "None" + description = "automatically upgrades the node image to the latest version available." +} + +variable "max_surge" { + type = string + default = "33%" + description = "The maximum percentage of nodes which will be added to the Node Pool size during an upgrade" +} + +variable "frequency" { + description = "Frequency of maintenance." + type = string + default = "Weekly" +} + +variable "interval" { + description = "The interval for maintenance runs." + type = number + default = 1 +} + +variable "duration" { + description = "The duration of the window for maintenance to run in hours." + type = string + default = "5" +} + +variable "day_of_week" { + description = "The day of the week for the maintenance run." + type = string + default = "Tuesday" +} + +variable "maintenance_window_auto_upgrade" { + type = object({ + day_of_month = optional(number) + day_of_week = optional(string) + duration = number + frequency = string + interval = number + start_date = optional(string) + start_time = optional(string) + utc_offset = optional(string) + week_index = optional(string) + not_allowed = optional(map(object({ + end = string + start = string + }))) + }) + default = null + description = <<-EOT + - `day_of_month` - (Optional) The day of the month for the maintenance run. Required in combination with RelativeMonthly frequency. Value between 0 and 31 (inclusive). + - `day_of_week` - (Optional) The day of the week for the maintenance run. Options are `Monday`, `Tuesday`, `Wednesday`, `Thursday`, `Friday`, `Saturday` and `Sunday`. Required in combination with weekly frequency. + - `duration` - (Required) The duration of the window for maintenance to run in hours. 
+ - `frequency` - (Required) Frequency of maintenance. Possible options are `Weekly`, `AbsoluteMonthly` and `RelativeMonthly`. + - `interval` - (Required) The interval for maintenance runs. Depending on the frequency this interval is week or month based. + - `start_date` - (Optional) The date on which the maintenance window begins to take effect. + - `start_time` - (Optional) The time for maintenance to begin, based on the timezone determined by `utc_offset`. Format is `HH:mm`. + - `utc_offset` - (Optional) Used to determine the timezone for cluster maintenance. + - `week_index` - (Optional) The week in the month used for the maintenance run. Options are `First`, `Second`, `Third`, `Fourth`, and `Last`. + + --- + `not_allowed` block supports the following: + - `end` - (Required) The end of a time span, formatted as an RFC3339 string. + - `start` - (Required) The start of a time span, formatted as an RFC3339 string. +EOT +} + +variable "maintenance_window_node_os" { + type = object({ + day_of_month = optional(number) + day_of_week = optional(string) + duration = number + frequency = string + interval = number + start_date = optional(string) + start_time = optional(string) + utc_offset = optional(string) + week_index = optional(string) + not_allowed = optional(map(object({ + end = string + start = string + }))) + }) + default = null + description = <<-EOT + - `day_of_month` - (Optional) The day of the month for the maintenance run. Required in combination with RelativeMonthly frequency. Value between 0 and 31 (inclusive). + - `day_of_week` - (Optional) The day of the week for the maintenance run. Options are `Monday`, `Tuesday`, `Wednesday`, `Thursday`, `Friday`, `Saturday` and `Sunday`. Required in combination with weekly frequency. + - `duration` - (Required) The duration of the window for maintenance to run in hours. + - `frequency` - (Required) Frequency of maintenance. Possible options are `Daily`, `Weekly`, `AbsoluteMonthly` and `RelativeMonthly`. 
+ - `interval` - (Required) The interval for maintenance runs. Depending on the frequency this interval is week or month based. + - `start_date` - (Optional) The date on which the maintenance window begins to take effect. + - `start_time` - (Optional) The time for maintenance to begin, based on the timezone determined by `utc_offset`. Format is `HH:mm`. + - `utc_offset` - (Optional) Used to determine the timezone for cluster maintenance. + - `week_index` - (Optional) The week in the month used for the maintenance run. Options are `First`, `Second`, `Third`, `Fourth`, and `Last`. + + --- + `not_allowed` block supports the following: + - `end` - (Required) The end of a time span, formatted as an RFC3339 string. + - `start` - (Required) The start of a time span, formatted as an RFC3339 string. +EOT +} + + # Ingress Application Gateway variable "ingress_application_gateway_enable" { type = bool @@ -273,6 +391,21 @@ variable "ingress_application_gateway_name" { type = string default = null } +variable "ingress_application_gateway_subnet_id" { + type = string + default = null +} + +variable "ingress_application_gateway_subnet_cidr" { + type = string + default = null +} + +variable "ingress_application_gateway_id" { + type = string + default = null +} + variable "ingress_application_subnet_id" { type = string diff --git a/versions.tf b/versions.tf index 9d76625..a065769 100644 --- a/versions.tf +++ b/versions.tf @@ -3,6 +3,6 @@ terraform { required_version = ">= 1.0" required_providers { - azurerm = ">= 3.39.0" + azurerm = ">= 3.0.0" } }
enabled = bool
retention = object({
enabled = bool
days = number
})
}))