diff --git a/.changelog/15308.txt b/.changelog/15308.txt new file mode 100644 index 0000000000..5d6f519f43 --- /dev/null +++ b/.changelog/15308.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +sql: added `read_pool_auto_scale_config` support to `google_sql_database_instance` resource +``` \ No newline at end of file diff --git a/google-beta/services/sql/resource_sql_database_instance.go b/google-beta/services/sql/resource_sql_database_instance.go index f90620e0d9..95834d4868 100644 --- a/google-beta/services/sql/resource_sql_database_instance.go +++ b/google-beta/services/sql/resource_sql_database_instance.go @@ -140,8 +140,45 @@ var ( "settings.0.sql_server_audit_config.0.retention_interval", "settings.0.sql_server_audit_config.0.upload_interval", } + + readPoolAutoScaleConfigKeys = []string{ + "settings.0.read_pool_auto_scale_config.0.enabled", + "settings.0.read_pool_auto_scale_config.0.min_node_count", + "settings.0.read_pool_auto_scale_config.0.max_node_count", + "settings.0.read_pool_auto_scale_config.0.target_metrics", + "settings.0.read_pool_auto_scale_config.0.disable_scale_in", + "settings.0.read_pool_auto_scale_config.0.scale_in_cooldown_seconds", + "settings.0.read_pool_auto_scale_config.0.scale_out_cooldown_seconds", + } ) + func nodeCountCustomDiff(ctx context.Context, d *schema.ResourceDiff, meta interface{}) error { + autoScaleEnabled := d.Get("settings.0.read_pool_auto_scale_config.0.enabled").(bool) + + if !autoScaleEnabled { + // Keep the diff + return nil + } + + currentNodeCountI, _ := d.GetChange("node_count") + currentNodeCount := currentNodeCountI.(int) + minNodeCount := d.Get("settings.0.read_pool_auto_scale_config.0.min_node_count").(int) + maxNodeCount := d.Get("settings.0.read_pool_auto_scale_config.0.max_node_count").(int) + if currentNodeCount < minNodeCount { + // Node count will only be less than min node count if min node count is being increased. + // Set node count to be new min node count. 
+ return d.SetNew("node_count", minNodeCount) + } + if currentNodeCount > maxNodeCount { + // Node count will only be greater than max node count if max node count is being descreased. + // Set node count to be new max node count. + return d.SetNew("node_count", maxNodeCount) + } + + // Otherwise when autoscaling is enabled, ignore the node count in config. + return d.Clear("node_count") +} + func diskSizeCutomizeDiff(ctx context.Context, d *schema.ResourceDiff, meta interface{}) error { key := "settings.0.disk_size" @@ -206,6 +243,7 @@ func ResourceSqlDatabaseInstance() *schema.Resource { customdiff.IfValueChange("instance_type", isReplicaPromoteRequested, checkPromoteConfigurationsAndUpdateDiff), privateNetworkCustomizeDiff, pitrSupportDbCustomizeDiff, + nodeCountCustomDiff, ), Schema: map[string]*schema.Schema{ @@ -866,6 +904,76 @@ API (for read pools, effective_availability_type may differ from availability_ty }, Description: `Config used to determine the final backup settings for the instance`, }, + "read_pool_auto_scale_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Optional: true, + Default: false, + AtLeastOneOf: readPoolAutoScaleConfigKeys, + Description: `True if Read Pool Auto Scale is enabled.`, + }, + "max_node_count": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 20), + AtLeastOneOf: readPoolAutoScaleConfigKeys, + Description: `Maximum number of nodes in the read pool. If set to lower than current node count, node count will be updated.`, + }, + "min_node_count": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 20), + AtLeastOneOf: readPoolAutoScaleConfigKeys, + Description: `Minimum number of nodes in the read pool. 
If set to higher than current node count, node count will be updated.`, + }, + "target_metrics": { + Type: schema.TypeSet, + Optional: true, + AtLeastOneOf: readPoolAutoScaleConfigKeys, + Description: `Target metrics for Read Pool Auto Scale.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "metric": { + Type: schema.TypeString, + Optional: true, + Description: `Metric name for Read Pool Auto Scale.`, + }, + "target_value": { + Type: schema.TypeFloat, + Optional: true, + Description: `Target value for Read Pool Auto Scale.`, + }, + }, + }, + }, + "disable_scale_in": { + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: readPoolAutoScaleConfigKeys, + Description: `True if auto scale in is disabled.`, + }, + "scale_in_cooldown_seconds": { + Type: schema.TypeInt, + Optional: true, + AtLeastOneOf: readPoolAutoScaleConfigKeys, + Description: `The cooldown period for scale in operations.`, + }, + "scale_out_cooldown_seconds": { + Type: schema.TypeInt, + Optional: true, + AtLeastOneOf: readPoolAutoScaleConfigKeys, + Description: `The cooldown period for scale out operations.`, + }, + }, + }, + Description: `Configuration of Read Pool Auto Scale.`, + }, }, }, Description: `The settings to use for the database. The configuration is detailed below.`, @@ -984,9 +1092,8 @@ API (for read pools, effective_availability_type may differ from availability_ty Type: schema.TypeInt, Computed: true, Optional: true, - Description: `For a read pool instance, the number of nodes in the read pool.`, + Description: `For a read pool instance, the number of nodes in the read pool. 
For read pools with auto scaling enabled, this field is read only.`, }, - "replica_configuration": { Type: schema.TypeList, Optional: true, @@ -1636,6 +1743,7 @@ func expandSqlDatabaseInstanceSettings(configured []interface{}, databaseVersion InsightsConfig: expandInsightsConfig(_settings["insights_config"].([]interface{})), PasswordValidationPolicy: expandPasswordValidationPolicy(_settings["password_validation_policy"].([]interface{})), ConnectionPoolConfig: expandConnectionPoolConfig(_settings["connection_pool_config"].(*schema.Set).List()), + ReadPoolAutoScaleConfig: expandReadPoolAutoScaleConfig(_settings["read_pool_auto_scale_config"].([]interface{})), } resize := _settings["disk_autoresize"].(bool) @@ -1983,6 +2091,50 @@ func expandPasswordValidationPolicy(configured []interface{}) *sqladmin.Password } } +func expandTargetMetrics(configured []interface{}) []*sqladmin.TargetMetric { + targetMetrics := make([]*sqladmin.TargetMetric, 0, len(configured)) + for _, _metric := range configured { + if _metric == nil { + continue + } + _entry := _metric.(map[string]interface{}) + + targetMetric := &sqladmin.TargetMetric{} + + if v, ok := _entry["metric"]; ok && v != nil { + targetMetric.Metric = v.(string) + } + + if v, ok := _entry["target_value"]; ok && v != nil { + targetMetric.TargetValue = v.(float64) + } + + targetMetrics = append(targetMetrics, targetMetric) + } + return targetMetrics +} + +func expandReadPoolAutoScaleConfig(configured []interface{}) *sqladmin.ReadPoolAutoScaleConfig { + if len(configured) == 0 || configured[0] == nil { + return nil + } + + _readPoolAutoScaleConfig := configured[0].(map[string]interface{}) + if !_readPoolAutoScaleConfig["enabled"].(bool) { + return nil + } + return &sqladmin.ReadPoolAutoScaleConfig{ + Enabled: _readPoolAutoScaleConfig["enabled"].(bool), + MaxNodeCount: int64(_readPoolAutoScaleConfig["max_node_count"].(int)), + MinNodeCount: int64(_readPoolAutoScaleConfig["min_node_count"].(int)), + TargetMetrics: 
expandTargetMetrics(_readPoolAutoScaleConfig["target_metrics"].(*schema.Set).List()), + DisableScaleIn: _readPoolAutoScaleConfig["disable_scale_in"].(bool), + ScaleInCooldownSeconds: int64(_readPoolAutoScaleConfig["scale_in_cooldown_seconds"].(int)), + ScaleOutCooldownSeconds: int64(_readPoolAutoScaleConfig["scale_out_cooldown_seconds"].(int)), + } + +} + func resourceSqlDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*transport_tpg.Config) userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) @@ -2655,6 +2807,16 @@ func flattenSettings(settings *sqladmin.Settings, iType string, d *schema.Resour data["insights_config"] = flattenInsightsConfig(settings.InsightsConfig) } + if settings.ReadPoolAutoScaleConfig != nil { + data["read_pool_auto_scale_config"] = flattenReadPoolAutoScaleConfig(settings.ReadPoolAutoScaleConfig) + } else { + data["read_pool_auto_scale_config"] = []map[string]interface{}{ + { + "enabled": false, + }, + } + } + data["disk_autoresize"] = settings.StorageAutoResize data["disk_autoresize_limit"] = settings.StorageAutoResizeLimit @@ -3035,6 +3197,36 @@ func flattenInsightsConfig(insightsConfig *sqladmin.InsightsConfig) interface{} return []map[string]interface{}{data} } +func flattenTargetMetrics(targetMetrics []*sqladmin.TargetMetric) []interface{} { + if len(targetMetrics) == 0 { // Handles nil or empty slice + return make([]interface{}, 0) // Explicitly return empty slice + } + + metrics := make([]interface{}, len(targetMetrics)) // Pre-allocate for efficiency + for i, metric := range targetMetrics { + data := map[string]interface{}{ + "metric": metric.Metric, + "target_value": metric.TargetValue, + } + metrics[i] = data + } + return metrics +} + +func flattenReadPoolAutoScaleConfig(readPoolAutoScaleConfig *sqladmin.ReadPoolAutoScaleConfig) interface{} { + data := map[string]interface{}{ + "enabled": readPoolAutoScaleConfig.Enabled, + "min_node_count": 
readPoolAutoScaleConfig.MinNodeCount, + "max_node_count": readPoolAutoScaleConfig.MaxNodeCount, + "target_metrics": flattenTargetMetrics(readPoolAutoScaleConfig.TargetMetrics), + "disable_scale_in": readPoolAutoScaleConfig.DisableScaleIn, + "scale_in_cooldown_seconds": readPoolAutoScaleConfig.ScaleInCooldownSeconds, + "scale_out_cooldown_seconds": readPoolAutoScaleConfig.ScaleOutCooldownSeconds, + } + + return []map[string]interface{}{data} +} + func flattenPasswordValidationPolicy(passwordValidationPolicy *sqladmin.PasswordValidationPolicy) interface{} { data := map[string]interface{}{ "min_length": passwordValidationPolicy.MinLength, diff --git a/google-beta/services/sql/resource_sql_database_instance_test.go b/google-beta/services/sql/resource_sql_database_instance_test.go index f1191b7b81..732582ee01 100644 --- a/google-beta/services/sql/resource_sql_database_instance_test.go +++ b/google-beta/services/sql/resource_sql_database_instance_test.go @@ -3420,6 +3420,171 @@ func TestAccSqlDatabaseInstance_MysqlReadPoolEnableDisableSuccess(t *testing.T) }) } +// Read pool for MySQL. 
Enable and disable read pool auto scale +func TestAccSqlDatabaseInstance_MysqlReadPoolAutoScaleEnableDisableSuccess(t *testing.T) { + t.Parallel() + primaryName := "tf-test-mysql-as-readpool-primary-" + acctest.RandString(t, 10) + readPoolName := "tf-test-mysql-as-readpool-" + acctest.RandString(t, 10) + project := envvar.GetTestProjectFromEnv() + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccSqlDatabaseInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testGoogleSqlDatabaseInstanceConfig_eplusWithReadPool(project, primaryName, ReadPoolConfig{ + DatabaseType: "MYSQL_8_0", + ReplicaName: readPoolName, + }), + }, + { + ResourceName: "google_sql_database_instance.original-primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + ResourceName: "google_sql_database_instance.original-read-pool", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + // Enable auto scale + { + Config: testGoogleSqlDatabaseInstanceConfig_eplusWithReadPool(project, primaryName, ReadPoolConfig{ + DatabaseType: "MYSQL_8_0", + ReplicaName: readPoolName, + AutoScaleEnabled: true, + NodeCount: 2, + MaxNodeCount: 2, + MinNodeCount: 2, + }), + }, + { + ResourceName: "google_sql_database_instance.original-primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + ResourceName: "google_sql_database_instance.original-read-pool", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + // Disable auto scale + { + Config: testGoogleSqlDatabaseInstanceConfig_eplusWithReadPool(project, primaryName, ReadPoolConfig{ + DatabaseType: "MYSQL_8_0", + ReplicaName: readPoolName, + AutoScaleEnabled: false, + 
}), + }, + { + ResourceName: "google_sql_database_instance.original-primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + ResourceName: "google_sql_database_instance.original-read-pool", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +// Read pool for Postgres. Modify Node Count when auto scale enabled +func TestAccSqlDatabaseInstance_PostgresReadPoolAutoScaleChangeNodeCount(t *testing.T) { + t.Parallel() + primaryName := "tf-test-postgreas-as-readpool-primary-" + acctest.RandString(t, 10) + readPoolName := "tf-test-postgres-as-readpool-" + acctest.RandString(t, 10) + project := envvar.GetTestProjectFromEnv() + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccSqlDatabaseInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + // Create with auto scale takes min node count as node count + { + Config: testGoogleSqlDatabaseInstanceConfig_eplusWithReadPool(project, primaryName, ReadPoolConfig{ + DatabaseType: "POSTGRES_15", + ReplicaName: readPoolName, + AutoScaleEnabled: true, + NodeCount: 2, + MinNodeCount: 1, + MaxNodeCount: 5, + }), + Check: testGoogleSqlDatabaseInstanceCheckNodeCount(t, readPoolName, 1, 1, 5), + }, + // Change node count not respected + { + Config: testGoogleSqlDatabaseInstanceConfig_eplusWithReadPool(project, primaryName, ReadPoolConfig{ + DatabaseType: "POSTGRES_15", + ReplicaName: readPoolName, + AutoScaleEnabled: true, + NodeCount: 4, + MinNodeCount: 1, + MaxNodeCount: 5, + }), + Check: resource.ComposeTestCheckFunc( + testGoogleSqlDatabaseInstanceCheckNodeCount(t, readPoolName, 1, 1, 5), + testGoogleSqlDatabaseInstanceChangeNodeCount(t, readPoolName, 3), + ), // Simulate auto scale to 3 nodes. 
+ + }, + // Change Min/Max (no change to node count) + { + Config: testGoogleSqlDatabaseInstanceConfig_eplusWithReadPool(project, primaryName, ReadPoolConfig{ + DatabaseType: "POSTGRES_15", + ReplicaName: readPoolName, + AutoScaleEnabled: true, + NodeCount: 4, + MinNodeCount: 2, + MaxNodeCount: 4, + }), + Check: testGoogleSqlDatabaseInstanceCheckNodeCount(t, readPoolName, 3, 2, 4), + }, + // Change Min (higher than node count), sets node count + { + Config: testGoogleSqlDatabaseInstanceConfig_eplusWithReadPool(project, primaryName, ReadPoolConfig{ + DatabaseType: "POSTGRES_15", + ReplicaName: readPoolName, + AutoScaleEnabled: true, + NodeCount: 3, + MinNodeCount: 4, + MaxNodeCount: 5, + }), + Check: testGoogleSqlDatabaseInstanceCheckNodeCount(t, readPoolName, 4, 4, 5), + }, + // Change Max (lower than node count), sets node count + { + Config: testGoogleSqlDatabaseInstanceConfig_eplusWithReadPool(project, primaryName, ReadPoolConfig{ + DatabaseType: "POSTGRES_15", + ReplicaName: readPoolName, + AutoScaleEnabled: true, + NodeCount: 5, + MinNodeCount: 3, + MaxNodeCount: 3, + }), + Check: testGoogleSqlDatabaseInstanceCheckNodeCount(t, readPoolName, 3, 3, 3), + }, + // Disable auto scale, sets node count + { + Config: testGoogleSqlDatabaseInstanceConfig_eplusWithReadPool(project, primaryName, ReadPoolConfig{ + DatabaseType: "POSTGRES_15", + ReplicaName: readPoolName, + AutoScaleEnabled: false, + NodeCount: 2, + }), + Check: testGoogleSqlDatabaseInstanceCheckNodeCount(t, readPoolName, 2, 0, 0), + }, + }, + }) +} + func TestAccSqlDatabaseInstance_updateSslOptionsForPostgreSQL(t *testing.T) { t.Parallel() @@ -5272,6 +5437,11 @@ type ReadPoolConfig struct { // ReplicaMachineType gives the machine type of the read pool nodes // or read replica. It defaults to db-perf-optimized-N-2. ReplicaMachineType string + // AutoScaleEnabled indicates if auto scaling should be enabled on + // the read pool. 
+ AutoScaleEnabled bool + MinNodeCount int64 + MaxNodeCount int64 } func testGoogleSqlDatabaseInstanceConfig_eplusWithReadPool(project, primaryName string, rpconfig ReadPoolConfig) string { @@ -5280,6 +5450,16 @@ func testGoogleSqlDatabaseInstanceConfig_eplusWithReadPool(project, primaryName nodeCountStr = fmt.Sprintf(` node_count = %d `, rpconfig.NodeCount) } + minNodeCountStr := "" + if rpconfig.MinNodeCount > 0 { + minNodeCountStr = fmt.Sprintf(` min_node_count = %d +`, rpconfig.MinNodeCount) + } + maxNodeCountStr := "" + if rpconfig.MaxNodeCount > 0 { + maxNodeCountStr = fmt.Sprintf(` max_node_count = %d +`, rpconfig.MaxNodeCount) + } if rpconfig.InstanceType == "" { rpconfig.InstanceType = "READ_POOL_INSTANCE" @@ -5295,6 +5475,30 @@ func testGoogleSqlDatabaseInstanceConfig_eplusWithReadPool(project, primaryName } else if strings.HasPrefix(rpconfig.DatabaseType, "POSTGRES") { primaryTxnLogs = "point_in_time_recovery_enabled = true\n" } + autoScaleConfigStr := "" + if rpconfig.AutoScaleEnabled { + cooldownSeconds := 180 + autoScaleConfigStr = fmt.Sprintf(` + read_pool_auto_scale_config { + enabled = true +%s +%s + target_metrics { + metric = "AVERAGE_CPU_UTILIZATION" + target_value = 0.5 + } + scale_in_cooldown_seconds = %d + scale_out_cooldown_seconds = %d + disable_scale_in = true + } +`, minNodeCountStr, maxNodeCountStr, cooldownSeconds, cooldownSeconds) + } else { + autoScaleConfigStr = fmt.Sprintf(` + read_pool_auto_scale_config { + enabled = false + } +`) + } return fmt.Sprintf(` resource "google_sql_database_instance" "original-primary" { @@ -5328,9 +5532,10 @@ resource "google_sql_database_instance" "original-read-pool" { settings { tier = "%s" edition = "ENTERPRISE_PLUS" + %s } } -`, project, primaryName, rpconfig.DatabaseType, primaryTxnLogs, project, rpconfig.ReplicaName, rpconfig.DatabaseType, rpconfig.InstanceType, nodeCountStr, rpconfig.ReplicaMachineType) +`, project, primaryName, rpconfig.DatabaseType, primaryTxnLogs, project, 
rpconfig.ReplicaName, rpconfig.DatabaseType, rpconfig.InstanceType, nodeCountStr, rpconfig.ReplicaMachineType, autoScaleConfigStr) } func testAccSqlDatabaseInstance_basicInstanceForPsc(instanceName string, projectId string, orgId string, billingAccount string) string { @@ -7714,3 +7919,49 @@ func testGoogleSqlDatabaseInstanceCheckDiskSize(t *testing.T, instance string, s return nil } } + +func testGoogleSqlDatabaseInstanceChangeNodeCount(t *testing.T, instance string, newNodeCount int64) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + + sqlAdminClient := config.NewSqlAdminClient(config.UserAgent) + + operation, err := sqlAdminClient.Instances.Patch(config.Project, instance, &sqladmin.DatabaseInstance{ + NodeCount: newNodeCount, + }).Do() + if err != nil { + return fmt.Errorf("Could not update database instance %q: %s", instance, err) + } + + // Wait for the operation to complete + if err := sql.SqlAdminOperationWaitTime(config, operation, config.Project, "Waiting for scale op", config.UserAgent, 10*time.Minute); err != nil { + return fmt.Errorf("Could not wait for operation to complete: %s", err) + } + + return nil + } +} + +func testGoogleSqlDatabaseInstanceCheckNodeCount(t *testing.T, instance string, nodeCount, minNodeCount, maxNodeCount int64) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + + sqlAdminClient := config.NewSqlAdminClient(config.UserAgent) + + inst, err := sqlAdminClient.Instances.Get(config.Project, instance).Do() + if err != nil { + return fmt.Errorf("Could not get database instance %q: %s", instance, err) + } + if inst.NodeCount != nodeCount { + return fmt.Errorf("Expected nodeCount %d, got %d", nodeCount, inst.NodeCount) + } + if minNodeCount > 0 && inst.Settings.ReadPoolAutoScaleConfig.MinNodeCount != minNodeCount { + return fmt.Errorf("Expected min node count %d, got %d", minNodeCount, 
inst.Settings.ReadPoolAutoScaleConfig.MinNodeCount) + } + if maxNodeCount > 0 && inst.Settings.ReadPoolAutoScaleConfig.MaxNodeCount != maxNodeCount { + return fmt.Errorf("Expected max node count %d, got %d", maxNodeCount, inst.Settings.ReadPoolAutoScaleConfig.MaxNodeCount) + } + + return nil + } +} diff --git a/website/docs/r/sql_database_instance.html.markdown b/website/docs/r/sql_database_instance.html.markdown index c977143056..aeb9cd3b68 100644 --- a/website/docs/r/sql_database_instance.html.markdown +++ b/website/docs/r/sql_database_instance.html.markdown @@ -409,7 +409,7 @@ The `settings` block supports: * `data_disk_provisioned_throughput` - (Optional, Beta) Provisioned throughput measured in MiB per second for the data disk. This field is only used for `HYPERDISK_BALANCED` disk types. -* `node_count` - For a read pool instance, the number of nodes in the read pool. +* `node_count` - For a read pool instance, the number of nodes in the read pool. For read pools with auto scaling enabled, this field is read only. * `pricing_plan` - (Optional) Pricing plan for this instance, can only be `PER_USE`. @@ -531,6 +531,28 @@ The optional `settings.ip_configuration.psc_config` sublist supports: * `consumer_service_project_id` - (Optional) The project ID of consumer service project of this consumer endpoint. +The optional `settings.read_pool_auto_scale_config` subblock supports: + +* `enabled` - True if Read Pool Auto Scale is enabled. + +* `max_node_count` - Maximum number of nodes in the read pool. If set to lower than current node count, node count will be updated. + +* `min_node_count` - Minimum number of nodes in the read pool. If set to higher than current node count, node count will be updated. + + + +* `disable_scale_in` - True if auto scale in is disabled. + +* `scale_in_cooldown_seconds` - The cooldown period for scale in operations. + +* `scale_out_cooldown_seconds` - The cooldown period for scale out operations. 
+ +* `target_metrics` - Target metrics for Read Pool Auto Scale. Must specify `target_metrics.metric` and `target_metrics.target_value` in subblock. + +* `metric` - Metric name for Read Pool Auto Scale. + +* `target_value` - Target value for Read Pool Auto Scale. + The optional `settings.location_preference` subblock supports: * `follow_gae_application` - (Optional) A GAE application whose zone to remain