From 53926331bd7fdb974b84e7ce77d8924039119e89 Mon Sep 17 00:00:00 2001 From: hjoshi123 Date: Sat, 28 Dec 2024 11:12:59 -0700 Subject: [PATCH 1/7] init: added data source for cluster versions (returns array) --- .../eks/cluster_versions_data_source.go | 197 ++++++++++++++++ .../eks/cluster_versions_data_source_test.go | 214 ++++++++++++++++++ .../docs/d/eks_cluster_versions.html.markdown | 46 ++++ 3 files changed, 457 insertions(+) create mode 100644 internal/service/eks/cluster_versions_data_source.go create mode 100644 internal/service/eks/cluster_versions_data_source_test.go create mode 100644 website/docs/d/eks_cluster_versions.html.markdown diff --git a/internal/service/eks/cluster_versions_data_source.go b/internal/service/eks/cluster_versions_data_source.go new file mode 100644 index 000000000000..e32d57573e0a --- /dev/null +++ b/internal/service/eks/cluster_versions_data_source.go @@ -0,0 +1,197 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package eks + +import ( + // TIP: ==== IMPORTS ==== + // This is a common set of imports but not customized to your code since + // your code hasn't been written yet. Make sure you, your IDE, or + // goimports -w fixes these imports. + // + // The provider linter wants your imports to be in two groups: first, + // standard library (i.e., "fmt" or "strings"), second, everything else. + // + // Also, AWS Go SDK v2 may handle nested structures differently than v1, + // using the services/eks/types package. If so, you'll + // need to import types and reference the nested types, e.g., as + // awstypes.. 
+ "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/eks" + awstypes "github.com/aws/aws-sdk-go-v2/service/eks/types" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/eks" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// TIP: ==== FILE STRUCTURE ==== +// All data sources should follow this basic outline. Improve this data source's +// maintainability by sticking to it. +// +// 1. Package declaration +// 2. Imports +// 3. Main data source struct with schema method +// 4. Read method +// 5. Other functions (flatteners, expanders, waiters, finders, etc.) + +// Function annotations are used for datasource registration to the Provider. DO NOT EDIT. 
+// @FrameworkDataSource("aws_eks_cluster_versions", name="Cluster Versions") +func newDataSourceClusterVersions(context.Context) (datasource.DataSourceWithConfigure, error) { + return &dataSourceClusterVersions{}, nil +} + +const ( + DSNameClusterVersions = "Cluster Versions Data Source" +) + +type dataSourceClusterVersions struct { + framework.DataSourceWithConfigure +} + +func (d *dataSourceClusterVersions) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { // nosemgrep:ci.meta-in-func-name + resp.TypeName = "aws_eks_cluster_versions" +} + +// TIP: ==== SCHEMA ==== +// In the schema, add each of the arguments and attributes in snake +// case (e.g., delete_automated_backups). +// * Alphabetize arguments to make them easier to find. +// * Do not add a blank line between arguments/attributes. +// +// Users can configure argument values while attribute values cannot be +// configured and are used as output. Arguments have either: +// Required: true, +// Optional: true, +// +// All attributes will be computed and some arguments. If users will +// want to read updated information or detect drift for an argument, +// it should be computed: +// Computed: true, +// +// You will typically find arguments in the input struct +// (e.g., CreateDBInstanceInput) for the create operation. Sometimes +// they are only in the input struct (e.g., ModifyDBInstanceInput) for +// the modify operation. 
+// +// For more about schema options, visit +// https://developer.hashicorp.com/terraform/plugin/framework/handling-data/schemas?page=schemas +func (d *dataSourceClusterVersions) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "cluster_type": schema.StringAttribute{ + Computed: true, + }, + "cluster_versions": schema.StringAttribute{ + Computed: true, + }, + "default_platform_version": schema.StringAttribute{ + Computed: true, + }, + "default_version": schema.BoolAttribute{ + Computed: true, + }, + "end_of_extended_support_date": schema.StringAttribute{ + Computed: true, + }, + "end_of_standard_support_date": schema.StringAttribute{ + Computed: true, + }, + "kubernetes_patch_version": schema.StringAttribute{ + Computed: true, + }, + "release_date": schema.StringAttribute{ + Computed: true, + }, + "status": schema.StringAttribute{ + CustomType: fwtypes.StringEnumType(awstypes.ClusterVersionStatus), + Computed: true, + }, + }, + } +} + +// TIP: ==== ASSIGN CRUD METHODS ==== +// Data sources only have a read method. +func (d *dataSourceClusterVersions) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // TIP: ==== DATA SOURCE READ ==== + // Generally, the Read function should do the following things. Make + // sure there is a good reason if you don't do one of these. + // + // 1. Get a client connection to the relevant service + // 2. Fetch the config + // 3. Get information about a resource from AWS + // 4. Set the ID, arguments, and attributes + // 5. Set the tags + // 6. Set the state + // TIP: -- 1. Get a client connection to the relevant service + conn := d.Meta().EKSClient(ctx) + + // TIP: -- 2. Fetch the config + var data dataSourceClusterVersionsModel + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + + // TIP: -- 3. 
Get information about a resource from AWS + out, err := findClusterVersionsByName(ctx, conn, data.Name.ValueString()) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.EKS, create.ErrActionReading, DSNameClusterVersions, data.Name.String(), err), + err.Error(), + ) + return + } + + // TIP: -- 4. Set the ID, arguments, and attributes + // Using a field name prefix allows mapping fields such as `ClusterVersionsId` to `ID` + resp.Diagnostics.Append(flex.Flatten(ctx, out, &data, flex.WithFieldNamePrefix("ClusterVersions"))...) + if resp.Diagnostics.HasError() { + return + } + + // TIP: -- 5. Set the tags + ignoreTagsConfig := d.Meta().IgnoreTagsConfig(ctx) + tags := KeyValueTags(ctx, out.Tags).IgnoreAWS().IgnoreConfig(ignoreTagsConfig) + data.Tags = tftags.FlattenStringValueMap(ctx, tags.Map()) + + // TIP: -- 6. Set the state + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) +} + +// TIP: ==== DATA STRUCTURES ==== +// With Terraform Plugin-Framework configurations are deserialized into +// Go types, providing type safety without the need for type assertions. +// These structs should match the schema definition exactly, and the `tfsdk` +// tag value should match the attribute name. +// +// Nested objects are represented in their own data struct. These will +// also have a corresponding attribute type mapping for use inside flex +// functions. 
+// +// See more: +// https://developer.hashicorp.com/terraform/plugin/framework/handling-data/accessing-values +type dataSourceClusterVersionsModel struct { + ARN types.String `tfsdk:"arn"` + ComplexArgument fwtypes.ListNestedObjectValueOf[complexArgumentModel] `tfsdk:"complex_argument"` + Description types.String `tfsdk:"description"` + ID types.String `tfsdk:"id"` + Name types.String `tfsdk:"name"` + Tags tftags.Map `tfsdk:"tags"` + Type types.String `tfsdk:"type"` +} + +type complexArgumentModel struct { + NestedRequired types.String `tfsdk:"nested_required"` + NestedOptional types.String `tfsdk:"nested_optional"` +} diff --git a/internal/service/eks/cluster_versions_data_source_test.go b/internal/service/eks/cluster_versions_data_source_test.go new file mode 100644 index 000000000000..f001030947cb --- /dev/null +++ b/internal/service/eks/cluster_versions_data_source_test.go @@ -0,0 +1,214 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package eks_test +// **PLEASE DELETE THIS AND ALL TIP COMMENTS BEFORE SUBMITTING A PR FOR REVIEW!** +// +// TIP: ==== INTRODUCTION ==== +// Thank you for trying the skaff tool! +// +// You have opted to include these helpful comments. They all include "TIP:" +// to help you find and remove them when you're done with them. +// +// While some aspects of this file are customized to your input, the +// scaffold tool does *not* look at the AWS API and ensure it has correct +// function, structure, and variable names. It makes guesses based on +// commonalities. You will need to make significant adjustments. +// +// In other words, as generated, this is a rough outline of the work you will +// need to do. If something doesn't make sense for your situation, get rid of +// it. + +import ( + // TIP: ==== IMPORTS ==== + // This is a common set of imports but not customized to your code since + // your code hasn't been written yet. Make sure you, your IDE, or + // goimports -w fixes these imports. 
+ // + // The provider linter wants your imports to be in two groups: first, + // standard library (i.e., "fmt" or "strings"), second, everything else. + // + // Also, AWS Go SDK v2 may handle nested structures differently than v1, + // using the services/eks/types package. If so, you'll + // need to import types and reference the nested types, e.g., as + // types.. + "fmt" + "strings" + "testing" + + "github.com/YakDriver/regexache" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/eks" + "github.com/aws/aws-sdk-go-v2/service/eks/types" + "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + + // TIP: You will often need to import the package that this test file lives + // in. Since it is in the "test" context, it must import the package to use + // any normal context constants, variables, or functions. + tfeks "github.com/hashicorp/terraform-provider-aws/internal/service/eks" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// TIP: File Structure. The basic outline for all test files should be as +// follows. Improve this data source's maintainability by following this +// outline. +// +// 1. Package declaration (add "_test" since this is a test file) +// 2. Imports +// 3. Unit tests +// 4. Basic test +// 5. Disappears test +// 6. All the other tests +// 7. Helper functions (exists, destroy, check, etc.) +// 8. Functions that return Terraform configurations + + +// TIP: ==== UNIT TESTS ==== +// This is an example of a unit test. 
Its name is not prefixed with +// "TestAcc" like an acceptance test. +// +// Unlike acceptance tests, unit tests do not access AWS and are focused on a +// function (or method). Because of this, they are quick and cheap to run. +// +// In designing a data source's implementation, isolate complex bits from AWS bits +// so that they can be tested through a unit test. We encourage more unit tests +// in the provider. +// +// Cut and dry functions using well-used patterns, like typical flatteners and +// expanders, don't need unit testing. However, if they are complex or +// intricate, they should be unit tested. +func TestClusterVersionsExampleUnitTest(t *testing.T) { + t.Parallel() + + testCases := []struct { + TestName string + Input string + Expected string + Error bool + }{ + { + TestName: "empty", + Input: "", + Expected: "", + Error: true, + }, + { + TestName: "descriptive name", + Input: "some input", + Expected: "some output", + Error: false, + }, + { + TestName: "another descriptive name", + Input: "more input", + Expected: "more output", + Error: false, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.TestName, func(t *testing.T) { + t.Parallel() + got, err := tfeks.FunctionFromDataSource(testCase.Input) + + if err != nil && !testCase.Error { + t.Errorf("got error (%s), expected no error", err) + } + + if err == nil && testCase.Error { + t.Errorf("got (%s) and no error, expected error", got) + } + + if got != testCase.Expected { + t.Errorf("got %s, expected %s", got, testCase.Expected) + } + }) + } +} + + +// TIP: ==== ACCEPTANCE TESTS ==== +// This is an example of a basic acceptance test. This should test as much of +// standard functionality of the data source as possible, and test importing, if +// applicable. We prefix its name with "TestAcc", the service, and the +// data source name. +// +// Acceptance test access AWS and cost money to run. 
+func TestAccEKSClusterVersionsDataSource_basic(t *testing.T) { + ctx := acctest.Context(t) + // TIP: This is a long-running test guard for tests that run longer than + // 300s (5 min) generally. + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var clusterversions eks.DescribeClusterVersionsResponse + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dataSourceName := "data.aws_eks_cluster_versions.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.EKSEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.EKSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckClusterVersionsDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccClusterVersionsDataSourceConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckClusterVersionsExists(ctx, dataSourceName, &clusterversions), + resource.TestCheckResourceAttr(dataSourceName, "auto_minor_version_upgrade", "false"), + resource.TestCheckResourceAttrSet(dataSourceName, "maintenance_window_start_time.0.day_of_week"), + resource.TestCheckTypeSetElemNestedAttrs(dataSourceName, "user.*", map[string]string{ + "console_access": "false", + "groups.#": "0", + "username": "Test", + "password": "TestTest1234", + }), + // TIP: If the ARN can be partially or completely determined by the parameters passed, e.g. it contains the + // value of `rName`, either include the values in the regex or check for an exact match using `acctest.CheckResourceAttrRegionalARN` + // Alternatively, if the data source returns the values for a corresponding resource, use `resource.TestCheckResourceAttrPair` to + // check that the values are the same. 
+ acctest.MatchResourceAttrRegionalARN(ctx, dataSourceName, names.AttrARN, "eks", regexache.MustCompile(`clusterversions:.+$`)), + ), + }, + }, + }) +} + +func testAccClusterVersionsDataSourceConfig_basic(rName, version string) string { + return fmt.Sprintf(` +data "aws_security_group" "test" { + name = %[1]q +} + +data "aws_eks_cluster_versions" "test" { + cluster_versions_name = %[1]q + engine_type = "ActiveEKS" + engine_version = %[2]q + host_instance_type = "eks.t2.micro" + security_groups = [aws_security_group.test.id] + authentication_strategy = "simple" + storage_type = "efs" + + logs { + general = true + } + + user { + username = "Test" + password = "TestTest1234" + } +} +`, rName, version) +} diff --git a/website/docs/d/eks_cluster_versions.html.markdown b/website/docs/d/eks_cluster_versions.html.markdown new file mode 100644 index 000000000000..09bc442845bb --- /dev/null +++ b/website/docs/d/eks_cluster_versions.html.markdown @@ -0,0 +1,46 @@ +--- +subcategory: "EKS (Elastic Kubernetes)" +layout: "aws" +page_title: "AWS: aws_eks_cluster_versions" +description: |- + Terraform data source for managing an AWS EKS (Elastic Kubernetes) Cluster Versions. +--- + + +# Data Source: aws_eks_cluster_versions + +Terraform data source for managing an AWS EKS (Elastic Kubernetes) Cluster Versions. + +## Example Usage + +### Basic Usage + +```terraform +data "aws_eks_cluster_versions" "example" { +} +``` + +## Argument Reference + +The following arguments are required: + +* `example_arg` - (Required) Concise argument description. Do not begin the description with "An", "The", "Defines", "Indicates", or "Specifies," as these are verbose. In other words, "Indicates the amount of storage," can be rewritten as "Amount of storage," without losing any information. + +The following arguments are optional: + +* `optional_arg` - (Optional) Concise argument description. Do not begin the description with "An", "The", "Defines", "Indicates", or "Specifies," as these are verbose. 
In other words, "Indicates the amount of storage," can be rewritten as "Amount of storage," without losing any information. + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `arn` - ARN of the Cluster Versions. Do not begin the description with "An", "The", "Defines", "Indicates", or "Specifies," as these are verbose. In other words, "Indicates the amount of storage," can be rewritten as "Amount of storage," without losing any information. +* `example_attribute` - Concise description. Do not begin the description with "An", "The", "Defines", "Indicates", or "Specifies," as these are verbose. In other words, "Indicates the amount of storage," can be rewritten as "Amount of storage," without losing any information. +* `tags` - Map of tags assigned to the resource. From 007d33621cf914e2ca81dac0fc607b3638060917 Mon Sep 17 00:00:00 2001 From: hjoshi123 Date: Wed, 1 Jan 2025 16:42:13 -0700 Subject: [PATCH 2/7] feat: added new data source aws_eks_cluster_versions --- go.mod | 2 +- go.sum | 4 +- .../eks/cluster_versions_data_source.go | 200 +++++++---------- .../eks/cluster_versions_data_source_test.go | 207 +++--------------- internal/service/eks/service_package_gen.go | 7 +- 5 files changed, 121 insertions(+), 299 deletions(-) diff --git a/go.mod b/go.mod index a4340da71e51..2d4b905c77c1 100644 --- a/go.mod +++ b/go.mod @@ -105,7 +105,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.27.8 github.com/aws/aws-sdk-go-v2/service/ecs v1.53.1 github.com/aws/aws-sdk-go-v2/service/efs v1.34.2 - github.com/aws/aws-sdk-go-v2/service/eks v1.55.0 + github.com/aws/aws-sdk-go-v2/service/eks v1.56.0 github.com/aws/aws-sdk-go-v2/service/elasticache v1.44.2 github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk v1.28.8 github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing v1.28.7 diff --git a/go.sum b/go.sum index 84119d38183b..7f72d0d95f72 100644 --- a/go.sum +++ b/go.sum @@ -221,8 +221,8 @@ 
github.com/aws/aws-sdk-go-v2/service/ecs v1.53.1 h1:sAT2jzHkds1cv7VvNpzFfCw2w3zA github.com/aws/aws-sdk-go-v2/service/ecs v1.53.1/go.mod h1:YpTRClSDOPvN2e3kiIrYOx1sI+YKTZVmlMiNO2AwYhE= github.com/aws/aws-sdk-go-v2/service/efs v1.34.2 h1:gV7yKX8euN6W9vXiPutShochfx5ren706E9D0qsoOjo= github.com/aws/aws-sdk-go-v2/service/efs v1.34.2/go.mod h1:SB5IpCGoPDDTpf7wMLVtq5MRsad+vqIMONmJf/l4nqY= -github.com/aws/aws-sdk-go-v2/service/eks v1.55.0 h1:EahmhEaZE/xuD/X9GhgfSkLhcxMAl+mnSZCxmCmHrfE= -github.com/aws/aws-sdk-go-v2/service/eks v1.55.0/go.mod h1:kNUWaiotRWCnfQlprrxSMg8ALqbZyA9xLCwKXuLumSk= +github.com/aws/aws-sdk-go-v2/service/eks v1.56.0 h1:x31cGGE/t/QkrHVh5m2uWvYwDiaDXpj88nh6OdnI5r0= +github.com/aws/aws-sdk-go-v2/service/eks v1.56.0/go.mod h1:kNUWaiotRWCnfQlprrxSMg8ALqbZyA9xLCwKXuLumSk= github.com/aws/aws-sdk-go-v2/service/elasticache v1.44.2 h1:+dzQKj9hOytVJOQjRxBI1nWyfoyB4gPh91vUTnPPOTk= github.com/aws/aws-sdk-go-v2/service/elasticache v1.44.2/go.mod h1:XIxNB7tOhWeEBxjR73NTGrQ6tTHM2YBCKS/5CL2YKqE= github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk v1.28.8 h1:A6w8FeT/ZD67gHTlJLdemN3eZbvcVXOcmeHvTLbkeCY= diff --git a/internal/service/eks/cluster_versions_data_source.go b/internal/service/eks/cluster_versions_data_source.go index e32d57573e0a..a67c5f5b94e9 100644 --- a/internal/service/eks/cluster_versions_data_source.go +++ b/internal/service/eks/cluster_versions_data_source.go @@ -4,25 +4,15 @@ package eks import ( - // TIP: ==== IMPORTS ==== - // This is a common set of imports but not customized to your code since - // your code hasn't been written yet. Make sure you, your IDE, or - // goimports -w fixes these imports. - // - // The provider linter wants your imports to be in two groups: first, - // standard library (i.e., "fmt" or "strings"), second, everything else. - // - // Also, AWS Go SDK v2 may handle nested structures differently than v1, - // using the services/eks/types package. 
If so, you'll - // need to import types and reference the nested types, e.g., as - // awstypes.. "context" - + "fmt" "github.com/aws/aws-sdk-go-v2/aws" + "github.com/hashicorp/terraform-plugin-log/tflog" + + "github.com/hashicorp/terraform-plugin-framework-timetypes/timetypes" + "github.com/aws/aws-sdk-go-v2/service/eks" awstypes "github.com/aws/aws-sdk-go-v2/service/eks/types" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/eks" "github.com/hashicorp/terraform-plugin-framework/datasource" "github.com/hashicorp/terraform-plugin-framework/datasource/schema" "github.com/hashicorp/terraform-plugin-framework/types" @@ -30,21 +20,9 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/framework" "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" - tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/names" ) -// TIP: ==== FILE STRUCTURE ==== -// All data sources should follow this basic outline. Improve this data source's -// maintainability by sticking to it. -// -// 1. Package declaration -// 2. Imports -// 3. Main data source struct with schema method -// 4. Read method -// 5. Other functions (flatteners, expanders, waiters, finders, etc.) - -// Function annotations are used for datasource registration to the Provider. DO NOT EDIT. // @FrameworkDataSource("aws_eks_cluster_versions", name="Cluster Versions") func newDataSourceClusterVersions(context.Context) (datasource.DataSourceWithConfigure, error) { return &dataSourceClusterVersions{}, nil @@ -62,136 +40,114 @@ func (d *dataSourceClusterVersions) Metadata(_ context.Context, req datasource.M resp.TypeName = "aws_eks_cluster_versions" } -// TIP: ==== SCHEMA ==== -// In the schema, add each of the arguments and attributes in snake -// case (e.g., delete_automated_backups). -// * Alphabetize arguments to make them easier to find. 
-// * Do not add a blank line between arguments/attributes. -// -// Users can configure argument values while attribute values cannot be -// configured and are used as output. Arguments have either: -// Required: true, -// Optional: true, -// -// All attributes will be computed and some arguments. If users will -// want to read updated information or detect drift for an argument, -// it should be computed: -// Computed: true, -// -// You will typically find arguments in the input struct -// (e.g., CreateDBInstanceInput) for the create operation. Sometimes -// they are only in the input struct (e.g., ModifyDBInstanceInput) for -// the modify operation. -// -// For more about schema options, visit -// https://developer.hashicorp.com/terraform/plugin/framework/handling-data/schemas?page=schemas func (d *dataSourceClusterVersions) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { resp.Schema = schema.Schema{ Attributes: map[string]schema.Attribute{ "cluster_type": schema.StringAttribute{ - Computed: true, - }, - "cluster_versions": schema.StringAttribute{ - Computed: true, - }, - "default_platform_version": schema.StringAttribute{ - Computed: true, - }, - "default_version": schema.BoolAttribute{ - Computed: true, - }, - "end_of_extended_support_date": schema.StringAttribute{ - Computed: true, + Optional: true, }, - "end_of_standard_support_date": schema.StringAttribute{ - Computed: true, + "default_only": schema.BoolAttribute{ + Optional: true, }, - "kubernetes_patch_version": schema.StringAttribute{ - Computed: true, - }, - "release_date": schema.StringAttribute{ - Computed: true, + "cluster_versions_only": schema.ListAttribute{ + Optional: true, + CustomType: fwtypes.ListOfStringType, }, "status": schema.StringAttribute{ - CustomType: fwtypes.StringEnumType(awstypes.ClusterVersionStatus), - Computed: true, + Optional: true, + CustomType: fwtypes.StringEnumType[awstypes.ClusterVersionStatus](), }, + "cluster_versions": 
framework.DataSourceComputedListOfObjectAttribute[customDataSourceClusterVersion](ctx), }, } } -// TIP: ==== ASSIGN CRUD METHODS ==== -// Data sources only have a read method. func (d *dataSourceClusterVersions) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { - // TIP: ==== DATA SOURCE READ ==== - // Generally, the Read function should do the following things. Make - // sure there is a good reason if you don't do one of these. - // - // 1. Get a client connection to the relevant service - // 2. Fetch the config - // 3. Get information about a resource from AWS - // 4. Set the ID, arguments, and attributes - // 5. Set the tags - // 6. Set the state - // TIP: -- 1. Get a client connection to the relevant service conn := d.Meta().EKSClient(ctx) - // TIP: -- 2. Fetch the config var data dataSourceClusterVersionsModel resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) if resp.Diagnostics.HasError() { return } + input := &eks.DescribeClusterVersionsInput{} + + if data.ClusterType.String() != "" { + input.ClusterType = aws.String(data.ClusterType.ValueString()) + } + + input.DefaultOnly = aws.Bool(data.DefaultOnly.ValueBool()) + + if len(data.ClusterVersionsOnly.Elements()) > 0 && !data.ClusterVersions.IsNull() { + clVersions := make([]string, 0, len(data.ClusterVersionsOnly.Elements())) + for _, v := range data.ClusterVersionsOnly.Elements() { + clVersions = append(clVersions, v.String()) + } + + input.ClusterVersions = clVersions + } + + if data.Status.String() != "" { + input.Status = awstypes.ClusterVersionStatus(data.Status.ValueString()) + } + // TIP: -- 3. 
Get information about a resource from AWS - out, err := findClusterVersionsByName(ctx, conn, data.Name.ValueString()) + out, err := findClusterVersions(ctx, conn, input) if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.EKS, create.ErrActionReading, DSNameClusterVersions, data.Name.String(), err), - err.Error(), - ) + resp.Diagnostics.AddError(fmt.Sprint(names.EKS, create.ErrActionReading, DSNameClusterVersions, err), err.Error()) return } - // TIP: -- 4. Set the ID, arguments, and attributes - // Using a field name prefix allows mapping fields such as `ClusterVersionsId` to `ID` - resp.Diagnostics.Append(flex.Flatten(ctx, out, &data, flex.WithFieldNamePrefix("ClusterVersions"))...) + output := &eks.DescribeClusterVersionsOutput{ + ClusterVersions: out, + } + + resp.Diagnostics.Append(flex.Flatten(ctx, output, &data)...) if resp.Diagnostics.HasError() { return } - // TIP: -- 5. Set the tags - ignoreTagsConfig := d.Meta().IgnoreTagsConfig(ctx) - tags := KeyValueTags(ctx, out.Tags).IgnoreAWS().IgnoreConfig(ignoreTagsConfig) - data.Tags = tftags.FlattenStringValueMap(ctx, tags.Map()) - - // TIP: -- 6. Set the state resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) } -// TIP: ==== DATA STRUCTURES ==== -// With Terraform Plugin-Framework configurations are deserialized into -// Go types, providing type safety without the need for type assertions. -// These structs should match the schema definition exactly, and the `tfsdk` -// tag value should match the attribute name. -// -// Nested objects are represented in their own data struct. These will -// also have a corresponding attribute type mapping for use inside flex -// functions. 
-// -// See more: -// https://developer.hashicorp.com/terraform/plugin/framework/handling-data/accessing-values +func findClusterVersions(ctx context.Context, conn *eks.Client, input *eks.DescribeClusterVersionsInput) ([]awstypes.ClusterVersionInformation, error) { + output := make([]awstypes.ClusterVersionInformation, 0) + + fmt.Printf("Finding cluster versions\n %v %v %v", input.ClusterVersions, aws.ToString(input.ClusterType), input.Status) + tflog.Debug(ctx, "Finding cluster versions", map[string]interface{}{"input": input}) + + pages := eks.NewDescribeClusterVersionsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + if err != nil { + return nil, err + } + + output = append(output, page.ClusterVersions...) + } + + fmt.Printf("Found cluster versions %v", output) + tflog.Debug(ctx, "Found cluster versions", map[string]interface{}{"output": output}) + + return output, nil +} + type dataSourceClusterVersionsModel struct { - ARN types.String `tfsdk:"arn"` - ComplexArgument fwtypes.ListNestedObjectValueOf[complexArgumentModel] `tfsdk:"complex_argument"` - Description types.String `tfsdk:"description"` - ID types.String `tfsdk:"id"` - Name types.String `tfsdk:"name"` - Tags tftags.Map `tfsdk:"tags"` - Type types.String `tfsdk:"type"` + ClusterType types.String `tfsdk:"cluster_type"` + DefaultOnly types.Bool `tfsdk:"default_only"` + ClusterVersionsOnly fwtypes.ListValueOf[types.String] `tfsdk:"cluster_versions_only"` + Status fwtypes.StringEnum[awstypes.ClusterVersionStatus] `tfsdk:"status"` + ClusterVersions fwtypes.ListNestedObjectValueOf[customDataSourceClusterVersion] `tfsdk:"cluster_versions"` } -type complexArgumentModel struct { - NestedRequired types.String `tfsdk:"nested_required"` - NestedOptional types.String `tfsdk:"nested_optional"` +type customDataSourceClusterVersion struct { + ClusterType types.String `tfsdk:"cluster_type"` + ClusterVersion types.String `tfsdk:"cluster_version"` + DefaultPlatformVersion types.String 
`tfsdk:"default_platform_version"` + EndOfExtendedSupportDate timetypes.RFC3339 `tfsdk:"end_of_extended_support_date"` + EndOfStandardSupportDate timetypes.RFC3339 `tfsdk:"end_of_standard_support_date"` + KubernetesPatchVersion types.String `tfsdk:"kubernetes_patch_version"` + ReleaseDate timetypes.RFC3339 `tfsdk:"release_date"` + Status fwtypes.StringEnum[awstypes.ClusterVersionStatus] `tfsdk:"status"` } diff --git a/internal/service/eks/cluster_versions_data_source_test.go b/internal/service/eks/cluster_versions_data_source_test.go index f001030947cb..7db089ef3fbe 100644 --- a/internal/service/eks/cluster_versions_data_source_test.go +++ b/internal/service/eks/cluster_versions_data_source_test.go @@ -2,213 +2,74 @@ // SPDX-License-Identifier: MPL-2.0 package eks_test -// **PLEASE DELETE THIS AND ALL TIP COMMENTS BEFORE SUBMITTING A PR FOR REVIEW!** -// -// TIP: ==== INTRODUCTION ==== -// Thank you for trying the skaff tool! -// -// You have opted to include these helpful comments. They all include "TIP:" -// to help you find and remove them when you're done with them. -// -// While some aspects of this file are customized to your input, the -// scaffold tool does *not* look at the AWS API and ensure it has correct -// function, structure, and variable names. It makes guesses based on -// commonalities. You will need to make significant adjustments. -// -// In other words, as generated, this is a rough outline of the work you will -// need to do. If something doesn't make sense for your situation, get rid of -// it. import ( - // TIP: ==== IMPORTS ==== - // This is a common set of imports but not customized to your code since - // your code hasn't been written yet. Make sure you, your IDE, or - // goimports -w fixes these imports. - // - // The provider linter wants your imports to be in two groups: first, - // standard library (i.e., "fmt" or "strings"), second, everything else. 
- // - // Also, AWS Go SDK v2 may handle nested structures differently than v1, - // using the services/eks/types package. If so, you'll - // need to import types and reference the nested types, e.g., as - // types.. - "fmt" - "strings" "testing" - "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/eks" - "github.com/aws/aws-sdk-go-v2/service/eks/types" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - "github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/create" - // TIP: You will often need to import the package that this test file lives - // in. Since it is in the "test" context, it must import the package to use - // any normal context constants, variables, or functions. - tfeks "github.com/hashicorp/terraform-provider-aws/internal/service/eks" "github.com/hashicorp/terraform-provider-aws/names" ) -// TIP: File Structure. The basic outline for all test files should be as -// follows. Improve this data source's maintainability by following this -// outline. -// -// 1. Package declaration (add "_test" since this is a test file) -// 2. Imports -// 3. Unit tests -// 4. Basic test -// 5. Disappears test -// 6. All the other tests -// 7. Helper functions (exists, destroy, check, etc.) -// 8. Functions that return Terraform configurations - +func TestAccEKSClusterVersionsDataSource_basic(t *testing.T) { + ctx := acctest.Context(t) -// TIP: ==== UNIT TESTS ==== -// This is an example of a unit test. Its name is not prefixed with -// "TestAcc" like an acceptance test. 
-// -// Unlike acceptance tests, unit tests do not access AWS and are focused on a -// function (or method). Because of this, they are quick and cheap to run. -// -// In designing a data source's implementation, isolate complex bits from AWS bits -// so that they can be tested through a unit test. We encourage more unit tests -// in the provider. -// -// Cut and dry functions using well-used patterns, like typical flatteners and -// expanders, don't need unit testing. However, if they are complex or -// intricate, they should be unit tested. -func TestClusterVersionsExampleUnitTest(t *testing.T) { - t.Parallel() + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dataSourceName := "data.aws_eks_cluster_versions.test" - testCases := []struct { - TestName string - Input string - Expected string - Error bool - }{ - { - TestName: "empty", - Input: "", - Expected: "", - Error: true, - }, - { - TestName: "descriptive name", - Input: "some input", - Expected: "some output", - Error: false, - }, - { - TestName: "another descriptive name", - Input: "more input", - Expected: "more output", - Error: false, + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EKSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccClusterVersionsDataSourceConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + acctest.CheckResourceAttrGreaterThanValue(dataSourceName, "cluster_versions.#", 0), + ), + }, }, - } - - for _, testCase := range testCases { - t.Run(testCase.TestName, func(t *testing.T) { - t.Parallel() - got, err := tfeks.FunctionFromDataSource(testCase.Input) - - if err != nil && !testCase.Error { - t.Errorf("got error (%s), expected no error", err) - } - - if err == nil && testCase.Error { - t.Errorf("got (%s) and no error, 
expected error", got) - } - - if got != testCase.Expected { - t.Errorf("got %s, expected %s", got, testCase.Expected) - } - }) - } + }) } - -// TIP: ==== ACCEPTANCE TESTS ==== -// This is an example of a basic acceptance test. This should test as much of -// standard functionality of the data source as possible, and test importing, if -// applicable. We prefix its name with "TestAcc", the service, and the -// data source name. -// -// Acceptance test access AWS and cost money to run. -func TestAccEKSClusterVersionsDataSource_basic(t *testing.T) { +func TestAccEKSClusterVersionsDataSource_clusterType(t *testing.T) { ctx := acctest.Context(t) - // TIP: This is a long-running test guard for tests that run longer than - // 300s (5 min) generally. - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - var clusterversions eks.DescribeClusterVersionsResponse rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) dataSourceName := "data.aws_eks_cluster_versions.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, names.EKSEndpointID) - testAccPreCheck(ctx, t) - }, + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EKSServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckClusterVersionsDestroy(ctx), + CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccClusterVersionsDataSourceConfig_basic(rName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckClusterVersionsExists(ctx, dataSourceName, &clusterversions), - resource.TestCheckResourceAttr(dataSourceName, "auto_minor_version_upgrade", "false"), - resource.TestCheckResourceAttrSet(dataSourceName, "maintenance_window_start_time.0.day_of_week"), - resource.TestCheckTypeSetElemNestedAttrs(dataSourceName, "user.*", map[string]string{ - 
"console_access": "false", - "groups.#": "0", - "username": "Test", - "password": "TestTest1234", - }), - // TIP: If the ARN can be partially or completely determined by the parameters passed, e.g. it contains the - // value of `rName`, either include the values in the regex or check for an exact match using `acctest.CheckResourceAttrRegionalARN` - // Alternatively, if the data source returns the values for a corresponding resource, use `resource.TestCheckResourceAttrPair` to - // check that the values are the same. - acctest.MatchResourceAttrRegionalARN(ctx, dataSourceName, names.AttrARN, "eks", regexache.MustCompile(`clusterversions:.+$`)), + Config: testAccClusterVersionsDataSourceConfig_clusterType(rName), + Check: resource.ComposeTestCheckFunc( + acctest.CheckResourceAttrGreaterThanValue(dataSourceName, "cluster_versions.#", 0), ), }, }, }) } -func testAccClusterVersionsDataSourceConfig_basic(rName, version string) string { - return fmt.Sprintf(` -data "aws_security_group" "test" { - name = %[1]q +func testAccClusterVersionsDataSourceConfig_basic(rName string) string { + return acctest.ConfigCompose(testAccClusterConfig_basic(rName), ` +data "aws_eks_cluster_versions" "test" { + depends_on = [aws_eks_cluster.test] +} +`) } +func testAccClusterVersionsDataSourceConfig_clusterType(rName string) string { + return acctest.ConfigCompose(testAccClusterConfig_basic(rName), ` data "aws_eks_cluster_versions" "test" { - cluster_versions_name = %[1]q - engine_type = "ActiveEKS" - engine_version = %[2]q - host_instance_type = "eks.t2.micro" - security_groups = [aws_security_group.test.id] - authentication_strategy = "simple" - storage_type = "efs" - - logs { - general = true - } - - user { - username = "Test" - password = "TestTest1234" - } + cluster_type = "eks" + depends_on = [aws_eks_cluster.test] } -`, rName, version) +`) } diff --git a/internal/service/eks/service_package_gen.go b/internal/service/eks/service_package_gen.go index c2d15e9b7d0c..57e57674737c 100644 --- 
a/internal/service/eks/service_package_gen.go +++ b/internal/service/eks/service_package_gen.go @@ -15,7 +15,12 @@ import ( type servicePackage struct{} func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*types.ServicePackageFrameworkDataSource { - return []*types.ServicePackageFrameworkDataSource{} + return []*types.ServicePackageFrameworkDataSource{ + { + Factory: newDataSourceClusterVersions, + Name: "Cluster Versions", + }, + } } func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.ServicePackageFrameworkResource { From c9b3b13d1b803cd867153d7f4bb953d0a729da67 Mon Sep 17 00:00:00 2001 From: hjoshi123 Date: Wed, 1 Jan 2025 16:43:23 -0700 Subject: [PATCH 3/7] fix --- internal/service/eks/cluster_versions_data_source.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/eks/cluster_versions_data_source.go b/internal/service/eks/cluster_versions_data_source.go index a67c5f5b94e9..58970bcbc2b2 100644 --- a/internal/service/eks/cluster_versions_data_source.go +++ b/internal/service/eks/cluster_versions_data_source.go @@ -53,7 +53,7 @@ func (d *dataSourceClusterVersions) Schema(ctx context.Context, req datasource.S Optional: true, CustomType: fwtypes.ListOfStringType, }, - "status": schema.StringAttribute{ + names.AttrStatus: schema.StringAttribute{ Optional: true, CustomType: fwtypes.StringEnumType[awstypes.ClusterVersionStatus](), }, From 8a65ac83e100e87a50cb10845b0acf9db87188c1 Mon Sep 17 00:00:00 2001 From: hjoshi123 Date: Thu, 2 Jan 2025 20:43:14 -0700 Subject: [PATCH 4/7] fix: lint fixes --- .../service/eks/cluster_versions_data_source.go | 15 +++------------ .../eks/cluster_versions_data_source_test.go | 3 +-- 2 files changed, 4 insertions(+), 14 deletions(-) diff --git a/internal/service/eks/cluster_versions_data_source.go b/internal/service/eks/cluster_versions_data_source.go index 58970bcbc2b2..860463082b04 100644 --- a/internal/service/eks/cluster_versions_data_source.go +++ 
b/internal/service/eks/cluster_versions_data_source.go @@ -6,13 +6,10 @@ package eks import ( "context" "fmt" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/hashicorp/terraform-plugin-log/tflog" - - "github.com/hashicorp/terraform-plugin-framework-timetypes/timetypes" "github.com/aws/aws-sdk-go-v2/service/eks" awstypes "github.com/aws/aws-sdk-go-v2/service/eks/types" + "github.com/hashicorp/terraform-plugin-framework-timetypes/timetypes" "github.com/hashicorp/terraform-plugin-framework/datasource" "github.com/hashicorp/terraform-plugin-framework/datasource/schema" "github.com/hashicorp/terraform-plugin-framework/types" @@ -74,10 +71,10 @@ func (d *dataSourceClusterVersions) Read(ctx context.Context, req datasource.Rea input := &eks.DescribeClusterVersionsInput{} if data.ClusterType.String() != "" { - input.ClusterType = aws.String(data.ClusterType.ValueString()) + input.ClusterType = data.ClusterType.ValueStringPointer() } - input.DefaultOnly = aws.Bool(data.DefaultOnly.ValueBool()) + input.DefaultOnly = data.DefaultOnly.ValueBoolPointer() if len(data.ClusterVersionsOnly.Elements()) > 0 && !data.ClusterVersions.IsNull() { clVersions := make([]string, 0, len(data.ClusterVersionsOnly.Elements())) @@ -114,9 +111,6 @@ func (d *dataSourceClusterVersions) Read(ctx context.Context, req datasource.Rea func findClusterVersions(ctx context.Context, conn *eks.Client, input *eks.DescribeClusterVersionsInput) ([]awstypes.ClusterVersionInformation, error) { output := make([]awstypes.ClusterVersionInformation, 0) - fmt.Printf("Finding cluster versions\n %v %v %v", input.ClusterVersions, aws.ToString(input.ClusterType), input.Status) - tflog.Debug(ctx, "Finding cluster versions", map[string]interface{}{"input": input}) - pages := eks.NewDescribeClusterVersionsPaginator(conn, input) for pages.HasMorePages() { page, err := pages.NextPage(ctx) @@ -127,9 +121,6 @@ func findClusterVersions(ctx context.Context, conn *eks.Client, input *eks.Descr output = append(output, 
page.ClusterVersions...) } - fmt.Printf("Found cluster versions %v", output) - tflog.Debug(ctx, "Found cluster versions", map[string]interface{}{"output": output}) - return output, nil } diff --git a/internal/service/eks/cluster_versions_data_source_test.go b/internal/service/eks/cluster_versions_data_source_test.go index 7db089ef3fbe..a7eef151f400 100644 --- a/internal/service/eks/cluster_versions_data_source_test.go +++ b/internal/service/eks/cluster_versions_data_source_test.go @@ -9,7 +9,6 @@ import ( sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - "github.com/hashicorp/terraform-provider-aws/names" ) @@ -69,7 +68,7 @@ func testAccClusterVersionsDataSourceConfig_clusterType(rName string) string { return acctest.ConfigCompose(testAccClusterConfig_basic(rName), ` data "aws_eks_cluster_versions" "test" { cluster_type = "eks" - depends_on = [aws_eks_cluster.test] + depends_on = [aws_eks_cluster.test] } `) } From 1488798f036f04e3f7fb6406bcb286435c2da966 Mon Sep 17 00:00:00 2001 From: hjoshi123 Date: Sun, 12 Jan 2025 10:22:42 -0700 Subject: [PATCH 5/7] test: added more tests --- .../eks/cluster_versions_data_source.go | 35 ++++++++++---- .../eks/cluster_versions_data_source_test.go | 48 ++++++++++++++----- 2 files changed, 60 insertions(+), 23 deletions(-) diff --git a/internal/service/eks/cluster_versions_data_source.go b/internal/service/eks/cluster_versions_data_source.go index 860463082b04..b99e4be012e1 100644 --- a/internal/service/eks/cluster_versions_data_source.go +++ b/internal/service/eks/cluster_versions_data_source.go @@ -52,7 +52,7 @@ func (d *dataSourceClusterVersions) Schema(ctx context.Context, req datasource.S }, names.AttrStatus: schema.StringAttribute{ Optional: true, - CustomType: fwtypes.StringEnumType[awstypes.ClusterVersionStatus](), + CustomType: 
fwtypes.StringEnumType[clusterVersionAWSStatus](), }, "cluster_versions": framework.DataSourceComputedListOfObjectAttribute[customDataSourceClusterVersion](ctx), }, @@ -124,21 +124,36 @@ func findClusterVersions(ctx context.Context, conn *eks.Client, input *eks.Descr return output, nil } +type clusterVersionAWSStatus string + +// Values returns all known values for ClusterVersionStatus. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (clusterVersionAWSStatus) Values() []clusterVersionAWSStatus { + return []clusterVersionAWSStatus{ + "UNSUPPORTED", + "STANDARD_SUPPORT", + "EXTENDED_SUPPORT", + } +} + type dataSourceClusterVersionsModel struct { ClusterType types.String `tfsdk:"cluster_type"` DefaultOnly types.Bool `tfsdk:"default_only"` ClusterVersionsOnly fwtypes.ListValueOf[types.String] `tfsdk:"cluster_versions_only"` - Status fwtypes.StringEnum[awstypes.ClusterVersionStatus] `tfsdk:"status"` + Status fwtypes.StringEnum[clusterVersionAWSStatus] `tfsdk:"status"` ClusterVersions fwtypes.ListNestedObjectValueOf[customDataSourceClusterVersion] `tfsdk:"cluster_versions"` } type customDataSourceClusterVersion struct { - ClusterType types.String `tfsdk:"cluster_type"` - ClusterVersion types.String `tfsdk:"cluster_version"` - DefaultPlatformVersion types.String `tfsdk:"default_platform_version"` - EndOfExtendedSupportDate timetypes.RFC3339 `tfsdk:"end_of_extended_support_date"` - EndOfStandardSupportDate timetypes.RFC3339 `tfsdk:"end_of_standard_support_date"` - KubernetesPatchVersion types.String `tfsdk:"kubernetes_patch_version"` - ReleaseDate timetypes.RFC3339 `tfsdk:"release_date"` - Status fwtypes.StringEnum[awstypes.ClusterVersionStatus] `tfsdk:"status"` + ClusterType types.String `tfsdk:"cluster_type"` + ClusterVersion types.String `tfsdk:"cluster_version"` + DefaultPlatformVersion types.String `tfsdk:"default_platform_version"` 
+ EndOfExtendedSupportDate timetypes.RFC3339 `tfsdk:"end_of_extended_support_date"` + EndOfStandardSupportDate timetypes.RFC3339 `tfsdk:"end_of_standard_support_date"` + KubernetesPatchVersion types.String `tfsdk:"kubernetes_patch_version"` + ReleaseDate timetypes.RFC3339 `tfsdk:"release_date"` + DefaultVersion types.Bool `tfsdk:"default_version"` + Status fwtypes.StringEnum[clusterVersionAWSStatus] `tfsdk:"status"` } diff --git a/internal/service/eks/cluster_versions_data_source_test.go b/internal/service/eks/cluster_versions_data_source_test.go index a7eef151f400..160de87c5af1 100644 --- a/internal/service/eks/cluster_versions_data_source_test.go +++ b/internal/service/eks/cluster_versions_data_source_test.go @@ -6,7 +6,6 @@ package eks_test import ( "testing" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/names" @@ -15,19 +14,18 @@ import ( func TestAccEKSClusterVersionsDataSource_basic(t *testing.T) { ctx := acctest.Context(t) - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) dataSourceName := "data.aws_eks_cluster_versions.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EKSServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccClusterVersionsDataSourceConfig_basic(rName), + Config: testAccClusterVersionsDataSourceConfig_basic(), Check: resource.ComposeAggregateTestCheckFunc( acctest.CheckResourceAttrGreaterThanValue(dataSourceName, "cluster_versions.#", 0), + acctest.CheckResourceAttrContains(dataSourceName, "cluster_versions.0.default_version", "true"), ), }, }, @@ -37,17 +35,15 @@ func 
TestAccEKSClusterVersionsDataSource_basic(t *testing.T) { func TestAccEKSClusterVersionsDataSource_clusterType(t *testing.T) { ctx := acctest.Context(t) - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) dataSourceName := "data.aws_eks_cluster_versions.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EKSServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccClusterVersionsDataSourceConfig_clusterType(rName), + Config: testAccClusterVersionsDataSourceConfig_clusterType(), Check: resource.ComposeTestCheckFunc( acctest.CheckResourceAttrGreaterThanValue(dataSourceName, "cluster_versions.#", 0), ), @@ -56,19 +52,45 @@ func TestAccEKSClusterVersionsDataSource_clusterType(t *testing.T) { }) } -func testAccClusterVersionsDataSourceConfig_basic(rName string) string { - return acctest.ConfigCompose(testAccClusterConfig_basic(rName), ` +func TestAccEKSClusterVersionsDataSource_defaultOnly(t *testing.T) { + ctx := acctest.Context(t) + + dataSourceName := "data.aws_eks_cluster_versions.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EKSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccClusterVersionsDataSourceConfig_defaultOnly(), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(dataSourceName, "cluster_versions.#", "1"), + ), + }, + }, + }) +} + +func testAccClusterVersionsDataSourceConfig_basic() string { + return acctest.ConfigCompose(` data "aws_eks_cluster_versions" "test" { - depends_on = [aws_eks_cluster.test] } `) } -func testAccClusterVersionsDataSourceConfig_clusterType(rName string) string { - return 
acctest.ConfigCompose(testAccClusterConfig_basic(rName), ` +func testAccClusterVersionsDataSourceConfig_clusterType() string { + return acctest.ConfigCompose(` data "aws_eks_cluster_versions" "test" { cluster_type = "eks" - depends_on = [aws_eks_cluster.test] +} +`) +} + +func testAccClusterVersionsDataSourceConfig_defaultOnly() string { + return acctest.ConfigCompose(` +data "aws_eks_cluster_versions" "test" { + default_only = true } `) } From 86b1554b10e093425c2fac86021b829fb7ecded4 Mon Sep 17 00:00:00 2001 From: hjoshi123 Date: Sun, 12 Jan 2025 11:18:31 -0700 Subject: [PATCH 6/7] fix: lint and documentation fix --- .../eks/cluster_versions_data_source.go | 10 ++++-- .../eks/cluster_versions_data_source_test.go | 32 +++++++++++++++++- .../docs/d/eks_cluster_versions.html.markdown | 33 ++++++++++--------- 3 files changed, 55 insertions(+), 20 deletions(-) diff --git a/internal/service/eks/cluster_versions_data_source.go b/internal/service/eks/cluster_versions_data_source.go index b99e4be012e1..26b0b8c901d3 100644 --- a/internal/service/eks/cluster_versions_data_source.go +++ b/internal/service/eks/cluster_versions_data_source.go @@ -43,13 +43,16 @@ func (d *dataSourceClusterVersions) Schema(ctx context.Context, req datasource.S "cluster_type": schema.StringAttribute{ Optional: true, }, - "default_only": schema.BoolAttribute{ - Optional: true, - }, "cluster_versions_only": schema.ListAttribute{ Optional: true, CustomType: fwtypes.ListOfStringType, }, + "default_only": schema.BoolAttribute{ + Optional: true, + }, + "include_all": schema.BoolAttribute{ + Optional: true, + }, names.AttrStatus: schema.StringAttribute{ Optional: true, CustomType: fwtypes.StringEnumType[clusterVersionAWSStatus](), @@ -141,6 +144,7 @@ func (clusterVersionAWSStatus) Values() []clusterVersionAWSStatus { type dataSourceClusterVersionsModel struct { ClusterType types.String `tfsdk:"cluster_type"` DefaultOnly types.Bool `tfsdk:"default_only"` + IncludeAll types.Bool `tfsdk:"include_all"` 
ClusterVersionsOnly fwtypes.ListValueOf[types.String] `tfsdk:"cluster_versions_only"` Status fwtypes.StringEnum[clusterVersionAWSStatus] `tfsdk:"status"` ClusterVersions fwtypes.ListNestedObjectValueOf[customDataSourceClusterVersion] `tfsdk:"cluster_versions"` diff --git a/internal/service/eks/cluster_versions_data_source_test.go b/internal/service/eks/cluster_versions_data_source_test.go index 160de87c5af1..62f9df3e6aec 100644 --- a/internal/service/eks/cluster_versions_data_source_test.go +++ b/internal/service/eks/cluster_versions_data_source_test.go @@ -25,7 +25,7 @@ func TestAccEKSClusterVersionsDataSource_basic(t *testing.T) { Config: testAccClusterVersionsDataSourceConfig_basic(), Check: resource.ComposeAggregateTestCheckFunc( acctest.CheckResourceAttrGreaterThanValue(dataSourceName, "cluster_versions.#", 0), - acctest.CheckResourceAttrContains(dataSourceName, "cluster_versions.0.default_version", "true"), + acctest.CheckResourceAttrContains(dataSourceName, "cluster_versions.0.default_version", acctest.CtTrue), ), }, }, @@ -66,6 +66,28 @@ func TestAccEKSClusterVersionsDataSource_defaultOnly(t *testing.T) { Config: testAccClusterVersionsDataSourceConfig_defaultOnly(), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr(dataSourceName, "cluster_versions.#", "1"), + acctest.CheckResourceAttrContains(dataSourceName, "cluster_versions.0.default_version", acctest.CtTrue), + ), + }, + }, + }) +} + +func TestAccEKSClusterVersionsDataSource_status(t *testing.T) { + ctx := acctest.Context(t) + + dataSourceName := "data.aws_eks_cluster_versions.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EKSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccClusterVersionsDataSourceConfig_status(), + Check: resource.ComposeTestCheckFunc( + 
acctest.CheckResourceAttrGreaterThanValue(dataSourceName, "cluster_versions.#", 0), + acctest.CheckResourceAttrContains(dataSourceName, "cluster_versions.0.status", "STANDARD_SUPPORT"), ), }, }, @@ -94,3 +116,11 @@ data "aws_eks_cluster_versions" "test" { } `) } + +func testAccClusterVersionsDataSourceConfig_status() string { + return acctest.ConfigCompose(` +data "aws_eks_cluster_versions" "test" { + status = "STANDARD_SUPPORT" +} +`) +} diff --git a/website/docs/d/eks_cluster_versions.html.markdown b/website/docs/d/eks_cluster_versions.html.markdown index 09bc442845bb..d38d7fb42694 100644 --- a/website/docs/d/eks_cluster_versions.html.markdown +++ b/website/docs/d/eks_cluster_versions.html.markdown @@ -5,14 +5,6 @@ page_title: "AWS: aws_eks_cluster_versions" description: |- Terraform data source for managing an AWS EKS (Elastic Kubernetes) Cluster Versions. --- - # Data Source: aws_eks_cluster_versions @@ -23,24 +15,33 @@ Terraform data source for managing an AWS EKS (Elastic Kubernetes) Cluster Versi ### Basic Usage ```terraform +data "aws_eks_cluster_versions" "example" {} + data "aws_eks_cluster_versions" "example" { + cluster_type = "eks" } ``` ## Argument Reference -The following arguments are required: - -* `example_arg` - (Required) Concise argument description. Do not begin the description with "An", "The", "Defines", "Indicates", or "Specifies," as these are verbose. In other words, "Indicates the amount of storage," can be rewritten as "Amount of storage," without losing any information. - The following arguments are optional: -* `optional_arg` - (Optional) Concise argument description. Do not begin the description with "An", "The", "Defines", "Indicates", or "Specifies," as these are verbose. In other words, "Indicates the amount of storage," can be rewritten as "Amount of storage," without losing any information. +* `cluster_type` - (Optional) The type of clusters to filter by. Currently only `eks` is supported. 
+* `default_only` - (Optional) Whether to show only the default versions of Kubernetes supported by EKS. Default is `false`.
+* `cluster_versions_only` - (Optional) List of Kubernetes versions to filter the results by.
+* `include_all` - (Optional) Whether to include all Kubernetes versions in the response. Default is `false`.
+* `status` - (Optional) Status of the EKS cluster versions to list. Can be `STANDARD_SUPPORT` or `UNSUPPORTED` or `EXTENDED_SUPPORT`.
 
 ## Attribute Reference
 
 This data source exports the following attributes in addition to the arguments above:
 
-* `arn` - ARN of the Cluster Versions. Do not begin the description with "An", "The", "Defines", "Indicates", or "Specifies," as these are verbose. In other words, "Indicates the amount of storage," can be rewritten as "Amount of storage," without losing any information.
-* `example_attribute` - Concise description. Do not begin the description with "An", "The", "Defines", "Indicates", or "Specifies," as these are verbose. In other words, "Indicates the amount of storage," can be rewritten as "Amount of storage," without losing any information.
-* `tags` - Map of tags assigned to the resource.
+* `cluster_version` - Kubernetes version supported by EKS.
+* `cluster_type` - Type of cluster that the version belongs to. Currently only `eks` is supported.
+* `default_platform_version` - Default EKS platform version for the cluster version.
+* `default_version` - Whether the version is the default Kubernetes version.
+* `status` - Status of the EKS cluster version. Can be `STANDARD_SUPPORT` or `UNSUPPORTED` or `EXTENDED_SUPPORT`.
+* `end_of_extended_support_date` - End of extended support date for the cluster version.
+* `end_of_standard_support_date` - End of standard support date for the cluster version.
+* `kubernetes_patch_version` - Kubernetes patch version for the cluster version.
+* `release_date` - Release date of the cluster version.
From 47a91526d01a10e560bd3c18b5c2c86ad0065680 Mon Sep 17 00:00:00 2001 From: hjoshi123 Date: Sat, 8 Feb 2025 00:03:42 -0700 Subject: [PATCH 7/7] merged main --- internal/service/eks/service_package_gen.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/internal/service/eks/service_package_gen.go b/internal/service/eks/service_package_gen.go index 0c4274cf10a5..efe2e4ecec70 100644 --- a/internal/service/eks/service_package_gen.go +++ b/internal/service/eks/service_package_gen.go @@ -27,8 +27,9 @@ func (p *servicePackage) EphemeralResources(ctx context.Context) []*types.Servic func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*types.ServicePackageFrameworkDataSource { return []*types.ServicePackageFrameworkDataSource{ { - Factory: newDataSourceClusterVersions, - Name: "Cluster Versions", + Factory: newDataSourceClusterVersions, + TypeName: "aws_eks_cluster_versions", + Name: "Cluster Versions", }, } }