From b219053a84b0615798e8f1cb66d2d1ce2921d2d1 Mon Sep 17 00:00:00 2001 From: FingerLeader Date: Mon, 2 Dec 2024 17:06:49 +0800 Subject: [PATCH 1/6] add region service Signed-off-by: FingerLeader --- .vscode/settings.json | 6 + docs/data-sources/clusters.md | 1 - go.mod | 3 +- go.sum | 2 + internal/provider/backup_resource.go | 9 +- internal/provider/backups_data_source.go | 13 +- internal/provider/cluster_resource.go | 6 +- .../provider/cluster_specs_data_source.go | 11 +- internal/provider/clusters_data_source.go | 15 +- .../dedicated/dedicated_cluster_resource.go | 923 ++++++++++++++++++ .../provider/dedicated_region_data_source.go | 85 ++ .../provider/dedicated_regions_data_source.go | 124 +++ internal/provider/import_resource.go | 11 +- internal/provider/projects_data_source.go | 11 +- internal/provider/provider.go | 42 +- internal/provider/restore_resource.go | 6 +- internal/provider/restores_data_source.go | 11 +- internal/provider/util.go | 14 +- tidbcloud/api_client.go | 7 +- tidbcloud/dedicated_api_client.go | 122 +++ 20 files changed, 1358 insertions(+), 64 deletions(-) create mode 100644 .vscode/settings.json create mode 100644 internal/provider/dedicated/dedicated_cluster_resource.go create mode 100644 internal/provider/dedicated_region_data_source.go create mode 100644 internal/provider/dedicated_regions_data_source.go create mode 100644 tidbcloud/dedicated_api_client.go diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000..b4d0c8d --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,6 @@ +{ + "cSpell.words": [ + "Apidedicated", + "Apidedicatedv1beta1" + ] +} \ No newline at end of file diff --git a/docs/data-sources/clusters.md b/docs/data-sources/clusters.md index 3b9f57c..4767116 100644 --- a/docs/data-sources/clusters.md +++ b/docs/data-sources/clusters.md @@ -64,7 +64,6 @@ Read-Only: - `create_timestamp` (String) The creation time of the cluster in Unix timestamp seconds (epoch time). - `id` (String) The ID of the cluster. - `name` (String) The name of the cluster. -- `project_id` (String) The ID of the project. - `region` (String) Region of the cluster. - `status` (Attributes) The status of the cluster. 
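A recurring mechanical change throughout this patch is visible in the import hunks above and below: standard-library imports such as `strings`, `math/rand`, and `strconv` move out of the third-party group. That is plain goimports grouping; a minimal illustration of the target layout (package name is illustrative):

```go
// goimports convention applied across this patch: standard-library
// imports first, then a blank line, then third-party modules.
package provider

import (
	"context"
	"fmt"
	"math/rand"
	"strconv"

	backupApi "github.com/c4pt0r/go-tidbcloud-sdk-v1/client/backup"
	"github.com/hashicorp/terraform-plugin-framework/datasource"
)
```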
(see [below for nested schema](#nestedatt--items--status)) diff --git a/go.mod b/go.mod index aea12d0..3147c7f 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/tidbcloud/terraform-provider-tidbcloud -go 1.21 +go 1.23 require ( github.com/c4pt0r/go-tidbcloud-sdk-v1 v0.0.0-20240415110020-a27efb454da5 @@ -16,6 +16,7 @@ require ( github.com/hashicorp/terraform-plugin-log v0.9.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.34.0 github.com/icholy/digest v0.1.15 + github.com/tidbcloud/tidbcloud-cli/pkg v0.0.0-20241125120734-8e2a11bc41c5 ) require ( diff --git a/go.sum b/go.sum index b6063bb..b3d1f5e 100644 --- a/go.sum +++ b/go.sum @@ -227,6 +227,8 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/tidbcloud/tidbcloud-cli/pkg v0.0.0-20241125120734-8e2a11bc41c5 h1:GhfOtxISf6CLjiBdx/02a+J3typ6C3U53ZWfgtf3rEM= +github.com/tidbcloud/tidbcloud-cli/pkg v0.0.0-20241125120734-8e2a11bc41c5/go.mod h1:ckUJi2ZhPQItzndsHOWfCnNBW7MlHHLHWEGOyKpN/kE= github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= diff --git a/internal/provider/backup_resource.go b/internal/provider/backup_resource.go index d12e0a0..b933c37 100644 --- a/internal/provider/backup_resource.go +++ b/internal/provider/backup_resource.go @@ -3,6 +3,8 @@ package provider import ( "context" "fmt" + "strings" + backupApi "github.com/c4pt0r/go-tidbcloud-sdk-v1/client/backup" "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" @@ -11,7 +13,6 @@ import ( "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-log/tflog" - "strings" ) type backupResourceData struct { @@ -31,7 +32,7 @@ var _ resource.Resource = &backupResource{} var _ resource.ResourceWithImportState = &backupResource{} type backupResource struct { - provider *tidbcloudProvider + provider *TidbcloudProvider } func NewBackupResource() resource.Resource { @@ -49,9 +50,9 @@ func (r *backupResource) Configure(_ context.Context, req resource.ConfigureRequ } var ok bool - if r.provider, ok = req.ProviderData.(*tidbcloudProvider); !ok { + if r.provider, ok = req.ProviderData.(*TidbcloudProvider); !ok { resp.Diagnostics.AddError("Internal provider error", - fmt.Sprintf("Error in Configure: expected %T but got %T", tidbcloudProvider{}, req.ProviderData)) + fmt.Sprintf("Error in Configure: expected %T but got %T", TidbcloudProvider{}, req.ProviderData)) } } diff --git a/internal/provider/backups_data_source.go b/internal/provider/backups_data_source.go index 13a430d..9a11e46 100644 --- a/internal/provider/backups_data_source.go +++ b/internal/provider/backups_data_source.go @@ -3,13 +3,14 @@ package provider import ( "context" "fmt" + "math/rand" + "strconv" + backupApi "github.com/c4pt0r/go-tidbcloud-sdk-v1/client/backup" "github.com/hashicorp/terraform-plugin-framework/datasource" 
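Beyond the rename from `tidbcloudProvider` to the exported `TidbcloudProvider`, every resource and data source touched by this patch repeats the same Configure boilerplate: nil-check `ProviderData`, then type-assert it. A sketch of a shared helper the call sites could delegate to; the helper name is hypothetical and not part of the patch:

```go
package provider

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-framework/diag"
)

// configureProvider (hypothetical) centralizes the assertion each Configure
// method in this patch performs by hand. It returns nil when ProviderData is
// nil, i.e. when Terraform has not configured the provider yet and the
// caller should simply return.
func configureProvider(providerData any, diags *diag.Diagnostics) *TidbcloudProvider {
	if providerData == nil {
		return nil
	}
	p, ok := providerData.(*TidbcloudProvider)
	if !ok {
		diags.AddError("Internal provider error",
			fmt.Sprintf("Error in Configure: expected %T but got %T", TidbcloudProvider{}, providerData))
		return nil
	}
	return p
}
```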
"github.com/hashicorp/terraform-plugin-framework/datasource/schema" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-log/tflog" - "math/rand" - "strconv" ) type backupsDataSourceData struct { @@ -36,7 +37,7 @@ type backup struct { var _ datasource.DataSource = &backupsDataSource{} type backupsDataSource struct { - provider *tidbcloudProvider + provider *TidbcloudProvider } func NewBackupsDataSource() datasource.DataSource { @@ -52,9 +53,9 @@ func (d *backupsDataSource) Configure(_ context.Context, req datasource.Configur return } var ok bool - if d.provider, ok = req.ProviderData.(*tidbcloudProvider); !ok { + if d.provider, ok = req.ProviderData.(*TidbcloudProvider); !ok { resp.Diagnostics.AddError("Internal provider error", - fmt.Sprintf("Error in Configure: expected %T but got %T", tidbcloudProvider{}, req.ProviderData)) + fmt.Sprintf("Error in Configure: expected %T but got %T", TidbcloudProvider{}, req.ProviderData)) } } @@ -149,7 +150,7 @@ func (d *backupsDataSource) Read(ctx context.Context, req datasource.ReadRequest tflog.Trace(ctx, "read backups data source") listBackUpOfClusterOK, err := d.provider.client.ListBackUpOfCluster(backupApi.NewListBackUpOfClusterParams().WithProjectID(data.ProjectId).WithClusterID(data.ClusterId).WithPage(&page).WithPageSize(&pageSize)) if err != nil { - resp.Diagnostics.AddError("Read Error", fmt.Sprintf("Unable to call GetBackups, got error: %s", err)) + resp.Diagnostics.AddError("Read Error", fmt.Sprintf("Unable to call ListBackups, got error: %s", err)) return } diff --git a/internal/provider/cluster_resource.go b/internal/provider/cluster_resource.go index aef0e35..bf177ff 100644 --- a/internal/provider/cluster_resource.go +++ b/internal/provider/cluster_resource.go @@ -98,7 +98,7 @@ type ipAccess struct { } type clusterResource struct { - provider *tidbcloudProvider + provider *TidbcloudProvider } func NewClusterResource() resource.Resource { @@ -116,9 +116,9 @@ func (r *clusterResource) Configure(_ context.Context, req resource.ConfigureReq } var ok bool - if r.provider, ok = req.ProviderData.(*tidbcloudProvider); !ok { + if r.provider, ok = req.ProviderData.(*TidbcloudProvider); !ok { resp.Diagnostics.AddError("Internal provider error", - fmt.Sprintf("Error in Configure: expected %T but got %T", tidbcloudProvider{}, req.ProviderData)) + fmt.Sprintf("Error in Configure: expected %T but got %T", TidbcloudProvider{}, req.ProviderData)) } } diff --git a/internal/provider/cluster_specs_data_source.go b/internal/provider/cluster_specs_data_source.go index 103c112..b97a6e3 100644 --- a/internal/provider/cluster_specs_data_source.go +++ b/internal/provider/cluster_specs_data_source.go @@ -3,13 +3,14 @@ package provider import ( "context" "fmt" + "math/rand" + "strconv" + clusterApi "github.com/c4pt0r/go-tidbcloud-sdk-v1/client/cluster" "github.com/hashicorp/terraform-plugin-framework/datasource" "github.com/hashicorp/terraform-plugin-framework/datasource/schema" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-log/tflog" - "math/rand" - "strconv" ) type clusterSpecsDataSourceData struct { @@ -59,7 +60,7 @@ type storageSizeGiRange struct { var _ datasource.DataSource = &clusterSpecsDataSource{} type clusterSpecsDataSource struct { - provider *tidbcloudProvider + provider *TidbcloudProvider } func NewClusterSpecsDataSource() datasource.DataSource { @@ -75,9 +76,9 @@ func (d *clusterSpecsDataSource) Configure(_ context.Context, req datasource.Con return } var ok 
bool - if d.provider, ok = req.ProviderData.(*tidbcloudProvider); !ok { + if d.provider, ok = req.ProviderData.(*TidbcloudProvider); !ok { resp.Diagnostics.AddError("Internal provider error", - fmt.Sprintf("Error in Configure: expected %T but got %T", tidbcloudProvider{}, req.ProviderData)) + fmt.Sprintf("Error in Configure: expected %T but got %T", TidbcloudProvider{}, req.ProviderData)) } } diff --git a/internal/provider/clusters_data_source.go b/internal/provider/clusters_data_source.go index 581399c..e376ad7 100644 --- a/internal/provider/clusters_data_source.go +++ b/internal/provider/clusters_data_source.go @@ -3,13 +3,14 @@ package provider import ( "context" "fmt" + "math/rand" + "strconv" + clusterApi "github.com/c4pt0r/go-tidbcloud-sdk-v1/client/cluster" "github.com/hashicorp/terraform-plugin-framework/datasource" "github.com/hashicorp/terraform-plugin-framework/datasource/schema" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-log/tflog" - "math/rand" - "strconv" ) type clustersDataSourceData struct { @@ -64,7 +65,7 @@ type connectionVpcPeering struct { var _ datasource.DataSource = &clustersDataSource{} type clustersDataSource struct { - provider *tidbcloudProvider + provider *TidbcloudProvider } func NewClustersDataSource() datasource.DataSource { @@ -80,9 +81,9 @@ func (d *clustersDataSource) Configure(_ context.Context, req datasource.Configu return } var ok bool - if d.provider, ok = req.ProviderData.(*tidbcloudProvider); !ok { + if d.provider, ok = req.ProviderData.(*TidbcloudProvider); !ok { resp.Diagnostics.AddError("Internal provider error", - fmt.Sprintf("Error in Configure: expected %T but got %T", tidbcloudProvider{}, req.ProviderData)) + fmt.Sprintf("Error in Configure: expected %T but got %T", TidbcloudProvider{}, req.ProviderData)) } } @@ -117,10 +118,6 @@ func (d *clustersDataSource) Schema(_ context.Context, _ datasource.SchemaReques MarkdownDescription: "The ID of the cluster.", Computed: true, }, - "project_id": schema.StringAttribute{ - MarkdownDescription: "The ID of the project.", - Computed: true, - }, "name": schema.StringAttribute{ MarkdownDescription: "The name of the cluster.", Computed: true, diff --git a/internal/provider/dedicated/dedicated_cluster_resource.go b/internal/provider/dedicated/dedicated_cluster_resource.go new file mode 100644 index 0000000..ef0882d --- /dev/null +++ b/internal/provider/dedicated/dedicated_cluster_resource.go @@ -0,0 +1,923 @@ +package dedicated + +// import ( +// "context" +// "fmt" +// "net/http" +// "sort" +// "strings" +// "time" + +// clusterApi "github.com/c4pt0r/go-tidbcloud-sdk-v1/client/cluster" +// "github.com/hashicorp/terraform-plugin-framework/path" +// "github.com/hashicorp/terraform-plugin-framework/resource" +// "github.com/hashicorp/terraform-plugin-framework/resource/schema" +// "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64planmodifier" +// "github.com/hashicorp/terraform-plugin-framework/resource/schema/objectplanmodifier" +// "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" +// "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" +// "github.com/hashicorp/terraform-plugin-framework/types" +// "github.com/hashicorp/terraform-plugin-log/tflog" +// "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" +// "github.com/tidbcloud/terraform-provider-tidbcloud/tidbcloud" +// ) + +// // Enum: [AVAILABLE CREATING MODIFYING PAUSED RESUMING UNAVAILABLE IMPORTING MAINTAINING 
PAUSING] +// type dedicatedClusterStatus string + +// const ( +// dedicatedClusterStatusCreating dedicatedClusterStatus = "CREATING" +// dedicatedClusterStatusAvailable dedicatedClusterStatus = "AVAILABLE" +// dedicatedClusterStatusModifying dedicatedClusterStatus = "MODIFYING" +// dedicatedClusterStatusPaused dedicatedClusterStatus = "PAUSED" +// dedicatedClusterStatusResuming dedicatedClusterStatus = "RESUMING" +// dedicatedClusterStatusUnavailable dedicatedClusterStatus = "UNAVAILABLE" +// dedicatedClusterStatusImporting dedicatedClusterStatus = "IMPORTING" +// dedicatedClusterStatusMaintaining dedicatedClusterStatus = "MAINTAINING" +// dedicatedClusterStatusPausing dedicatedClusterStatus = "PAUSING" +// ) + +// // const ( +// // clusterServerlessCreateTimeout = 180 * time.Second +// // clusterServerlessCreateInterval = 2 * time.Second +// // clusterCreateTimeout = time.Hour +// // clusterCreateInterval = 60 * time.Second +// // clusterUpdateTimeout = time.Hour +// // clusterUpdateInterval = 20 * time.Second +// // ) + +// type dedicatedClusterResourceData struct { +// ClusterId types.String `tfsdk:"id"` +// ProjectId string `tfsdk:"project_id"` +// Name string `tfsdk:"name"` +// ClusterType string `tfsdk:"cluster_type"` +// CloudProvider string `tfsdk:"cloud_provider"` +// RegionId string `tfsdk:"region_id"` +// CreateTime types.String `tfsdk:"create_time"` +// Labels map[string]string `tfsdk:"labels"` +// RootPassword types.String `tfsdk:"root_password"` +// Status *dedicatedClusterStatus `tfsdk:"status"` +// } + +// type clusterConfig struct { +// Paused *bool `tfsdk:"paused"` +// RootPassword types.String `tfsdk:"root_password"` +// Port types.Int64 `tfsdk:"port"` +// Components *components `tfsdk:"components"` +// IPAccessList []ipAccess `tfsdk:"ip_access_list"` +// } + +// type components struct { +// TiDB *componentTiDB `tfsdk:"tidb"` +// TiKV *componentTiKV `tfsdk:"tikv"` +// TiFlash *componentTiFlash `tfsdk:"tiflash"` +// } + +// type componentTiDB struct { +// NodeSize string `tfsdk:"node_size"` +// NodeQuantity int32 `tfsdk:"node_quantity"` +// } + +// type componentTiKV struct { +// NodeSize string `tfsdk:"node_size"` +// StorageSizeGib int32 `tfsdk:"storage_size_gib"` +// NodeQuantity int32 `tfsdk:"node_quantity"` +// } + +// type componentTiFlash struct { +// NodeSize string `tfsdk:"node_size"` +// StorageSizeGib int32 `tfsdk:"storage_size_gib"` +// NodeQuantity int32 `tfsdk:"node_quantity"` +// } + +// type ipAccess struct { +// CIDR string `tfsdk:"cidr"` +// Description string `tfsdk:"description"` +// } + +// type clusterResource struct { +// provider *tidbcloudProvider +// } + +// func NewClusterResource() resource.Resource { +// return &clusterResource{} +// } + +// func (r *clusterResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { +// resp.TypeName = req.ProviderTypeName + "_cluster" +// } + +// func (r *clusterResource) Configure(_ context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { +// // Prevent panic if the provider has not been configured. 
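The status enum above is what the polling logic at the bottom of this file keys on: WaitClusterReady treats CREATING, MODIFYING, RESUMING, UNAVAILABLE, IMPORTING, and PAUSING as pending, and AVAILABLE, PAUSED, and MAINTAINING as settled. Note that the commented-out waiter below still references the old `clusterStatus*` constant names rather than the `dedicatedClusterStatus*` names defined here. A small classifier capturing the same split (hypothetical helper, not in the patch):

```go
// isSettled (hypothetical) encodes the Pending/Target partition used by the
// commented-out WaitClusterReady below: true for statuses the waiter accepts
// as final, false for statuses it keeps polling through.
func isSettled(s dedicatedClusterStatus) bool {
	switch s {
	case dedicatedClusterStatusAvailable,
		dedicatedClusterStatusPaused,
		dedicatedClusterStatusMaintaining:
		return true
	}
	return false
}
```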
+// if req.ProviderData == nil { +// return +// } + +// var ok bool +// if r.provider, ok = req.ProviderData.(*tidbcloudProvider); !ok { +// resp.Diagnostics.AddError("Internal provider error", +// fmt.Sprintf("Error in Configure: expected %T but got %T", tidbcloudProvider{}, req.ProviderData)) +// } +// } + +// func (r *clusterResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { +// resp.Schema = schema.Schema{ +// MarkdownDescription: "cluster resource", +// Attributes: map[string]schema.Attribute{ +// "project_id": schema.StringAttribute{ +// MarkdownDescription: "The ID of the project. You can get the project ID from [tidbcloud_projects datasource](../data-sources/projects.md).", +// Required: true, +// }, +// "name": schema.StringAttribute{ +// MarkdownDescription: "The name of the cluster.", +// Required: true, +// }, +// "id": schema.StringAttribute{ +// Computed: true, +// MarkdownDescription: "The ID of the cluster.", +// PlanModifiers: []planmodifier.String{ +// stringplanmodifier.UseStateForUnknown(), +// }, +// }, +// "cluster_type": schema.StringAttribute{ +// MarkdownDescription: "Enum: \"DEDICATED\" \"DEVELOPER\", The cluster type.", +// Required: true, +// }, +// "cloud_provider": schema.StringAttribute{ +// MarkdownDescription: "Enum: \"AWS\" \"GCP\", The cloud provider on which your TiDB cluster is hosted.", +// Required: true, +// }, +// "create_timestamp": schema.StringAttribute{ +// MarkdownDescription: "The creation time of the cluster in Unix timestamp seconds (epoch time).", +// Computed: true, +// PlanModifiers: []planmodifier.String{ +// stringplanmodifier.UseStateForUnknown(), +// }, +// }, +// "region": schema.StringAttribute{ +// MarkdownDescription: "the region value should match the cloud provider's region code. 
You can get the complete list of available regions from the [tidbcloud_cluster_specs datasource](../data-sources/cluster_specs.md).", +// Required: true, +// }, +// "status": schema.SingleNestedAttribute{ +// MarkdownDescription: "The status of the cluster.", +// Computed: true, +// PlanModifiers: []planmodifier.Object{ +// clusterResourceStatus(), +// }, +// Attributes: map[string]schema.Attribute{ +// "tidb_version": schema.StringAttribute{ +// MarkdownDescription: "TiDB version.", +// Computed: true, +// PlanModifiers: []planmodifier.String{ +// stringplanmodifier.UseStateForUnknown(), +// }, +// }, +// "cluster_status": schema.StringAttribute{ +// MarkdownDescription: "Status of the cluster.", +// Computed: true, +// }, +// "connection_strings": schema.SingleNestedAttribute{ +// MarkdownDescription: "Connection strings.", +// Computed: true, +// PlanModifiers: []planmodifier.Object{ +// objectplanmodifier.UseStateForUnknown(), +// }, +// Attributes: map[string]schema.Attribute{ +// "default_user": schema.StringAttribute{ +// MarkdownDescription: "The default TiDB user for connection.", +// Computed: true, +// PlanModifiers: []planmodifier.String{ +// stringplanmodifier.UseStateForUnknown(), +// }, +// }, +// "standard": schema.SingleNestedAttribute{ +// MarkdownDescription: "Standard connection string.", +// Computed: true, +// PlanModifiers: []planmodifier.Object{ +// objectplanmodifier.UseStateForUnknown(), +// }, +// Attributes: map[string]schema.Attribute{ +// "host": schema.StringAttribute{ +// MarkdownDescription: "The host of standard connection.", +// Computed: true, +// PlanModifiers: []planmodifier.String{ +// stringplanmodifier.UseStateForUnknown(), +// }, +// }, +// "port": schema.Int64Attribute{ +// MarkdownDescription: "The TiDB port for connection. The port must be in the range of 1024-65535 except 10080.", +// Computed: true, +// PlanModifiers: []planmodifier.Int64{ +// int64planmodifier.UseStateForUnknown(), +// }, +// }, +// }, +// }, +// "vpc_peering": schema.SingleNestedAttribute{ +// MarkdownDescription: "VPC peering connection string.", +// Computed: true, +// PlanModifiers: []planmodifier.Object{ +// objectplanmodifier.UseStateForUnknown(), +// }, +// Attributes: map[string]schema.Attribute{ +// "host": schema.StringAttribute{ +// MarkdownDescription: "The host of VPC peering connection.", +// Computed: true, +// PlanModifiers: []planmodifier.String{ +// stringplanmodifier.UseStateForUnknown(), +// }, +// }, +// "port": schema.Int64Attribute{ +// MarkdownDescription: "The TiDB port for connection. The port must be in the range of 1024-65535 except 10080.", +// Computed: true, +// PlanModifiers: []planmodifier.Int64{ +// int64planmodifier.UseStateForUnknown(), +// }, +// }, +// }, +// }, +// }, +// }, +// }, +// }, +// "config": schema.SingleNestedAttribute{ +// MarkdownDescription: "The configuration of the cluster.", +// Required: true, +// Attributes: map[string]schema.Attribute{ +// "root_password": schema.StringAttribute{ +// MarkdownDescription: "The root password to access the cluster. It must be 8-64 characters.", +// Optional: true, +// }, +// "port": schema.Int64Attribute{ +// MarkdownDescription: "The TiDB port for connection. 
The port must be in the range of 1024-65535 except 10080, 4000 by default.\n" +
+// " - For a Serverless Tier cluster, only port 4000 is available.",
+// Optional: true,
+// Computed: true,
+// PlanModifiers: []planmodifier.Int64{
+// int64planmodifier.UseStateForUnknown(),
+// },
+// },
+// "paused": schema.BoolAttribute{
+// MarkdownDescription: "Flag that indicates whether the cluster is paused. true means to pause the cluster, and false means to resume the cluster.\n" +
+// " - The cluster can be paused only when the cluster_status is \"AVAILABLE\".\n" +
+// " - The cluster can be resumed only when the cluster_status is \"PAUSED\".",
+// Optional: true,
+// },
+// "components": schema.SingleNestedAttribute{
+// MarkdownDescription: "The components of the cluster.\n" +
+// " - For a Serverless Tier cluster, the components value can not be set.\n" +
+// " - For a Dedicated Tier cluster, the components value must be set.",
+// Optional: true,
+// Computed: true,
+// PlanModifiers: []planmodifier.Object{
+// objectplanmodifier.UseStateForUnknown(),
+// },
+// Attributes: map[string]schema.Attribute{
+// "tidb": schema.SingleNestedAttribute{
+// MarkdownDescription: "The TiDB component of the cluster",
+// Required: true,
+// PlanModifiers: []planmodifier.Object{
+// objectplanmodifier.UseStateForUnknown(),
+// },
+// Attributes: map[string]schema.Attribute{
+// "node_size": schema.StringAttribute{
+// Required: true,
+// MarkdownDescription: "The size of the TiDB component in the cluster. You can get the available node size of each region from the [tidbcloud_cluster_specs datasource](../data-sources/cluster_specs.md).\n" +
+// " - If the vCPUs of TiDB or TiKV component is 2 or 4, then their vCPUs need to be the same.\n" +
+// " - If the vCPUs of TiDB or TiKV component is 2 or 4, then the cluster does not support TiFlash.\n" +
+// " - Can not modify node_size of an existing cluster.",
+// PlanModifiers: []planmodifier.String{
+// stringplanmodifier.UseStateForUnknown(),
+// },
+// },
+// "node_quantity": schema.Int64Attribute{
+// MarkdownDescription: "The number of nodes in the cluster. You can get the minimum and step of a node quantity from the [tidbcloud_cluster_specs datasource](../data-sources/cluster_specs.md).",
+// Required: true,
+// PlanModifiers: []planmodifier.Int64{
+// int64planmodifier.UseStateForUnknown(),
+// },
+// },
+// },
+// },
+// "tikv": schema.SingleNestedAttribute{
+// MarkdownDescription: "The TiKV component of the cluster",
+// Required: true,
+// PlanModifiers: []planmodifier.Object{
+// objectplanmodifier.UseStateForUnknown(),
+// },
+// Attributes: map[string]schema.Attribute{
+// "node_size": schema.StringAttribute{
+// MarkdownDescription: "The size of the TiKV component in the cluster. You can get the available node size of each region from the [tidbcloud_cluster_specs datasource](../data-sources/cluster_specs.md).\n" +
+// " - If the vCPUs of TiDB or TiKV component is 2 or 4, then their vCPUs need to be the same.\n" +
+// " - If the vCPUs of TiDB or TiKV component is 2 or 4, then the cluster does not support TiFlash.\n" +
+// " - Can not modify node_size of an existing cluster.",
+// Required: true,
+// PlanModifiers: []planmodifier.String{
+// stringplanmodifier.UseStateForUnknown(),
+// },
+// },
+// "storage_size_gib": schema.Int64Attribute{
+// MarkdownDescription: "The storage size of a node in the cluster. 
You can get the minimum and maximum of storage size from the [tidbcloud_cluster_specs datasource](../data-sources/cluster_specs.md).\n" + +// " - Can not modify storage_size_gib of an existing cluster.", +// Required: true, +// PlanModifiers: []planmodifier.Int64{ +// int64planmodifier.UseStateForUnknown(), +// }, +// }, +// "node_quantity": schema.Int64Attribute{ +// MarkdownDescription: "The number of nodes in the cluster. You can get the minimum and step of a node quantity from the [tidbcloud_cluster_specs datasource](../data-sources/cluster_specs.md).\n" + +// " - TiKV do not support decreasing node quantity.\n" + +// " - The node_quantity of TiKV must be a multiple of 3.", +// Required: true, +// PlanModifiers: []planmodifier.Int64{ +// int64planmodifier.UseStateForUnknown(), +// }, +// }, +// }, +// }, +// "tiflash": schema.SingleNestedAttribute{ +// MarkdownDescription: "The TiFlash component of the cluster.", +// Optional: true, +// PlanModifiers: []planmodifier.Object{ +// objectplanmodifier.UseStateForUnknown(), +// }, +// Attributes: map[string]schema.Attribute{ +// "node_size": schema.StringAttribute{ +// MarkdownDescription: "The size of the TiFlash component in the cluster, You can get the available node size of each region from the [tidbcloud_cluster_specs datasource](../data-sources/cluster_specs.md).\n" + +// " - Can not modify node_size of an existing cluster.", +// Required: true, +// PlanModifiers: []planmodifier.String{ +// stringplanmodifier.UseStateForUnknown(), +// }, +// }, +// "storage_size_gib": schema.Int64Attribute{ +// MarkdownDescription: "The storage size of a node in the cluster. You can get the minimum and maximum of storage size from the [tidbcloud_cluster_specs datasource](../data-sources/cluster_specs.md).\n" + +// " - Can not modify storage_size_gib of an existing cluster.", +// Required: true, +// PlanModifiers: []planmodifier.Int64{ +// int64planmodifier.UseStateForUnknown(), +// }, +// }, +// "node_quantity": schema.Int64Attribute{ +// MarkdownDescription: "The number of nodes in the cluster. 
You can get the minimum and step of a node quantity from the [tidbcloud_cluster_specs datasource](../data-sources/cluster_specs.md).\n" + +// " - TiFlash do not support decreasing node quantity.", +// Required: true, +// PlanModifiers: []planmodifier.Int64{ +// int64planmodifier.UseStateForUnknown(), +// }, +// }, +// }, +// }, +// }, +// }, +// "ip_access_list": schema.ListNestedAttribute{ +// MarkdownDescription: "A list of IP addresses and Classless Inter-Domain Routing (CIDR) addresses that are allowed to access the TiDB Cloud cluster via [standard connection](https://docs.pingcap.com/tidbcloud/connect-to-tidb-cluster#connect-via-standard-connection).", +// Optional: true, +// NestedObject: schema.NestedAttributeObject{ +// Attributes: map[string]schema.Attribute{ +// "cidr": schema.StringAttribute{ +// MarkdownDescription: "The IP address or CIDR range that you want to add to the cluster's IP access list.", +// Required: true, +// }, +// "description": schema.StringAttribute{ +// MarkdownDescription: "Description that explains the purpose of the entry.", +// Required: true, +// }, +// }, +// }, +// }, +// }, +// }, +// }, +// } +// } + +// func (r clusterResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { +// if !r.provider.configured { +// resp.Diagnostics.AddError( +// "Provider not configured", +// "The provider hasn't been configured before apply, likely because it depends on an unknown value from another resource. This leads to weird stuff happening, so we'd prefer if you didn't do that. Thanks!", +// ) +// return +// } + +// // get data from config +// var data clusterResourceData +// diags := req.Config.Get(ctx, &data) +// resp.Diagnostics.Append(diags...) +// if resp.Diagnostics.HasError() { +// return +// } + +// // for Serverless cluster, components is not allowed. or plan and state may be inconsistent +// if data.ClusterType == dev { +// if data.Config.Components != nil { +// resp.Diagnostics.AddError("Create Error", fmt.Sprintf("components is not allowed in %s cluster_type", dev)) +// return +// } +// } + +// // for DEDICATED cluster, components is required. +// if data.ClusterType == ded { +// if data.Config.Components == nil { +// resp.Diagnostics.AddError("Create Error", fmt.Sprintf("components is required in %s cluster_type", ded)) +// return +// } +// } + +// // write logs using the tflog package +// // see https://pkg.go.dev/github.com/hashicorp/terraform-plugin-log/tflog +// tflog.Trace(ctx, "created cluster_resource") +// createClusterParams := clusterApi.NewCreateClusterParams().WithProjectID(data.ProjectId).WithBody(buildCreateClusterBody(data)) +// createClusterResp, err := r.provider.client.CreateCluster(createClusterParams) +// if err != nil { +// resp.Diagnostics.AddError("Create Error", fmt.Sprintf("Unable to call CreateCluster, got error: %s", err)) +// return +// } +// // set clusterId. 
other computed attributes are not returned by create, they will be set when refresh +// clusterId := *createClusterResp.Payload.ID +// data.ClusterId = types.StringValue(clusterId) +// if r.provider.sync { +// var cluster *clusterApi.GetClusterOKBody +// if data.ClusterType == dev { +// tflog.Info(ctx, "wait serverless cluster ready") +// cluster, err = WaitClusterReady(ctx, clusterServerlessCreateTimeout, clusterServerlessCreateInterval, data.ProjectId, clusterId, r.provider.client) +// if err != nil { +// resp.Diagnostics.AddError( +// "Cluster creation failed", +// fmt.Sprintf("Cluster is not ready, get error: %s", err), +// ) +// return +// } +// } else { +// tflog.Info(ctx, "wait dedicated cluster ready") +// cluster, err = WaitClusterReady(ctx, clusterCreateTimeout, clusterCreateInterval, data.ProjectId, clusterId, r.provider.client) +// if err != nil { +// resp.Diagnostics.AddError( +// "Cluster creation failed", +// fmt.Sprintf("Cluster is not ready, get error: %s", err), +// ) +// return +// } +// } +// refreshClusterResourceData(ctx, cluster, &data) +// } else { +// // we refresh in create for any unknown value. if someone has other opinions which is better, he can delete the refresh logic +// tflog.Trace(ctx, "read cluster_resource") +// getClusterParams := clusterApi.NewGetClusterParams().WithProjectID(data.ProjectId).WithClusterID(data.ClusterId.ValueString()) +// getClusterResp, err := r.provider.client.GetCluster(getClusterParams) +// if err != nil { +// resp.Diagnostics.AddError("Create Error", fmt.Sprintf("Unable to call GetCluster, got error: %s", err)) +// return +// } +// refreshClusterResourceData(ctx, getClusterResp.Payload, &data) +// } + +// // save into the Terraform state. +// diags = resp.State.Set(ctx, &data) +// resp.Diagnostics.Append(diags...) 
+// } + +// func buildCreateClusterBody(data clusterResourceData) clusterApi.CreateClusterBody { +// // required +// rootPassWord := data.Config.RootPassword.ValueString() +// payload := clusterApi.CreateClusterBody{ +// Name: &data.Name, +// ClusterType: &data.ClusterType, +// CloudProvider: &data.CloudProvider, +// Region: &data.Region, +// Config: &clusterApi.CreateClusterParamsBodyConfig{ +// RootPassword: &rootPassWord, +// }, +// } + +// // optional +// if data.Config.Components != nil { +// tidb := data.Config.Components.TiDB +// tikv := data.Config.Components.TiKV +// tiflash := data.Config.Components.TiFlash + +// components := &clusterApi.CreateClusterParamsBodyConfigComponents{ +// Tidb: &clusterApi.CreateClusterParamsBodyConfigComponentsTidb{ +// NodeSize: &tidb.NodeSize, +// NodeQuantity: &tidb.NodeQuantity, +// }, +// Tikv: &clusterApi.CreateClusterParamsBodyConfigComponentsTikv{ +// NodeSize: &tikv.NodeSize, +// StorageSizeGib: &tikv.StorageSizeGib, +// NodeQuantity: &tikv.NodeQuantity, +// }, +// } +// // tiflash is optional +// if tiflash != nil { +// components.Tiflash = &clusterApi.CreateClusterParamsBodyConfigComponentsTiflash{ +// NodeSize: &tiflash.NodeSize, +// StorageSizeGib: &tiflash.StorageSizeGib, +// NodeQuantity: &tiflash.NodeQuantity, +// } +// } + +// payload.Config.Components = components +// } +// if data.Config.IPAccessList != nil { +// var IPAccessList []*clusterApi.CreateClusterParamsBodyConfigIPAccessListItems0 +// for _, key := range data.Config.IPAccessList { +// cidr := key.CIDR +// IPAccessList = append(IPAccessList, &clusterApi.CreateClusterParamsBodyConfigIPAccessListItems0{ +// Cidr: &cidr, +// Description: key.Description, +// }) +// } +// payload.Config.IPAccessList = IPAccessList +// } +// if !data.Config.Port.IsNull() && !data.Config.Port.IsUnknown() { +// payload.Config.Port = int32(data.Config.Port.ValueInt64()) +// } + +// return payload +// } + +// func (r clusterResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { +// var projectId, clusterId string + +// resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root("project_id"), &projectId)...) +// resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root("id"), &clusterId)...) +// if resp.Diagnostics.HasError() { +// return +// } + +// // call read api +// tflog.Trace(ctx, "read cluster_resource") +// getClusterParams := clusterApi.NewGetClusterParams().WithProjectID(projectId).WithClusterID(clusterId) +// getClusterResp, err := r.provider.client.GetCluster(getClusterParams) +// if err != nil { +// resp.Diagnostics.AddError("Read Error", fmt.Sprintf("Unable to call GetClusterById, got error: %s", err)) +// return +// } + +// // refresh data with read result +// var data clusterResourceData +// // root_password, ip_access_list and pause will not return by read api, so we just use state's value even it changed on console! +// // use types.String in case ImportState method throw unhandled null value +// var rootPassword types.String +// var iPAccessList []ipAccess +// var paused *bool +// resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root("config").AtName("root_password"), &rootPassword)...) +// resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root("config").AtName("ip_access_list"), &iPAccessList)...) +// resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root("config").AtName("paused"), &paused)...) 
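The Read method here deliberately carries `root_password`, `ip_access_list`, and `paused` over from prior state, because the read API never returns them. Given that `root_password` is effectively write-only, it could additionally be marked `Sensitive` so Terraform redacts it in plan and state output; a hedged suggestion, not something this patch does:

```go
package provider

import "github.com/hashicorp/terraform-plugin-framework/resource/schema"

// Hypothetical hardening: the same attribute as in the schema above, plus
// Sensitive, which makes Terraform redact the value in CLI output.
var rootPasswordAttr = schema.StringAttribute{
	MarkdownDescription: "The root password to access the cluster. It must be 8-64 characters.",
	Optional:            true,
	Sensitive:           true,
}
```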
+// data.Config.RootPassword = rootPassword +// data.Config.IPAccessList = iPAccessList +// data.Config.Paused = paused +// refreshClusterResourceData(ctx, getClusterResp.Payload, &data) + +// // save into the Terraform state +// diags := resp.State.Set(ctx, &data) +// resp.Diagnostics.Append(diags...) +// } + +// func refreshClusterResourceData(ctx context.Context, resp *clusterApi.GetClusterOKBody, data *clusterResourceData) { +// // must return +// data.Name = resp.Name +// data.ClusterId = types.StringValue(*resp.ID) +// data.Region = resp.Region +// data.ProjectId = *resp.ProjectID +// data.ClusterType = resp.ClusterType +// data.CloudProvider = resp.CloudProvider +// data.CreateTimestamp = types.StringValue(resp.CreateTimestamp) +// data.Config.Port = types.Int64Value(int64(resp.Config.Port)) +// tidb := resp.Config.Components.Tidb +// tikv := resp.Config.Components.Tikv +// data.Config.Components = &components{ +// TiDB: &componentTiDB{ +// NodeSize: *tidb.NodeSize, +// NodeQuantity: *tidb.NodeQuantity, +// }, +// TiKV: &componentTiKV{ +// NodeSize: *tikv.NodeSize, +// NodeQuantity: *tikv.NodeQuantity, +// StorageSizeGib: *tikv.StorageSizeGib, +// }, +// } + +// var standard connectionStandard +// var vpcPeering connectionVpcPeering +// if resp.Status.ConnectionStrings.Standard != nil { +// standard.Host = resp.Status.ConnectionStrings.Standard.Host +// standard.Port = resp.Status.ConnectionStrings.Standard.Port +// } +// if resp.Status.ConnectionStrings.VpcPeering != nil { +// vpcPeering.Host = resp.Status.ConnectionStrings.VpcPeering.Host +// vpcPeering.Port = resp.Status.ConnectionStrings.VpcPeering.Port +// } +// data.Status = &clusterStatusDataSource{ +// TidbVersion: resp.Status.TidbVersion, +// ClusterStatus: types.StringValue(resp.Status.ClusterStatus), +// ConnectionStrings: &connection{ +// DefaultUser: resp.Status.ConnectionStrings.DefaultUser, +// Standard: &standard, +// VpcPeering: &vpcPeering, +// }, +// } +// // may return +// tiflash := resp.Config.Components.Tiflash +// if tiflash != nil { +// data.Config.Components.TiFlash = &componentTiFlash{ +// NodeSize: *tiflash.NodeSize, +// NodeQuantity: *tiflash.NodeQuantity, +// StorageSizeGib: *tiflash.StorageSizeGib, +// } +// } + +// // not return +// // IPAccessList, and password and pause will not update for it will not return by read api + +// } + +// // Update since open api is patch without check for the invalid parameter. we do a lot of check here to avoid inconsistency +// // check the date can't be updated +// // if plan and state is different, we can execute updated +// func (r clusterResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { +// // get plan +// var data clusterResourceData +// diags := req.Plan.Get(ctx, &data) +// resp.Diagnostics.Append(diags...) +// if resp.Diagnostics.HasError() { +// return +// } +// // get state +// var state clusterResourceData +// diags = req.State.Get(ctx, &state) +// resp.Diagnostics.Append(diags...) 
+// if resp.Diagnostics.HasError() {
+// return
+// }
+// // Serverless clusters can not be changed now
+// if data.ClusterType == dev {
+// resp.Diagnostics.AddError(
+// "Update error",
+// "Unable to update Serverless cluster",
+// )
+// return
+// }
+// // only components and paused can be changed now
+// if data.Name != state.Name || data.ClusterType != state.ClusterType || data.Region != state.Region || data.CloudProvider != state.CloudProvider ||
+// data.ProjectId != state.ProjectId || data.ClusterId != state.ClusterId {
+// resp.Diagnostics.AddError(
+// "Update error",
+// "name, cluster_type, region, cloud_provider and project_id can not be changed; only components and paused can be changed now",
+// )
+// return
+// }
+// if !data.Config.Port.IsNull() && !data.Config.Port.IsUnknown() && data.Config.Port.ValueInt64() != state.Config.Port.ValueInt64() {
+// resp.Diagnostics.AddError(
+// "Update error",
+// "port can not be changed, only components can be changed now",
+// )
+// return
+// }
+// if data.Config.IPAccessList != nil {
+// // You cannot add an IP access list to an existing cluster without an IP rule.
+// if len(state.Config.IPAccessList) == 0 {
+// resp.Diagnostics.AddError(
+// "Update error",
+// "ip_access_list can not be added to the existing cluster.",
+// )
+// return
+// }
+// // You cannot insert or delete an IP rule.
+// if len(data.Config.IPAccessList) != len(state.Config.IPAccessList) {
+// resp.Diagnostics.AddError(
+// "Update error",
+// "ip_access_list can not be changed, only components can be changed now",
+// )
+// return
+// }
+// // You cannot update an existing IP rule.
+// newIPAccessList := make([]ipAccess, len(data.Config.IPAccessList))
+// copy(newIPAccessList, data.Config.IPAccessList)
+// sort.Slice(newIPAccessList, func(i, j int) bool {
+// return newIPAccessList[i].CIDR < newIPAccessList[j].CIDR
+// })
+// currentIPAccessList := make([]ipAccess, len(state.Config.IPAccessList))
+// copy(currentIPAccessList, state.Config.IPAccessList)
+// sort.Slice(currentIPAccessList, func(i, j int) bool {
+// return currentIPAccessList[i].CIDR < currentIPAccessList[j].CIDR
+// })
+// for index, key := range newIPAccessList {
+// if currentIPAccessList[index].CIDR != key.CIDR || currentIPAccessList[index].Description != key.Description {
+// resp.Diagnostics.AddError(
+// "Update error",
+// "ip_access_list can not be changed, only components can be changed now",
+// )
+// return
+// }
+// }
+// } else {
+// // You cannot remove the IP access list.
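The hand-rolled immutability checks above (name, cluster_type, region, cloud_provider, project_id, port) reject changes at apply time. An alternative the framework offers is declaring such attributes `RequiresReplace`, so a change plans a destroy-and-recreate instead of erroring; a sketch under that assumption, not what this patch does:

```go
package provider

import (
	"github.com/hashicorp/terraform-plugin-framework/resource/schema"
	"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
	"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
)

// Hypothetical alternative: an immutable attribute declared with
// RequiresReplace makes Terraform plan re-creation on change, removing the
// need for the manual plan-vs-state comparison in Update.
var cloudProviderAttr = schema.StringAttribute{
	MarkdownDescription: "Enum: \"AWS\" \"GCP\", The cloud provider on which your TiDB cluster is hosted.",
	Required:            true,
	PlanModifiers: []planmodifier.String{
		stringplanmodifier.RequiresReplace(),
	},
}
```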
+// if len(state.Config.IPAccessList) > 0 { +// resp.Diagnostics.AddError( +// "Update error", +// "ip_access_list can not be changed, only components can be changed now", +// ) +// return +// } +// } + +// // check Components +// tidb := data.Config.Components.TiDB +// tikv := data.Config.Components.TiKV +// tiflash := data.Config.Components.TiFlash +// tidbState := state.Config.Components.TiDB +// tikvState := state.Config.Components.TiKV +// tiflashState := state.Config.Components.TiFlash +// if tidb.NodeSize != tidbState.NodeSize { +// resp.Diagnostics.AddError( +// "Update error", +// "tidb node_size can't be changed", +// ) +// return +// } +// if tikv.NodeSize != tikvState.NodeSize || tikv.StorageSizeGib != tikvState.StorageSizeGib { +// resp.Diagnostics.AddError( +// "Update error", +// "tikv node_size or storage_size_gib can't be changed", +// ) +// return +// } +// if tiflash != nil && tiflashState != nil { +// // if cluster have tiflash already, then we can't specify NodeSize and StorageSizeGib +// if tiflash.NodeSize != tiflashState.NodeSize || tiflash.StorageSizeGib != tiflashState.StorageSizeGib { +// resp.Diagnostics.AddError( +// "Update error", +// "tiflash node_size or storage_size_gib can't be changed", +// ) +// return +// } +// } + +// // build UpdateClusterBody +// var updateClusterBody clusterApi.UpdateClusterBody +// updateClusterBody.Config = &clusterApi.UpdateClusterParamsBodyConfig{} +// // build paused +// if data.Config.Paused != nil { +// if state.Config.Paused == nil || *data.Config.Paused != *state.Config.Paused { +// updateClusterBody.Config.Paused = data.Config.Paused +// } +// } +// // build components +// var isComponentsChanged = false +// if tidb.NodeQuantity != tidbState.NodeQuantity || tikv.NodeQuantity != tikvState.NodeQuantity { +// isComponentsChanged = true +// } + +// var componentTiFlash *clusterApi.UpdateClusterParamsBodyConfigComponentsTiflash +// if tiflash != nil { +// if tiflashState == nil { +// isComponentsChanged = true +// componentTiFlash = &clusterApi.UpdateClusterParamsBodyConfigComponentsTiflash{ +// NodeQuantity: &tiflash.NodeQuantity, +// NodeSize: &tiflash.NodeSize, +// StorageSizeGib: &tiflash.StorageSizeGib, +// } +// } else if tiflash.NodeQuantity != tiflashState.NodeQuantity { +// isComponentsChanged = true +// // NodeSize can't be changed +// componentTiFlash = &clusterApi.UpdateClusterParamsBodyConfigComponentsTiflash{ +// NodeQuantity: &tiflash.NodeQuantity, +// } +// } +// } + +// if isComponentsChanged { +// updateClusterBody.Config.Components = &clusterApi.UpdateClusterParamsBodyConfigComponents{ +// Tidb: &clusterApi.UpdateClusterParamsBodyConfigComponentsTidb{ +// NodeQuantity: &tidb.NodeQuantity, +// }, +// Tikv: &clusterApi.UpdateClusterParamsBodyConfigComponentsTikv{ +// NodeQuantity: &tikv.NodeQuantity, +// }, +// Tiflash: componentTiFlash, +// } +// } + +// tflog.Trace(ctx, "update cluster_resource") +// updateClusterParams := clusterApi.NewUpdateClusterParams().WithProjectID(data.ProjectId).WithClusterID(data.ClusterId.ValueString()).WithBody(updateClusterBody) +// _, err := r.provider.client.UpdateCluster(updateClusterParams) +// if err != nil { +// resp.Diagnostics.AddError("Update Error", fmt.Sprintf("Unable to call UpdateClusterById, got error: %s", err)) +// return +// } + +// if r.provider.sync { +// tflog.Info(ctx, "wait cluster ready") +// cluster, err := WaitClusterReady(ctx, clusterUpdateTimeout, clusterUpdateInterval, data.ProjectId, data.ClusterId.ValueString(), r.provider.client) +// if err != nil 
{ +// resp.Diagnostics.AddError( +// "Cluster update failed", +// fmt.Sprintf("Cluster is not ready, get error: %s", err), +// ) +// return +// } +// refreshClusterResourceData(ctx, cluster, &data) +// } else { +// // we refresh for any unknown value. if someone has other opinions which is better, he can delete the refresh logic +// tflog.Trace(ctx, "read cluster_resource") +// getClusterResp, err := r.provider.client.GetCluster(clusterApi.NewGetClusterParams().WithProjectID(data.ProjectId).WithClusterID(data.ClusterId.ValueString())) +// if err != nil { +// resp.Diagnostics.AddError("Update Error", fmt.Sprintf("Unable to call GetClusterById, got error: %s", err)) +// return +// } +// refreshClusterResourceData(ctx, getClusterResp.Payload, &data) +// } + +// // save into the Terraform state. +// diags = resp.State.Set(ctx, &data) +// resp.Diagnostics.Append(diags...) +// } + +// func (r clusterResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { +// var data clusterResourceData + +// diags := req.State.Get(ctx, &data) +// resp.Diagnostics.Append(diags...) + +// if resp.Diagnostics.HasError() { +// return +// } + +// tflog.Trace(ctx, "delete cluster_resource") +// _, err := r.provider.client.DeleteCluster(clusterApi.NewDeleteClusterParams().WithProjectID(data.ProjectId).WithClusterID(data.ClusterId.ValueString())) +// if err != nil { +// resp.Diagnostics.AddError("Delete Error", fmt.Sprintf("Unable to call DeleteClusterById, got error: %s", err)) +// return +// } +// } + +// func (r clusterResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { +// idParts := strings.Split(req.ID, ",") + +// if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" { +// resp.Diagnostics.AddError( +// "Unexpected Import Identifier", +// fmt.Sprintf("Expected import identifier with format: project_id,cluster_id. Got: %q", req.ID), +// ) +// return +// } + +// resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...) +// resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("id"), idParts[1])...) 
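ImportState above expects the two-part `project_id,cluster_id` identifier. The WaitClusterReady helper that follows is built on `retry.StateChangeConf` from the plugin SDK; a minimal self-contained sketch of that pattern, with a fake Refresh standing in for the real GetCluster call:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
)

func main() {
	// Same shape as WaitClusterReady below: poll Refresh until the returned
	// state string lands in Target, failing after Timeout. The Refresh here
	// settles immediately; the real one calls GetCluster and returns the
	// payload plus its cluster status.
	conf := &retry.StateChangeConf{
		Pending:      []string{"CREATING", "MODIFYING"},
		Target:       []string{"AVAILABLE"},
		Timeout:      10 * time.Second,
		MinTimeout:   500 * time.Millisecond,
		PollInterval: time.Second,
		Refresh: func() (interface{}, string, error) {
			return "cluster-payload", "AVAILABLE", nil
		},
	}
	out, err := conf.WaitForStateContext(context.Background())
	fmt.Println(out, err)
}
```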
+// } + +// func WaitClusterReady(ctx context.Context, timeout time.Duration, interval time.Duration, projectId, clusterId string, +// client tidbcloud.TiDBCloudClient) (*clusterApi.GetClusterOKBody, error) { +// stateConf := &retry.StateChangeConf{ +// Pending: []string{ +// string(clusterStatusCreating), +// string(clusterStatusModifying), +// string(clusterStatusResuming), +// string(clusterStatusUnavailable), +// string(clusterStatusImporting), +// string(clusterStatusPausing), +// }, +// Target: []string{ +// string(clusterStatusAvailable), +// string(clusterStatusPaused), +// string(clusterStatusMaintaining), +// }, +// Timeout: timeout, +// MinTimeout: 500 * time.Millisecond, +// PollInterval: interval, +// Refresh: clusterStateRefreshFunc(ctx, projectId, clusterId, client), +// } + +// outputRaw, err := stateConf.WaitForStateContext(ctx) + +// if output, ok := outputRaw.(*clusterApi.GetClusterOKBody); ok { +// return output, err +// } +// return nil, err +// } + +// func clusterStateRefreshFunc(ctx context.Context, projectId, clusterId string, +// client tidbcloud.TiDBCloudClient) retry.StateRefreshFunc { +// return func() (interface{}, string, error) { +// param := clusterApi.NewGetClusterParams().WithProjectID(projectId).WithClusterID(clusterId).WithContext(ctx) +// getClusterResp, err := client.GetCluster(param) +// if err != nil { +// tflog.Warn(ctx, fmt.Sprintf("get cluster error: %s", err)) +// if getClusterResp != nil && getClusterResp.Code() < http.StatusInternalServerError { +// return nil, "", err +// } else { +// // regard as not found and retry again. Default is 20 times +// return nil, "", nil +// } +// } +// return getClusterResp.Payload, getClusterResp.Payload.Status.ClusterStatus, nil +// } +// } diff --git a/internal/provider/dedicated_region_data_source.go b/internal/provider/dedicated_region_data_source.go new file mode 100644 index 0000000..b984640 --- /dev/null +++ b/internal/provider/dedicated_region_data_source.go @@ -0,0 +1,85 @@ +package provider + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + // "github.com/tidbcloud/terraform-provider-tidbcloud/internal/provider" +) + +type dedicatedRegion struct { + RegionId types.String `tfsdk:"region_id"` + CloudProvider types.String `tfsdk:"cloud_provider"` + DisplayName types.String `tfsdk:"display_name"` +} + +var _ datasource.DataSource = &dedicatedRegionDataSource{} + +type dedicatedRegionDataSource struct { + provider *TidbcloudProvider +} + +func NewDedicatedRegionDataSource() datasource.DataSource { + return &dedicatedRegionDataSource{} +} + +func (d *dedicatedRegionDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_dedicated_region" +} + +func (d *dedicatedRegionDataSource) Configure(_ context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + var ok bool + if d.provider, ok = req.ProviderData.(*TidbcloudProvider); !ok { + resp.Diagnostics.AddError("Internal provider error", + fmt.Sprintf("Error in Configure: expected %T but got %T", TidbcloudProvider{}, req.ProviderData)) + } +} + +func (d *dedicatedRegionDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + 
resp.Schema = schema.Schema{ + MarkdownDescription: "dedicated region data source", + Attributes: map[string]schema.Attribute{ + "region_id": schema.StringAttribute{ + MarkdownDescription: "The ID of the region. It is generated by TiDB Cloud.", + Required: true, + }, + "cloud_provider": schema.StringAttribute{ + MarkdownDescription: "The cloud provider of the region.", + Computed: true, + }, + "display_name": schema.StringAttribute{ + MarkdownDescription: "The display name of the region.", + Computed: true, + }, + }, + } +} + +func (d *dedicatedRegionDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var data dedicatedRegion + diags := req.Config.Get(ctx, &data) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + tflog.Trace(ctx, "read region data source") + region, err := d.provider.DedicatedClient.GetRegion(ctx, data.RegionId.ValueString()) + if err != nil { + resp.Diagnostics.AddError("Read Error", fmt.Sprintf("Unable to call GetRegion, got error: %s", err)) + return + } + + data.CloudProvider = types.StringValue(string(*region.CloudProvider)) + data.DisplayName = types.StringValue(string(*region.DisplayName)) + + diags = resp.State.Set(ctx, &data) + resp.Diagnostics.Append(diags...) +} diff --git a/internal/provider/dedicated_regions_data_source.go b/internal/provider/dedicated_regions_data_source.go new file mode 100644 index 0000000..6c75095 --- /dev/null +++ b/internal/provider/dedicated_regions_data_source.go @@ -0,0 +1,124 @@ +package provider + +import ( + "context" + "fmt" + "math/rand" + "strconv" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + // "github.com/tidbcloud/terraform-provider-tidbcloud/internal/provider" +) + +type dedicatedRegionsDataSourceData struct { + Id types.String `tfsdk:"id"` + Page types.Int64 `tfsdk:"page"` + PageSize types.Int64 `tfsdk:"page_size"` + Items []dedicatedRegion `tfsdk:"items"` + Total types.Int64 `tfsdk:"total"` +} + +var _ datasource.DataSource = &dedicatedRegionsDataSource{} + +type dedicatedRegionsDataSource struct { + provider *TidbcloudProvider +} + +func NewDedicatedRegionsDataSource() datasource.DataSource { + return &dedicatedRegionsDataSource{} +} + +func (d *dedicatedRegionsDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_dedicated_regions" +} + +func (d *dedicatedRegionsDataSource) Configure(_ context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + var ok bool + if d.provider, ok = req.ProviderData.(*TidbcloudProvider); !ok { + resp.Diagnostics.AddError("Internal provider error", + fmt.Sprintf("Error in Configure: expected %T but got %T", TidbcloudProvider{}, req.ProviderData)) + } +} + +func (d *dedicatedRegionsDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + MarkdownDescription: "dedicated regions data source", + Attributes: map[string]schema.Attribute{ + "id": schema.StringAttribute{ + MarkdownDescription: "data source ID", + Computed: true, + }, + "page": schema.Int64Attribute{ + MarkdownDescription: "Default:1 The number of pages.", + Optional: true, + Computed: true, + }, + 
"page_size": schema.Int64Attribute{ + MarkdownDescription: "Default:10 The size of a pages.", + Optional: true, + Computed: true, + }, + "items": schema.ListNestedAttribute{ + MarkdownDescription: "The items of regions", + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "region_id": schema.StringAttribute{ + MarkdownDescription: "The ID of the region. It is generated by TiDB Cloud.", + Computed: true, + }, + "cloud_provider": schema.StringAttribute{ + MarkdownDescription: "The cloud provider of the region.", + Computed: true, + }, + "display_name": schema.StringAttribute{ + MarkdownDescription: "The display name of the region.", + Computed: true, + }, + }, + }, + }, + "total": schema.Int64Attribute{ + MarkdownDescription: "The total number of regions in the project.", + Computed: true, + }, + }, + } +} + +func (d *dedicatedRegionsDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var data dedicatedRegionsDataSourceData + diags := req.Config.Get(ctx, &data) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + tflog.Trace(ctx, "read regions data source") + regions, err := d.provider.DedicatedClient.ListRegions(ctx) + if err != nil { + resp.Diagnostics.AddError("Read Error", fmt.Sprintf("Unable to call ListRegions, got error: %s", err)) + return + } + + data.Id = types.StringValue(strconv.FormatInt(rand.Int63(), 10)) + data.Total = types.Int64Value(int64(*regions.TotalSize)) + var items []dedicatedRegion + for _, key := range regions.Regions { + items = append(items, dedicatedRegion{ + RegionId: types.StringValue(*key.RegionId), + CloudProvider: types.StringValue(string(*key.CloudProvider)), + DisplayName: types.StringValue(*key.DisplayName), + }) + } + data.Items = items + + diags = resp.State.Set(ctx, &data) + resp.Diagnostics.Append(diags...) +} diff --git a/internal/provider/import_resource.go b/internal/provider/import_resource.go index 8e3bdc7..637cf5b 100644 --- a/internal/provider/import_resource.go +++ b/internal/provider/import_resource.go @@ -4,6 +4,9 @@ import ( "context" "errors" "fmt" + "os" + "strconv" + "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/resource" @@ -16,8 +19,6 @@ import ( "github.com/hashicorp/terraform-plugin-log/tflog" importService "github.com/tidbcloud/terraform-provider-tidbcloud/pkg/import/client/import_service" importModel "github.com/tidbcloud/terraform-provider-tidbcloud/pkg/import/models" - "os" - "strconv" ) // Ensure provider defined types fully satisfy framework interfaces. @@ -29,7 +30,7 @@ func NewImportResource() resource.Resource { // ImportResource defines the resource implementation. type ImportResource struct { - provider *tidbcloudProvider + provider *TidbcloudProvider } // ImportResourceModel describes the resource data model. 
@@ -114,9 +115,9 @@ func (r *ImportResource) Configure(_ context.Context, req resource.ConfigureRequ } var ok bool - if r.provider, ok = req.ProviderData.(*tidbcloudProvider); !ok { + if r.provider, ok = req.ProviderData.(*TidbcloudProvider); !ok { resp.Diagnostics.AddError("Internal provider error", - fmt.Sprintf("Error in Configure: expected %T but got %T", tidbcloudProvider{}, req.ProviderData)) + fmt.Sprintf("Error in Configure: expected %T but got %T", TidbcloudProvider{}, req.ProviderData)) } } diff --git a/internal/provider/projects_data_source.go b/internal/provider/projects_data_source.go index 4c843d3..ba26757 100644 --- a/internal/provider/projects_data_source.go +++ b/internal/provider/projects_data_source.go @@ -3,13 +3,14 @@ package provider import ( "context" "fmt" + "math/rand" + "strconv" + projectApi "github.com/c4pt0r/go-tidbcloud-sdk-v1/client/project" "github.com/hashicorp/terraform-plugin-framework/datasource" "github.com/hashicorp/terraform-plugin-framework/datasource/schema" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-log/tflog" - "math/rand" - "strconv" ) type projectsDataSourceData struct { @@ -33,7 +34,7 @@ type project struct { var _ datasource.DataSource = &projectsDataSource{} type projectsDataSource struct { - provider *tidbcloudProvider + provider *TidbcloudProvider } func NewProjectsDataSource() datasource.DataSource { @@ -49,9 +50,9 @@ func (d *projectsDataSource) Configure(_ context.Context, req datasource.Configu return } var ok bool - if d.provider, ok = req.ProviderData.(*tidbcloudProvider); !ok { + if d.provider, ok = req.ProviderData.(*TidbcloudProvider); !ok { resp.Diagnostics.AddError("Internal provider error", - fmt.Sprintf("Error in Configure: expected %T but got %T", tidbcloudProvider{}, req.ProviderData)) + fmt.Sprintf("Error in Configure: expected %T but got %T", TidbcloudProvider{}, req.ProviderData)) } } diff --git a/internal/provider/provider.go b/internal/provider/provider.go index b756bee..a9baf37 100644 --- a/internal/provider/provider.go +++ b/internal/provider/provider.go @@ -11,22 +11,27 @@ import ( "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/tidbcloud/terraform-provider-tidbcloud/tidbcloud" + // "github.com/tidbcloud/terraform-provider-tidbcloud/internal/provider/dedicated" ) // Ensure the implementation satisfies the provider.Provider interface. -var _ provider.Provider = &tidbcloudProvider{} +var _ provider.Provider = &TidbcloudProvider{} // NewClient overrides the NewClientDelegate method for testing. var NewClient = tidbcloud.NewClientDelegate +var NewDedicatedClient = tidbcloud.NewDedicatedClientDelegate + // provider satisfies the tfsdk.Provider interface and usually is included // with all Resource and DataSource implementations. -type tidbcloudProvider struct { +type TidbcloudProvider struct { // client can contain the upstream provider SDK or HTTP client used to // communicate with the upstream service. Resource and DataSource // implementations can then make calls using this client. client tidbcloud.TiDBCloudClient + DedicatedClient tidbcloud.TiDBCloudDedicatedClient + // configured is set to true at the end of the Configure method. // This can be used in Resource and DataSource implementations to verify // that the provider was previously configured. 
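`NewDedicatedClient` above mirrors the existing `NewClient` variable: a package-level delegate that tests can swap out (the `HookGlobal` helper documented in util.go exists for exactly this). A hedged sketch of how a test might substitute a fake dedicated client; the delegate signature is inferred from the Configure call site further down, and `fake` is an assumed test double:

```go
package provider

import "github.com/tidbcloud/terraform-provider-tidbcloud/tidbcloud"

// withFakeDedicatedClient (hypothetical) swaps the package-level delegate
// for the duration of test, then restores it. The parameter list mirrors
// the Configure call site: publicKey, privateKey, dedicatedEndpoint,
// iamEndpoint, userAgent.
func withFakeDedicatedClient(fake tidbcloud.TiDBCloudDedicatedClient, test func()) {
	orig := NewDedicatedClient
	NewDedicatedClient = func(publicKey, privateKey, dedicatedEndpoint, iamEndpoint, userAgent string) (tidbcloud.TiDBCloudDedicatedClient, error) {
		return fake, nil
	}
	defer func() { NewDedicatedClient = orig }()
	test()
}
```

At runtime, the `TIDBCLOUD_DEDICATED_ENDPOINT` and `TIDBCLOUD_IAM_ENDPOINT` variables introduced in util.go provide a similar seam, overriding the default endpoints without a rebuild.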
@@ -47,12 +52,12 @@ type providerData struct {
 Sync types.Bool `tfsdk:"sync"`
 }

-func (p *tidbcloudProvider) Metadata(_ context.Context, _ provider.MetadataRequest, resp *provider.MetadataResponse) {
+func (p *TidbcloudProvider) Metadata(_ context.Context, _ provider.MetadataRequest, resp *provider.MetadataResponse) {
 resp.TypeName = "tidbcloud"
 resp.Version = p.version
 }

-func (p *tidbcloudProvider) Configure(ctx context.Context, req provider.ConfigureRequest, resp *provider.ConfigureResponse) {
+func (p *TidbcloudProvider) Configure(ctx context.Context, req provider.ConfigureRequest, resp *provider.ConfigureResponse) {
 // get providerData
 var data providerData
 diags := req.Config.Get(ctx, &data)
@@ -108,15 +113,33 @@ func (p *tidbcloudProvider) Configure(ctx context.Context, req provider.Configur
 return
 }

+ // Create a new dedicated client and set it on the provider
+ var dedicatedEndpoint = tidbcloud.DefaultDedicatedEndpoint
+ if os.Getenv(TiDBCloudDedicatedEndpoint) != "" {
+ dedicatedEndpoint = os.Getenv(TiDBCloudDedicatedEndpoint)
+ }
+ var iamEndpoint = tidbcloud.DefaultIAMEndpoint
+ if os.Getenv(TiDBCloudIAMEndpoint) != "" {
+ iamEndpoint = os.Getenv(TiDBCloudIAMEndpoint)
+ }
+ dc, err := NewDedicatedClient(publicKey, privateKey, dedicatedEndpoint, iamEndpoint, fmt.Sprintf("%s/%s", UserAgent, p.version))
+ if err != nil {
+ resp.Diagnostics.AddError(
+ "Unable to create client",
+ "Unable to create TiDB Cloud dedicated client:\n\n"+err.Error(),
+ )
+ return
+ }
 // sync
 p.sync = data.Sync.ValueBool()
 p.client = c
+ p.DedicatedClient = dc
 p.configured = true
 resp.ResourceData = p
 resp.DataSourceData = p
 }

-func (p *tidbcloudProvider) Resources(ctx context.Context) []func() resource.Resource {
+func (p *TidbcloudProvider) Resources(ctx context.Context) []func() resource.Resource {
 return []func() resource.Resource{
 NewClusterResource,
 NewBackupResource,
@@ -125,17 +148,20 @@ func (p *tidbcloudProvider) Resources(ctx context.Context) []func() resource.Res
 }
 }

-func (p *tidbcloudProvider) DataSources(ctx context.Context) []func() datasource.DataSource {
+func (p *TidbcloudProvider) DataSources(ctx context.Context) []func() datasource.DataSource {
 return []func() datasource.DataSource{
 NewProjectsDataSource,
 NewClusterSpecsDataSource,
 NewBackupsDataSource,
 NewRestoresDataSource,
 NewClustersDataSource,
+
+ NewDedicatedRegionsDataSource,
+ NewDedicatedRegionDataSource,
 }
 }

-func (p *tidbcloudProvider) Schema(_ context.Context, _ provider.SchemaRequest, resp *provider.SchemaResponse) {
+func (p *TidbcloudProvider) Schema(_ context.Context, _ provider.SchemaRequest, resp *provider.SchemaResponse) {
 resp.Schema = schema.Schema{
 Attributes: map[string]schema.Attribute{
 "public_key": schema.StringAttribute{
@@ -159,7 +185,7 @@ func (p *tidbcloudProvider) Schema(_ context.Context, _ provider.SchemaRequest,

 func New(version string) func() provider.Provider {
 return func() provider.Provider {
- return &tidbcloudProvider{
+ return &TidbcloudProvider{
 version: version,
 }
 }
diff --git a/internal/provider/restore_resource.go b/internal/provider/restore_resource.go
index bae98b7..6afc0e4 100644
--- a/internal/provider/restore_resource.go
+++ b/internal/provider/restore_resource.go
@@ -45,7 +45,7 @@ type cluster struct {
 var _ resource.Resource = &restoreResource{}

 type restoreResource struct {
- provider *tidbcloudProvider
+ provider *TidbcloudProvider
 }

 func NewRestoreResource() resource.Resource {
@@ -63,9 +63,9 @@ func (r *restoreResource) Configure(_ context.Context, req
resource.ConfigureReq } var ok bool - if r.provider, ok = req.ProviderData.(*tidbcloudProvider); !ok { + if r.provider, ok = req.ProviderData.(*TidbcloudProvider); !ok { resp.Diagnostics.AddError("Internal provider error", - fmt.Sprintf("Error in Configure: expected %T but got %T", tidbcloudProvider{}, req.ProviderData)) + fmt.Sprintf("Error in Configure: expected %T but got %T", TidbcloudProvider{}, req.ProviderData)) } } diff --git a/internal/provider/restores_data_source.go b/internal/provider/restores_data_source.go index bcb3758..fb29fc9 100644 --- a/internal/provider/restores_data_source.go +++ b/internal/provider/restores_data_source.go @@ -3,13 +3,14 @@ package provider import ( "context" "fmt" + "math/rand" + "strconv" + restoreApi "github.com/c4pt0r/go-tidbcloud-sdk-v1/client/restore" "github.com/hashicorp/terraform-plugin-framework/datasource" "github.com/hashicorp/terraform-plugin-framework/datasource/schema" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-log/tflog" - "math/rand" - "strconv" ) type restoresDataSourceData struct { @@ -35,7 +36,7 @@ type restore struct { var _ datasource.DataSource = &restoresDataSource{} type restoresDataSource struct { - provider *tidbcloudProvider + provider *TidbcloudProvider } func NewRestoresDataSource() datasource.DataSource { @@ -51,9 +52,9 @@ func (d *restoresDataSource) Configure(_ context.Context, req datasource.Configu return } var ok bool - if d.provider, ok = req.ProviderData.(*tidbcloudProvider); !ok { + if d.provider, ok = req.ProviderData.(*TidbcloudProvider); !ok { resp.Diagnostics.AddError("Internal provider error", - fmt.Sprintf("Error in Configure: expected %T but got %T", tidbcloudProvider{}, req.ProviderData)) + fmt.Sprintf("Error in Configure: expected %T but got %T", TidbcloudProvider{}, req.ProviderData)) } } diff --git a/internal/provider/util.go b/internal/provider/util.go index bb9c483..7767252 100644 --- a/internal/provider/util.go +++ b/internal/provider/util.go @@ -8,12 +8,14 @@ import ( ) const ( - TiDBCloudPublicKey string = "TIDBCLOUD_PUBLIC_KEY" - TiDBCloudPrivateKey string = "TIDBCLOUD_PRIVATE_KEY" - TiDBCloudHOST string = "TIDBCLOUD_HOST" - TiDBCloudProjectID string = "TIDBCLOUD_PROJECT_ID" - TiDBCloudClusterID string = "TIDBCLOUD_CLUSTER_ID" - UserAgent string = "terraform-provider-tidbcloud" + TiDBCloudPublicKey string = "TIDBCLOUD_PUBLIC_KEY" + TiDBCloudPrivateKey string = "TIDBCLOUD_PRIVATE_KEY" + TiDBCloudHOST string = "TIDBCLOUD_HOST" + TiDBCloudDedicatedEndpoint string = "TIDBCLOUD_DEDICATED_ENDPOINT" + TiDBCloudIAMEndpoint string = "TIDBCLOUD_IAM_ENDPOINT" + TiDBCloudProjectID string = "TIDBCLOUD_PROJECT_ID" + TiDBCloudClusterID string = "TIDBCLOUD_CLUSTER_ID" + UserAgent string = "terraform-provider-tidbcloud" ) // HookGlobal sets `*ptr = val` and returns a closure for restoring `*ptr` to diff --git a/tidbcloud/api_client.go b/tidbcloud/api_client.go index fb967c3..161e07c 100644 --- a/tidbcloud/api_client.go +++ b/tidbcloud/api_client.go @@ -2,13 +2,14 @@ package tidbcloud import ( "fmt" - "github.com/c4pt0r/go-tidbcloud-sdk-v1/client/backup" - "github.com/c4pt0r/go-tidbcloud-sdk-v1/client/restore" - importService "github.com/tidbcloud/terraform-provider-tidbcloud/pkg/import/client/import_service" "net/http" "net/url" "os" + "github.com/c4pt0r/go-tidbcloud-sdk-v1/client/backup" + "github.com/c4pt0r/go-tidbcloud-sdk-v1/client/restore" + importService "github.com/tidbcloud/terraform-provider-tidbcloud/pkg/import/client/import_service" + apiClient 
"github.com/c4pt0r/go-tidbcloud-sdk-v1/client" "github.com/c4pt0r/go-tidbcloud-sdk-v1/client/cluster" "github.com/c4pt0r/go-tidbcloud-sdk-v1/client/project" diff --git a/tidbcloud/dedicated_api_client.go b/tidbcloud/dedicated_api_client.go new file mode 100644 index 0000000..4e70963 --- /dev/null +++ b/tidbcloud/dedicated_api_client.go @@ -0,0 +1,122 @@ +package tidbcloud + +import ( + "context" + "fmt" + "io" + "net/http" + "net/url" + + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/icholy/digest" + "github.com/tidbcloud/tidbcloud-cli/pkg/tidbcloud/v1beta1/dedicated" + "github.com/tidbcloud/tidbcloud-cli/pkg/tidbcloud/v1beta1/iam" +) + +const ( + DefaultDedicatedEndpoint = "https://dedicated.tidbapi.com" + DefaultIAMEndpoint = "https://iam.tidbapi.com" +) + +type TiDBCloudDedicatedClient interface { + // CreateCluster(ctx context.Context, body *dedicated.TidbCloudOpenApidedicatedv1beta1Cluster) (*dedicated.TidbCloudOpenApidedicatedv1beta1Cluster, error) + ListRegions(ctx context.Context) (*dedicated.TidbCloudOpenApidedicatedv1beta1ListRegionsResponse, error) + GetRegion(ctx context.Context, regionId string) (*dedicated.Commonv1beta1Region, error) +} + +func NewDedicatedApiClient(rt http.RoundTripper, dedicatedEndpoint string, iamEndpoint string, userAgent string) (*dedicated.APIClient, *iam.APIClient, error) { + httpClient := &http.Client{ + Transport: rt, + } + + iamURL, err := url.ParseRequestURI(iamEndpoint) + if err != nil { + return nil, nil, err + } + + // v1beta1 api (dedicated) + dedicatedURL, err := url.ParseRequestURI(dedicatedEndpoint) + if err != nil { + return nil, nil, err + } + + iamCfg := iam.NewConfiguration() + iamCfg.HTTPClient = httpClient + iamCfg.Host = iamURL.Host + iamCfg.UserAgent = userAgent + + dedicatedCfg := dedicated.NewConfiguration() + dedicatedCfg.HTTPClient = httpClient + dedicatedCfg.Host = dedicatedURL.Host + dedicatedCfg.UserAgent = userAgent + return dedicated.NewAPIClient(dedicatedCfg), iam.NewAPIClient(iamCfg), nil +} + +type DedicatedClientDelegate struct { + ic *iam.APIClient + dc *dedicated.APIClient +} + +func NewDedicatedClientDelegate(publicKey string, privateKey string, dedicatedEndpoint string, iamEndpoint string, userAgent string) (*DedicatedClientDelegate, error) { + transport := NewTransportWithAgent(&digest.Transport{ + Username: publicKey, + Password: privateKey, + }, userAgent) + + dc, ic, err := NewDedicatedApiClient(transport, dedicatedEndpoint, iamEndpoint, userAgent) + if err != nil { + return nil, err + } + return &DedicatedClientDelegate{ + dc: dc, + ic: ic, + }, nil +} + +func (d *DedicatedClientDelegate) ListRegions(ctx context.Context) (*dedicated.TidbCloudOpenApidedicatedv1beta1ListRegionsResponse, error) { + tflog.Debug(ctx, fmt.Sprintf("dc.cfg: %v", *d.dc.GetConfig())) + resp, h, err := d.dc.RegionServiceAPI.RegionServiceListRegions(ctx).Execute() + tflog.Trace(ctx, fmt.Sprintf("ListRegions: %v, h: %v, err: %v", resp, h, err)) + return resp, parseError(err, h) +} + +func (d *DedicatedClientDelegate) GetRegion(ctx context.Context, regionId string) (*dedicated.Commonv1beta1Region, error) { + resp, h, err := d.dc.RegionServiceAPI.RegionServiceGetRegion(ctx, regionId).Execute() + return resp, parseError(err, h) +} + +// func (d *DedicatedClientDelegate) CreateCluster(ctx context.Context, body *dedicated.TidbCloudOpenApidedicatedv1beta1Cluster) (*dedicated.TidbCloudOpenApidedicatedv1beta1Cluster, error) { +// r := d.dc.ClusterServiceAPI.ClusterServiceCreateCluster(ctx) +// if body != nil { +// r = 
r.Cluster(*body)
+// }
+// c, h, err := r.Execute()
+// return c, parseError(err, h)
+// }

+func parseError(err error, resp *http.Response) error {
+ defer func() {
+ if resp != nil {
+ resp.Body.Close()
+ }
+ }()
+ if err == nil {
+ return nil
+ }
+ if resp == nil {
+ return err
+ }
+ body, err1 := io.ReadAll(resp.Body)
+ if err1 != nil {
+ return err
+ }
+ path := ""
+ if resp.Request != nil {
+ path = fmt.Sprintf("[%s %s]", resp.Request.Method, resp.Request.URL.Path)
+ }
+ traceId := ""
+ if resp.Header.Get("X-Debug-Trace-Id") != "" {
+ traceId = resp.Header.Get("X-Debug-Trace-Id")
+ }
+ return fmt.Errorf("%s[%s][%s] %s", path, err.Error(), traceId, body)
+}

From bfa15afdb776ed81158e5965bdaebf6b4e9fe84c Mon Sep 17 00:00:00 2001
From: FingerLeader
Date: Mon, 2 Dec 2024 17:09:50 +0800
Subject: [PATCH 2/6] update gitignore

Signed-off-by: FingerLeader
---
 .gitignore | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.gitignore b/.gitignore
index 4b8f0ed..1369874 100644
--- a/.gitignore
+++ b/.gitignore
@@ -12,6 +12,7 @@ website/.bundle
 website/build
 website/node_modules
 .vagrant/
+.vscode/
 *.backup
 ./*.tfstate
 .terraform/

From 44012ab45ee467642c176973de83a263cb34e740 Mon Sep 17 00:00:00 2001
From: FingerLeader
Date: Mon, 2 Dec 2024 17:10:27 +0800
Subject: [PATCH 3/6] remove .vscode/settings.json

Signed-off-by: FingerLeader
---
 .vscode/settings.json | 6 ------
 1 file changed, 6 deletions(-)
 delete mode 100644 .vscode/settings.json

diff --git a/.vscode/settings.json b/.vscode/settings.json
deleted file mode 100644
index b4d0c8d..0000000
--- a/.vscode/settings.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
- "cSpell.words": [
- "Apidedicated",
- "Apidedicatedv1beta1"
- ]
-}
\ No newline at end of file

From f3aae640738c9af875730d55789a78ab92c42f60 Mon Sep 17 00:00:00 2001
From: FingerLeader
Date: Wed, 4 Dec 2024 18:16:15 +0800
Subject: [PATCH 4/6] remove dedicated folder and delete page parameters

Signed-off-by: FingerLeader
---
 internal/provider/backup_resource.go | 6 +-
 internal/provider/backups_data_source.go | 6 +-
 internal/provider/cluster_resource.go | 6 +-
 .../provider/cluster_specs_data_source.go | 6 +-
 internal/provider/clusters_data_source.go | 6 +-
 .../dedicated/dedicated_cluster_resource.go | 923 ------------------
 .../provider/dedicated_region_data_source.go | 7 +-
 .../provider/dedicated_regions_data_source.go | 25 +-
 internal/provider/import_resource.go | 6 +-
 internal/provider/projects_data_source.go | 6 +-
 internal/provider/provider.go | 16 +-
 internal/provider/restore_resource.go | 6 +-
 internal/provider/restores_data_source.go | 6 +-
 13 files changed, 44 insertions(+), 981 deletions(-)
 delete mode 100644 internal/provider/dedicated/dedicated_cluster_resource.go

diff --git a/internal/provider/backup_resource.go b/internal/provider/backup_resource.go
index b933c37..276cb08 100644
--- a/internal/provider/backup_resource.go
+++ b/internal/provider/backup_resource.go
@@ -32,7 +32,7 @@ var _ resource.Resource = &backupResource{}
 var _ resource.ResourceWithImportState = &backupResource{}

 type backupResource struct {
- provider *TidbcloudProvider
+ provider *tidbcloudProvider
 }

 func NewBackupResource() resource.Resource {
@@ -50,9 +50,9 @@ func (r *backupResource) Configure(_ context.Context, req resource.ConfigureRequ
 }

 var ok bool
- if r.provider, ok = req.ProviderData.(*TidbcloudProvider); !ok {
+ if r.provider, ok = req.ProviderData.(*tidbcloudProvider); !ok {
 resp.Diagnostics.AddError("Internal provider error",
- fmt.Sprintf("Error in Configure: expected %T but got %T", TidbcloudProvider{}, req.ProviderData))
+ fmt.Sprintf("Error in Configure: expected %T but got %T", tidbcloudProvider{}, req.ProviderData)) } } diff --git a/internal/provider/backups_data_source.go b/internal/provider/backups_data_source.go index 9a11e46..94e1a7f 100644 --- a/internal/provider/backups_data_source.go +++ b/internal/provider/backups_data_source.go @@ -37,7 +37,7 @@ type backup struct { var _ datasource.DataSource = &backupsDataSource{} type backupsDataSource struct { - provider *TidbcloudProvider + provider *tidbcloudProvider } func NewBackupsDataSource() datasource.DataSource { @@ -53,9 +53,9 @@ func (d *backupsDataSource) Configure(_ context.Context, req datasource.Configur return } var ok bool - if d.provider, ok = req.ProviderData.(*TidbcloudProvider); !ok { + if d.provider, ok = req.ProviderData.(*tidbcloudProvider); !ok { resp.Diagnostics.AddError("Internal provider error", - fmt.Sprintf("Error in Configure: expected %T but got %T", TidbcloudProvider{}, req.ProviderData)) + fmt.Sprintf("Error in Configure: expected %T but got %T", tidbcloudProvider{}, req.ProviderData)) } } diff --git a/internal/provider/cluster_resource.go b/internal/provider/cluster_resource.go index bf177ff..aef0e35 100644 --- a/internal/provider/cluster_resource.go +++ b/internal/provider/cluster_resource.go @@ -98,7 +98,7 @@ type ipAccess struct { } type clusterResource struct { - provider *TidbcloudProvider + provider *tidbcloudProvider } func NewClusterResource() resource.Resource { @@ -116,9 +116,9 @@ func (r *clusterResource) Configure(_ context.Context, req resource.ConfigureReq } var ok bool - if r.provider, ok = req.ProviderData.(*TidbcloudProvider); !ok { + if r.provider, ok = req.ProviderData.(*tidbcloudProvider); !ok { resp.Diagnostics.AddError("Internal provider error", - fmt.Sprintf("Error in Configure: expected %T but got %T", TidbcloudProvider{}, req.ProviderData)) + fmt.Sprintf("Error in Configure: expected %T but got %T", tidbcloudProvider{}, req.ProviderData)) } } diff --git a/internal/provider/cluster_specs_data_source.go b/internal/provider/cluster_specs_data_source.go index b97a6e3..6e26f2f 100644 --- a/internal/provider/cluster_specs_data_source.go +++ b/internal/provider/cluster_specs_data_source.go @@ -60,7 +60,7 @@ type storageSizeGiRange struct { var _ datasource.DataSource = &clusterSpecsDataSource{} type clusterSpecsDataSource struct { - provider *TidbcloudProvider + provider *tidbcloudProvider } func NewClusterSpecsDataSource() datasource.DataSource { @@ -76,9 +76,9 @@ func (d *clusterSpecsDataSource) Configure(_ context.Context, req datasource.Con return } var ok bool - if d.provider, ok = req.ProviderData.(*TidbcloudProvider); !ok { + if d.provider, ok = req.ProviderData.(*tidbcloudProvider); !ok { resp.Diagnostics.AddError("Internal provider error", - fmt.Sprintf("Error in Configure: expected %T but got %T", TidbcloudProvider{}, req.ProviderData)) + fmt.Sprintf("Error in Configure: expected %T but got %T", tidbcloudProvider{}, req.ProviderData)) } } diff --git a/internal/provider/clusters_data_source.go b/internal/provider/clusters_data_source.go index e376ad7..2c36724 100644 --- a/internal/provider/clusters_data_source.go +++ b/internal/provider/clusters_data_source.go @@ -65,7 +65,7 @@ type connectionVpcPeering struct { var _ datasource.DataSource = &clustersDataSource{} type clustersDataSource struct { - provider *TidbcloudProvider + provider *tidbcloudProvider } func NewClustersDataSource() datasource.DataSource { @@ -81,9 +81,9 @@ func (d *clustersDataSource) Configure(_ context.Context, req 
datasource.Configu return } var ok bool - if d.provider, ok = req.ProviderData.(*TidbcloudProvider); !ok { + if d.provider, ok = req.ProviderData.(*tidbcloudProvider); !ok { resp.Diagnostics.AddError("Internal provider error", - fmt.Sprintf("Error in Configure: expected %T but got %T", TidbcloudProvider{}, req.ProviderData)) + fmt.Sprintf("Error in Configure: expected %T but got %T", tidbcloudProvider{}, req.ProviderData)) } } diff --git a/internal/provider/dedicated/dedicated_cluster_resource.go b/internal/provider/dedicated/dedicated_cluster_resource.go deleted file mode 100644 index ef0882d..0000000 --- a/internal/provider/dedicated/dedicated_cluster_resource.go +++ /dev/null @@ -1,923 +0,0 @@ -package dedicated - -// import ( -// "context" -// "fmt" -// "net/http" -// "sort" -// "strings" -// "time" - -// clusterApi "github.com/c4pt0r/go-tidbcloud-sdk-v1/client/cluster" -// "github.com/hashicorp/terraform-plugin-framework/path" -// "github.com/hashicorp/terraform-plugin-framework/resource" -// "github.com/hashicorp/terraform-plugin-framework/resource/schema" -// "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64planmodifier" -// "github.com/hashicorp/terraform-plugin-framework/resource/schema/objectplanmodifier" -// "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" -// "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" -// "github.com/hashicorp/terraform-plugin-framework/types" -// "github.com/hashicorp/terraform-plugin-log/tflog" -// "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" -// "github.com/tidbcloud/terraform-provider-tidbcloud/tidbcloud" -// ) - -// // Enum: [AVAILABLE CREATING MODIFYING PAUSED RESUMING UNAVAILABLE IMPORTING MAINTAINING PAUSING] -// type dedicatedClusterStatus string - -// const ( -// dedicatedClusterStatusCreating dedicatedClusterStatus = "CREATING" -// dedicatedClusterStatusAvailable dedicatedClusterStatus = "AVAILABLE" -// dedicatedClusterStatusModifying dedicatedClusterStatus = "MODIFYING" -// dedicatedClusterStatusPaused dedicatedClusterStatus = "PAUSED" -// dedicatedClusterStatusResuming dedicatedClusterStatus = "RESUMING" -// dedicatedClusterStatusUnavailable dedicatedClusterStatus = "UNAVAILABLE" -// dedicatedClusterStatusImporting dedicatedClusterStatus = "IMPORTING" -// dedicatedClusterStatusMaintaining dedicatedClusterStatus = "MAINTAINING" -// dedicatedClusterStatusPausing dedicatedClusterStatus = "PAUSING" -// ) - -// // const ( -// // clusterServerlessCreateTimeout = 180 * time.Second -// // clusterServerlessCreateInterval = 2 * time.Second -// // clusterCreateTimeout = time.Hour -// // clusterCreateInterval = 60 * time.Second -// // clusterUpdateTimeout = time.Hour -// // clusterUpdateInterval = 20 * time.Second -// // ) - -// type dedicatedClusterResourceData struct { -// ClusterId types.String `tfsdk:"id"` -// ProjectId string `tfsdk:"project_id"` -// Name string `tfsdk:"name"` -// ClusterType string `tfsdk:"cluster_type"` -// CloudProvider string `tfsdk:"cloud_provider"` -// RegionId string `tfsdk:"region_id"` -// CreateTime types.String `tfsdk:"create_time"` -// Labels map[string]string `tfsdk:"labels"` -// RootPassword types.String `tfsdk:"root_password"` -// Status *dedicatedClusterStatus `tfsdk:"status"` -// } - -// type clusterConfig struct { -// Paused *bool `tfsdk:"paused"` -// RootPassword types.String `tfsdk:"root_password"` -// Port types.Int64 `tfsdk:"port"` -// Components *components `tfsdk:"components"` -// IPAccessList []ipAccess 
`tfsdk:"ip_access_list"` -// } - -// type components struct { -// TiDB *componentTiDB `tfsdk:"tidb"` -// TiKV *componentTiKV `tfsdk:"tikv"` -// TiFlash *componentTiFlash `tfsdk:"tiflash"` -// } - -// type componentTiDB struct { -// NodeSize string `tfsdk:"node_size"` -// NodeQuantity int32 `tfsdk:"node_quantity"` -// } - -// type componentTiKV struct { -// NodeSize string `tfsdk:"node_size"` -// StorageSizeGib int32 `tfsdk:"storage_size_gib"` -// NodeQuantity int32 `tfsdk:"node_quantity"` -// } - -// type componentTiFlash struct { -// NodeSize string `tfsdk:"node_size"` -// StorageSizeGib int32 `tfsdk:"storage_size_gib"` -// NodeQuantity int32 `tfsdk:"node_quantity"` -// } - -// type ipAccess struct { -// CIDR string `tfsdk:"cidr"` -// Description string `tfsdk:"description"` -// } - -// type clusterResource struct { -// provider *tidbcloudProvider -// } - -// func NewClusterResource() resource.Resource { -// return &clusterResource{} -// } - -// func (r *clusterResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { -// resp.TypeName = req.ProviderTypeName + "_cluster" -// } - -// func (r *clusterResource) Configure(_ context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { -// // Prevent panic if the provider has not been configured. -// if req.ProviderData == nil { -// return -// } - -// var ok bool -// if r.provider, ok = req.ProviderData.(*tidbcloudProvider); !ok { -// resp.Diagnostics.AddError("Internal provider error", -// fmt.Sprintf("Error in Configure: expected %T but got %T", tidbcloudProvider{}, req.ProviderData)) -// } -// } - -// func (r *clusterResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { -// resp.Schema = schema.Schema{ -// MarkdownDescription: "cluster resource", -// Attributes: map[string]schema.Attribute{ -// "project_id": schema.StringAttribute{ -// MarkdownDescription: "The ID of the project. You can get the project ID from [tidbcloud_projects datasource](../data-sources/projects.md).", -// Required: true, -// }, -// "name": schema.StringAttribute{ -// MarkdownDescription: "The name of the cluster.", -// Required: true, -// }, -// "id": schema.StringAttribute{ -// Computed: true, -// MarkdownDescription: "The ID of the cluster.", -// PlanModifiers: []planmodifier.String{ -// stringplanmodifier.UseStateForUnknown(), -// }, -// }, -// "cluster_type": schema.StringAttribute{ -// MarkdownDescription: "Enum: \"DEDICATED\" \"DEVELOPER\", The cluster type.", -// Required: true, -// }, -// "cloud_provider": schema.StringAttribute{ -// MarkdownDescription: "Enum: \"AWS\" \"GCP\", The cloud provider on which your TiDB cluster is hosted.", -// Required: true, -// }, -// "create_timestamp": schema.StringAttribute{ -// MarkdownDescription: "The creation time of the cluster in Unix timestamp seconds (epoch time).", -// Computed: true, -// PlanModifiers: []planmodifier.String{ -// stringplanmodifier.UseStateForUnknown(), -// }, -// }, -// "region": schema.StringAttribute{ -// MarkdownDescription: "the region value should match the cloud provider's region code. 
You can get the complete list of available regions from the [tidbcloud_cluster_specs datasource](../data-sources/cluster_specs.md).", -// Required: true, -// }, -// "status": schema.SingleNestedAttribute{ -// MarkdownDescription: "The status of the cluster.", -// Computed: true, -// PlanModifiers: []planmodifier.Object{ -// clusterResourceStatus(), -// }, -// Attributes: map[string]schema.Attribute{ -// "tidb_version": schema.StringAttribute{ -// MarkdownDescription: "TiDB version.", -// Computed: true, -// PlanModifiers: []planmodifier.String{ -// stringplanmodifier.UseStateForUnknown(), -// }, -// }, -// "cluster_status": schema.StringAttribute{ -// MarkdownDescription: "Status of the cluster.", -// Computed: true, -// }, -// "connection_strings": schema.SingleNestedAttribute{ -// MarkdownDescription: "Connection strings.", -// Computed: true, -// PlanModifiers: []planmodifier.Object{ -// objectplanmodifier.UseStateForUnknown(), -// }, -// Attributes: map[string]schema.Attribute{ -// "default_user": schema.StringAttribute{ -// MarkdownDescription: "The default TiDB user for connection.", -// Computed: true, -// PlanModifiers: []planmodifier.String{ -// stringplanmodifier.UseStateForUnknown(), -// }, -// }, -// "standard": schema.SingleNestedAttribute{ -// MarkdownDescription: "Standard connection string.", -// Computed: true, -// PlanModifiers: []planmodifier.Object{ -// objectplanmodifier.UseStateForUnknown(), -// }, -// Attributes: map[string]schema.Attribute{ -// "host": schema.StringAttribute{ -// MarkdownDescription: "The host of standard connection.", -// Computed: true, -// PlanModifiers: []planmodifier.String{ -// stringplanmodifier.UseStateForUnknown(), -// }, -// }, -// "port": schema.Int64Attribute{ -// MarkdownDescription: "The TiDB port for connection. The port must be in the range of 1024-65535 except 10080.", -// Computed: true, -// PlanModifiers: []planmodifier.Int64{ -// int64planmodifier.UseStateForUnknown(), -// }, -// }, -// }, -// }, -// "vpc_peering": schema.SingleNestedAttribute{ -// MarkdownDescription: "VPC peering connection string.", -// Computed: true, -// PlanModifiers: []planmodifier.Object{ -// objectplanmodifier.UseStateForUnknown(), -// }, -// Attributes: map[string]schema.Attribute{ -// "host": schema.StringAttribute{ -// MarkdownDescription: "The host of VPC peering connection.", -// Computed: true, -// PlanModifiers: []planmodifier.String{ -// stringplanmodifier.UseStateForUnknown(), -// }, -// }, -// "port": schema.Int64Attribute{ -// MarkdownDescription: "The TiDB port for connection. The port must be in the range of 1024-65535 except 10080.", -// Computed: true, -// PlanModifiers: []planmodifier.Int64{ -// int64planmodifier.UseStateForUnknown(), -// }, -// }, -// }, -// }, -// }, -// }, -// }, -// }, -// "config": schema.SingleNestedAttribute{ -// MarkdownDescription: "The configuration of the cluster.", -// Required: true, -// Attributes: map[string]schema.Attribute{ -// "root_password": schema.StringAttribute{ -// MarkdownDescription: "The root password to access the cluster. It must be 8-64 characters.", -// Optional: true, -// }, -// "port": schema.Int64Attribute{ -// MarkdownDescription: "The TiDB port for connection. 
The port must be in the range of 1024-65535 except 10080, 4000 in default.\n" + -// " - For a Serverless Tier cluster, only port 4000 is available.", -// Optional: true, -// Computed: true, -// PlanModifiers: []planmodifier.Int64{ -// int64planmodifier.UseStateForUnknown(), -// }, -// }, -// "paused": schema.BoolAttribute{ -// MarkdownDescription: "lag that indicates whether the cluster is paused. true means to pause the cluster, and false means to resume the cluster.\n" + -// " - The cluster can be paused only when the cluster_status is \"AVAILABLE\"." + -// " - The cluster can be resumed only when the cluster_status is \"PAUSED\".", -// Optional: true, -// }, -// "components": schema.SingleNestedAttribute{ -// MarkdownDescription: "The components of the cluster.\n" + -// " - For a Serverless Tier cluster, the components value can not be set." + -// " - For a Dedicated Tier cluster, the components value must be set.", -// Optional: true, -// Computed: true, -// PlanModifiers: []planmodifier.Object{ -// objectplanmodifier.UseStateForUnknown(), -// }, -// Attributes: map[string]schema.Attribute{ -// "tidb": schema.SingleNestedAttribute{ -// MarkdownDescription: "The TiDB component of the cluster", -// Required: true, -// PlanModifiers: []planmodifier.Object{ -// objectplanmodifier.UseStateForUnknown(), -// }, -// Attributes: map[string]schema.Attribute{ -// "node_size": schema.StringAttribute{ -// Required: true, -// MarkdownDescription: "The size of the TiDB component in the cluster, You can get the available node size of each region from the [tidbcloud_cluster_specs datasource](../data-sources/cluster_specs.md).\n" + -// " - If the vCPUs of TiDB or TiKV component is 2 or 4, then their vCPUs need to be the same.\n" + -// " - If the vCPUs of TiDB or TiKV component is 2 or 4, then the cluster does not support TiFlash.\n" + -// " - Can not modify node_size of an existing cluster.", -// PlanModifiers: []planmodifier.String{ -// stringplanmodifier.UseStateForUnknown(), -// }, -// }, -// "node_quantity": schema.Int64Attribute{ -// MarkdownDescription: "The number of nodes in the cluster. You can get the minimum and step of a node quantity from the [tidbcloud_cluster_specs datasource](../data-sources/cluster_specs.md).", -// Required: true, -// PlanModifiers: []planmodifier.Int64{ -// int64planmodifier.UseStateForUnknown(), -// }, -// }, -// }, -// }, -// "tikv": schema.SingleNestedAttribute{ -// MarkdownDescription: "The TiKV component of the cluster", -// Required: true, -// PlanModifiers: []planmodifier.Object{ -// objectplanmodifier.UseStateForUnknown(), -// }, -// Attributes: map[string]schema.Attribute{ -// "node_size": schema.StringAttribute{ -// MarkdownDescription: "The size of the TiKV component in the cluster, You can get the available node size of each region from the [tidbcloud_cluster_specs datasource](../data-sources/cluster_specs.md).\n" + -// " - If the vCPUs of TiDB or TiKV component is 2 or 4, then their vCPUs need to be the same.\n" + -// " - If the vCPUs of TiDB or TiKV component is 2 or 4, then the cluster does not support TiFlash.\n" + -// " - Can not modify node_size of an existing cluster.", -// Required: true, -// PlanModifiers: []planmodifier.String{ -// stringplanmodifier.UseStateForUnknown(), -// }, -// }, -// "storage_size_gib": schema.Int64Attribute{ -// MarkdownDescription: "The storage size of a node in the cluster. 
You can get the minimum and maximum of storage size from the [tidbcloud_cluster_specs datasource](../data-sources/cluster_specs.md).\n" + -// " - Can not modify storage_size_gib of an existing cluster.", -// Required: true, -// PlanModifiers: []planmodifier.Int64{ -// int64planmodifier.UseStateForUnknown(), -// }, -// }, -// "node_quantity": schema.Int64Attribute{ -// MarkdownDescription: "The number of nodes in the cluster. You can get the minimum and step of a node quantity from the [tidbcloud_cluster_specs datasource](../data-sources/cluster_specs.md).\n" + -// " - TiKV do not support decreasing node quantity.\n" + -// " - The node_quantity of TiKV must be a multiple of 3.", -// Required: true, -// PlanModifiers: []planmodifier.Int64{ -// int64planmodifier.UseStateForUnknown(), -// }, -// }, -// }, -// }, -// "tiflash": schema.SingleNestedAttribute{ -// MarkdownDescription: "The TiFlash component of the cluster.", -// Optional: true, -// PlanModifiers: []planmodifier.Object{ -// objectplanmodifier.UseStateForUnknown(), -// }, -// Attributes: map[string]schema.Attribute{ -// "node_size": schema.StringAttribute{ -// MarkdownDescription: "The size of the TiFlash component in the cluster, You can get the available node size of each region from the [tidbcloud_cluster_specs datasource](../data-sources/cluster_specs.md).\n" + -// " - Can not modify node_size of an existing cluster.", -// Required: true, -// PlanModifiers: []planmodifier.String{ -// stringplanmodifier.UseStateForUnknown(), -// }, -// }, -// "storage_size_gib": schema.Int64Attribute{ -// MarkdownDescription: "The storage size of a node in the cluster. You can get the minimum and maximum of storage size from the [tidbcloud_cluster_specs datasource](../data-sources/cluster_specs.md).\n" + -// " - Can not modify storage_size_gib of an existing cluster.", -// Required: true, -// PlanModifiers: []planmodifier.Int64{ -// int64planmodifier.UseStateForUnknown(), -// }, -// }, -// "node_quantity": schema.Int64Attribute{ -// MarkdownDescription: "The number of nodes in the cluster. 
You can get the minimum and step of a node quantity from the [tidbcloud_cluster_specs datasource](../data-sources/cluster_specs.md).\n" + -// " - TiFlash do not support decreasing node quantity.", -// Required: true, -// PlanModifiers: []planmodifier.Int64{ -// int64planmodifier.UseStateForUnknown(), -// }, -// }, -// }, -// }, -// }, -// }, -// "ip_access_list": schema.ListNestedAttribute{ -// MarkdownDescription: "A list of IP addresses and Classless Inter-Domain Routing (CIDR) addresses that are allowed to access the TiDB Cloud cluster via [standard connection](https://docs.pingcap.com/tidbcloud/connect-to-tidb-cluster#connect-via-standard-connection).", -// Optional: true, -// NestedObject: schema.NestedAttributeObject{ -// Attributes: map[string]schema.Attribute{ -// "cidr": schema.StringAttribute{ -// MarkdownDescription: "The IP address or CIDR range that you want to add to the cluster's IP access list.", -// Required: true, -// }, -// "description": schema.StringAttribute{ -// MarkdownDescription: "Description that explains the purpose of the entry.", -// Required: true, -// }, -// }, -// }, -// }, -// }, -// }, -// }, -// } -// } - -// func (r clusterResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { -// if !r.provider.configured { -// resp.Diagnostics.AddError( -// "Provider not configured", -// "The provider hasn't been configured before apply, likely because it depends on an unknown value from another resource. This leads to weird stuff happening, so we'd prefer if you didn't do that. Thanks!", -// ) -// return -// } - -// // get data from config -// var data clusterResourceData -// diags := req.Config.Get(ctx, &data) -// resp.Diagnostics.Append(diags...) -// if resp.Diagnostics.HasError() { -// return -// } - -// // for Serverless cluster, components is not allowed. or plan and state may be inconsistent -// if data.ClusterType == dev { -// if data.Config.Components != nil { -// resp.Diagnostics.AddError("Create Error", fmt.Sprintf("components is not allowed in %s cluster_type", dev)) -// return -// } -// } - -// // for DEDICATED cluster, components is required. -// if data.ClusterType == ded { -// if data.Config.Components == nil { -// resp.Diagnostics.AddError("Create Error", fmt.Sprintf("components is required in %s cluster_type", ded)) -// return -// } -// } - -// // write logs using the tflog package -// // see https://pkg.go.dev/github.com/hashicorp/terraform-plugin-log/tflog -// tflog.Trace(ctx, "created cluster_resource") -// createClusterParams := clusterApi.NewCreateClusterParams().WithProjectID(data.ProjectId).WithBody(buildCreateClusterBody(data)) -// createClusterResp, err := r.provider.client.CreateCluster(createClusterParams) -// if err != nil { -// resp.Diagnostics.AddError("Create Error", fmt.Sprintf("Unable to call CreateCluster, got error: %s", err)) -// return -// } -// // set clusterId. 
other computed attributes are not returned by create, they will be set when refresh -// clusterId := *createClusterResp.Payload.ID -// data.ClusterId = types.StringValue(clusterId) -// if r.provider.sync { -// var cluster *clusterApi.GetClusterOKBody -// if data.ClusterType == dev { -// tflog.Info(ctx, "wait serverless cluster ready") -// cluster, err = WaitClusterReady(ctx, clusterServerlessCreateTimeout, clusterServerlessCreateInterval, data.ProjectId, clusterId, r.provider.client) -// if err != nil { -// resp.Diagnostics.AddError( -// "Cluster creation failed", -// fmt.Sprintf("Cluster is not ready, get error: %s", err), -// ) -// return -// } -// } else { -// tflog.Info(ctx, "wait dedicated cluster ready") -// cluster, err = WaitClusterReady(ctx, clusterCreateTimeout, clusterCreateInterval, data.ProjectId, clusterId, r.provider.client) -// if err != nil { -// resp.Diagnostics.AddError( -// "Cluster creation failed", -// fmt.Sprintf("Cluster is not ready, get error: %s", err), -// ) -// return -// } -// } -// refreshClusterResourceData(ctx, cluster, &data) -// } else { -// // we refresh in create for any unknown value. if someone has other opinions which is better, he can delete the refresh logic -// tflog.Trace(ctx, "read cluster_resource") -// getClusterParams := clusterApi.NewGetClusterParams().WithProjectID(data.ProjectId).WithClusterID(data.ClusterId.ValueString()) -// getClusterResp, err := r.provider.client.GetCluster(getClusterParams) -// if err != nil { -// resp.Diagnostics.AddError("Create Error", fmt.Sprintf("Unable to call GetCluster, got error: %s", err)) -// return -// } -// refreshClusterResourceData(ctx, getClusterResp.Payload, &data) -// } - -// // save into the Terraform state. -// diags = resp.State.Set(ctx, &data) -// resp.Diagnostics.Append(diags...) 
-// } - -// func buildCreateClusterBody(data clusterResourceData) clusterApi.CreateClusterBody { -// // required -// rootPassWord := data.Config.RootPassword.ValueString() -// payload := clusterApi.CreateClusterBody{ -// Name: &data.Name, -// ClusterType: &data.ClusterType, -// CloudProvider: &data.CloudProvider, -// Region: &data.Region, -// Config: &clusterApi.CreateClusterParamsBodyConfig{ -// RootPassword: &rootPassWord, -// }, -// } - -// // optional -// if data.Config.Components != nil { -// tidb := data.Config.Components.TiDB -// tikv := data.Config.Components.TiKV -// tiflash := data.Config.Components.TiFlash - -// components := &clusterApi.CreateClusterParamsBodyConfigComponents{ -// Tidb: &clusterApi.CreateClusterParamsBodyConfigComponentsTidb{ -// NodeSize: &tidb.NodeSize, -// NodeQuantity: &tidb.NodeQuantity, -// }, -// Tikv: &clusterApi.CreateClusterParamsBodyConfigComponentsTikv{ -// NodeSize: &tikv.NodeSize, -// StorageSizeGib: &tikv.StorageSizeGib, -// NodeQuantity: &tikv.NodeQuantity, -// }, -// } -// // tiflash is optional -// if tiflash != nil { -// components.Tiflash = &clusterApi.CreateClusterParamsBodyConfigComponentsTiflash{ -// NodeSize: &tiflash.NodeSize, -// StorageSizeGib: &tiflash.StorageSizeGib, -// NodeQuantity: &tiflash.NodeQuantity, -// } -// } - -// payload.Config.Components = components -// } -// if data.Config.IPAccessList != nil { -// var IPAccessList []*clusterApi.CreateClusterParamsBodyConfigIPAccessListItems0 -// for _, key := range data.Config.IPAccessList { -// cidr := key.CIDR -// IPAccessList = append(IPAccessList, &clusterApi.CreateClusterParamsBodyConfigIPAccessListItems0{ -// Cidr: &cidr, -// Description: key.Description, -// }) -// } -// payload.Config.IPAccessList = IPAccessList -// } -// if !data.Config.Port.IsNull() && !data.Config.Port.IsUnknown() { -// payload.Config.Port = int32(data.Config.Port.ValueInt64()) -// } - -// return payload -// } - -// func (r clusterResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { -// var projectId, clusterId string - -// resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root("project_id"), &projectId)...) -// resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root("id"), &clusterId)...) -// if resp.Diagnostics.HasError() { -// return -// } - -// // call read api -// tflog.Trace(ctx, "read cluster_resource") -// getClusterParams := clusterApi.NewGetClusterParams().WithProjectID(projectId).WithClusterID(clusterId) -// getClusterResp, err := r.provider.client.GetCluster(getClusterParams) -// if err != nil { -// resp.Diagnostics.AddError("Read Error", fmt.Sprintf("Unable to call GetClusterById, got error: %s", err)) -// return -// } - -// // refresh data with read result -// var data clusterResourceData -// // root_password, ip_access_list and pause will not return by read api, so we just use state's value even it changed on console! -// // use types.String in case ImportState method throw unhandled null value -// var rootPassword types.String -// var iPAccessList []ipAccess -// var paused *bool -// resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root("config").AtName("root_password"), &rootPassword)...) -// resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root("config").AtName("ip_access_list"), &iPAccessList)...) -// resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root("config").AtName("paused"), &paused)...) 
-// data.Config.RootPassword = rootPassword -// data.Config.IPAccessList = iPAccessList -// data.Config.Paused = paused -// refreshClusterResourceData(ctx, getClusterResp.Payload, &data) - -// // save into the Terraform state -// diags := resp.State.Set(ctx, &data) -// resp.Diagnostics.Append(diags...) -// } - -// func refreshClusterResourceData(ctx context.Context, resp *clusterApi.GetClusterOKBody, data *clusterResourceData) { -// // must return -// data.Name = resp.Name -// data.ClusterId = types.StringValue(*resp.ID) -// data.Region = resp.Region -// data.ProjectId = *resp.ProjectID -// data.ClusterType = resp.ClusterType -// data.CloudProvider = resp.CloudProvider -// data.CreateTimestamp = types.StringValue(resp.CreateTimestamp) -// data.Config.Port = types.Int64Value(int64(resp.Config.Port)) -// tidb := resp.Config.Components.Tidb -// tikv := resp.Config.Components.Tikv -// data.Config.Components = &components{ -// TiDB: &componentTiDB{ -// NodeSize: *tidb.NodeSize, -// NodeQuantity: *tidb.NodeQuantity, -// }, -// TiKV: &componentTiKV{ -// NodeSize: *tikv.NodeSize, -// NodeQuantity: *tikv.NodeQuantity, -// StorageSizeGib: *tikv.StorageSizeGib, -// }, -// } - -// var standard connectionStandard -// var vpcPeering connectionVpcPeering -// if resp.Status.ConnectionStrings.Standard != nil { -// standard.Host = resp.Status.ConnectionStrings.Standard.Host -// standard.Port = resp.Status.ConnectionStrings.Standard.Port -// } -// if resp.Status.ConnectionStrings.VpcPeering != nil { -// vpcPeering.Host = resp.Status.ConnectionStrings.VpcPeering.Host -// vpcPeering.Port = resp.Status.ConnectionStrings.VpcPeering.Port -// } -// data.Status = &clusterStatusDataSource{ -// TidbVersion: resp.Status.TidbVersion, -// ClusterStatus: types.StringValue(resp.Status.ClusterStatus), -// ConnectionStrings: &connection{ -// DefaultUser: resp.Status.ConnectionStrings.DefaultUser, -// Standard: &standard, -// VpcPeering: &vpcPeering, -// }, -// } -// // may return -// tiflash := resp.Config.Components.Tiflash -// if tiflash != nil { -// data.Config.Components.TiFlash = &componentTiFlash{ -// NodeSize: *tiflash.NodeSize, -// NodeQuantity: *tiflash.NodeQuantity, -// StorageSizeGib: *tiflash.StorageSizeGib, -// } -// } - -// // not return -// // IPAccessList, and password and pause will not update for it will not return by read api - -// } - -// // Update since open api is patch without check for the invalid parameter. we do a lot of check here to avoid inconsistency -// // check the date can't be updated -// // if plan and state is different, we can execute updated -// func (r clusterResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { -// // get plan -// var data clusterResourceData -// diags := req.Plan.Get(ctx, &data) -// resp.Diagnostics.Append(diags...) -// if resp.Diagnostics.HasError() { -// return -// } -// // get state -// var state clusterResourceData -// diags = req.State.Get(ctx, &state) -// resp.Diagnostics.Append(diags...) 
-// if resp.Diagnostics.HasError() { -// return -// } - -// // Severless can not be changed now -// if data.ClusterType == dev { -// resp.Diagnostics.AddError( -// "Update error", -// "Unable to update Serverless cluster", -// ) -// return -// } - -// // only components and paused can be changed now -// if data.Name != state.Name || data.ClusterType != state.ClusterType || data.Region != state.Region || data.CloudProvider != state.CloudProvider || -// data.ProjectId != state.ProjectId || data.ClusterId != state.ClusterId { -// resp.Diagnostics.AddError( -// "Update error", -// "You may update the name,cluster_type,region,cloud_provider or projectId. They can not be changed, only components can be changed now", -// ) -// return -// } -// if !data.Config.Port.IsNull() && !data.Config.Port.IsNull() && data.Config.Port.ValueInt64() != state.Config.Port.ValueInt64() { -// resp.Diagnostics.AddError( -// "Update error", -// "port can not be changed, only components can be changed now", -// ) -// return -// } -// if data.Config.IPAccessList != nil { -// // You cannot add an IP access list to an existing cluster without an IP rule. -// if len(state.Config.IPAccessList) == 0 { -// resp.Diagnostics.AddError( -// "Update error", -// "ip_access_list can not be added to the existing cluster.", -// ) -// return -// } - -// // You cannot insert or delete IP rule. -// if len(data.Config.IPAccessList) != len(state.Config.IPAccessList) { -// resp.Diagnostics.AddError( -// "Update error", -// "ip_access_list can not be changed, only components can be changed now", -// ) -// return -// } - -// // You cannot update the IP rule. -// newIPAccessList := make([]ipAccess, len(data.Config.IPAccessList)) -// copy(newIPAccessList, data.Config.IPAccessList) -// sort.Slice(newIPAccessList, func(i, j int) bool { -// return newIPAccessList[i].CIDR < newIPAccessList[j].CIDR -// }) - -// currentIPAccessList := make([]ipAccess, len(state.Config.IPAccessList)) -// copy(currentIPAccessList, state.Config.IPAccessList) -// sort.Slice(currentIPAccessList, func(i, j int) bool { -// return currentIPAccessList[i].CIDR < currentIPAccessList[j].CIDR -// }) - -// for index, key := range newIPAccessList { -// if currentIPAccessList[index].CIDR != key.CIDR || currentIPAccessList[index].Description != key.Description { -// resp.Diagnostics.AddError( -// "Update error", -// "ip_access_list can not be changed, only components can be changed now", -// ) -// return -// } -// } -// } else { -// // You cannot remove the IP access list. 
-// if len(state.Config.IPAccessList) > 0 { -// resp.Diagnostics.AddError( -// "Update error", -// "ip_access_list can not be changed, only components can be changed now", -// ) -// return -// } -// } - -// // check Components -// tidb := data.Config.Components.TiDB -// tikv := data.Config.Components.TiKV -// tiflash := data.Config.Components.TiFlash -// tidbState := state.Config.Components.TiDB -// tikvState := state.Config.Components.TiKV -// tiflashState := state.Config.Components.TiFlash -// if tidb.NodeSize != tidbState.NodeSize { -// resp.Diagnostics.AddError( -// "Update error", -// "tidb node_size can't be changed", -// ) -// return -// } -// if tikv.NodeSize != tikvState.NodeSize || tikv.StorageSizeGib != tikvState.StorageSizeGib { -// resp.Diagnostics.AddError( -// "Update error", -// "tikv node_size or storage_size_gib can't be changed", -// ) -// return -// } -// if tiflash != nil && tiflashState != nil { -// // if cluster have tiflash already, then we can't specify NodeSize and StorageSizeGib -// if tiflash.NodeSize != tiflashState.NodeSize || tiflash.StorageSizeGib != tiflashState.StorageSizeGib { -// resp.Diagnostics.AddError( -// "Update error", -// "tiflash node_size or storage_size_gib can't be changed", -// ) -// return -// } -// } - -// // build UpdateClusterBody -// var updateClusterBody clusterApi.UpdateClusterBody -// updateClusterBody.Config = &clusterApi.UpdateClusterParamsBodyConfig{} -// // build paused -// if data.Config.Paused != nil { -// if state.Config.Paused == nil || *data.Config.Paused != *state.Config.Paused { -// updateClusterBody.Config.Paused = data.Config.Paused -// } -// } -// // build components -// var isComponentsChanged = false -// if tidb.NodeQuantity != tidbState.NodeQuantity || tikv.NodeQuantity != tikvState.NodeQuantity { -// isComponentsChanged = true -// } - -// var componentTiFlash *clusterApi.UpdateClusterParamsBodyConfigComponentsTiflash -// if tiflash != nil { -// if tiflashState == nil { -// isComponentsChanged = true -// componentTiFlash = &clusterApi.UpdateClusterParamsBodyConfigComponentsTiflash{ -// NodeQuantity: &tiflash.NodeQuantity, -// NodeSize: &tiflash.NodeSize, -// StorageSizeGib: &tiflash.StorageSizeGib, -// } -// } else if tiflash.NodeQuantity != tiflashState.NodeQuantity { -// isComponentsChanged = true -// // NodeSize can't be changed -// componentTiFlash = &clusterApi.UpdateClusterParamsBodyConfigComponentsTiflash{ -// NodeQuantity: &tiflash.NodeQuantity, -// } -// } -// } - -// if isComponentsChanged { -// updateClusterBody.Config.Components = &clusterApi.UpdateClusterParamsBodyConfigComponents{ -// Tidb: &clusterApi.UpdateClusterParamsBodyConfigComponentsTidb{ -// NodeQuantity: &tidb.NodeQuantity, -// }, -// Tikv: &clusterApi.UpdateClusterParamsBodyConfigComponentsTikv{ -// NodeQuantity: &tikv.NodeQuantity, -// }, -// Tiflash: componentTiFlash, -// } -// } - -// tflog.Trace(ctx, "update cluster_resource") -// updateClusterParams := clusterApi.NewUpdateClusterParams().WithProjectID(data.ProjectId).WithClusterID(data.ClusterId.ValueString()).WithBody(updateClusterBody) -// _, err := r.provider.client.UpdateCluster(updateClusterParams) -// if err != nil { -// resp.Diagnostics.AddError("Update Error", fmt.Sprintf("Unable to call UpdateClusterById, got error: %s", err)) -// return -// } - -// if r.provider.sync { -// tflog.Info(ctx, "wait cluster ready") -// cluster, err := WaitClusterReady(ctx, clusterUpdateTimeout, clusterUpdateInterval, data.ProjectId, data.ClusterId.ValueString(), r.provider.client) -// if err != nil 
{ -// resp.Diagnostics.AddError( -// "Cluster update failed", -// fmt.Sprintf("Cluster is not ready, get error: %s", err), -// ) -// return -// } -// refreshClusterResourceData(ctx, cluster, &data) -// } else { -// // we refresh for any unknown value. if someone has other opinions which is better, he can delete the refresh logic -// tflog.Trace(ctx, "read cluster_resource") -// getClusterResp, err := r.provider.client.GetCluster(clusterApi.NewGetClusterParams().WithProjectID(data.ProjectId).WithClusterID(data.ClusterId.ValueString())) -// if err != nil { -// resp.Diagnostics.AddError("Update Error", fmt.Sprintf("Unable to call GetClusterById, got error: %s", err)) -// return -// } -// refreshClusterResourceData(ctx, getClusterResp.Payload, &data) -// } - -// // save into the Terraform state. -// diags = resp.State.Set(ctx, &data) -// resp.Diagnostics.Append(diags...) -// } - -// func (r clusterResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { -// var data clusterResourceData - -// diags := req.State.Get(ctx, &data) -// resp.Diagnostics.Append(diags...) - -// if resp.Diagnostics.HasError() { -// return -// } - -// tflog.Trace(ctx, "delete cluster_resource") -// _, err := r.provider.client.DeleteCluster(clusterApi.NewDeleteClusterParams().WithProjectID(data.ProjectId).WithClusterID(data.ClusterId.ValueString())) -// if err != nil { -// resp.Diagnostics.AddError("Delete Error", fmt.Sprintf("Unable to call DeleteClusterById, got error: %s", err)) -// return -// } -// } - -// func (r clusterResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { -// idParts := strings.Split(req.ID, ",") - -// if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" { -// resp.Diagnostics.AddError( -// "Unexpected Import Identifier", -// fmt.Sprintf("Expected import identifier with format: project_id,cluster_id. Got: %q", req.ID), -// ) -// return -// } - -// resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...) -// resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("id"), idParts[1])...) 
-// } - -// func WaitClusterReady(ctx context.Context, timeout time.Duration, interval time.Duration, projectId, clusterId string, -// client tidbcloud.TiDBCloudClient) (*clusterApi.GetClusterOKBody, error) { -// stateConf := &retry.StateChangeConf{ -// Pending: []string{ -// string(clusterStatusCreating), -// string(clusterStatusModifying), -// string(clusterStatusResuming), -// string(clusterStatusUnavailable), -// string(clusterStatusImporting), -// string(clusterStatusPausing), -// }, -// Target: []string{ -// string(clusterStatusAvailable), -// string(clusterStatusPaused), -// string(clusterStatusMaintaining), -// }, -// Timeout: timeout, -// MinTimeout: 500 * time.Millisecond, -// PollInterval: interval, -// Refresh: clusterStateRefreshFunc(ctx, projectId, clusterId, client), -// } - -// outputRaw, err := stateConf.WaitForStateContext(ctx) - -// if output, ok := outputRaw.(*clusterApi.GetClusterOKBody); ok { -// return output, err -// } -// return nil, err -// } - -// func clusterStateRefreshFunc(ctx context.Context, projectId, clusterId string, -// client tidbcloud.TiDBCloudClient) retry.StateRefreshFunc { -// return func() (interface{}, string, error) { -// param := clusterApi.NewGetClusterParams().WithProjectID(projectId).WithClusterID(clusterId).WithContext(ctx) -// getClusterResp, err := client.GetCluster(param) -// if err != nil { -// tflog.Warn(ctx, fmt.Sprintf("get cluster error: %s", err)) -// if getClusterResp != nil && getClusterResp.Code() < http.StatusInternalServerError { -// return nil, "", err -// } else { -// // regard as not found and retry again. Default is 20 times -// return nil, "", nil -// } -// } -// return getClusterResp.Payload, getClusterResp.Payload.Status.ClusterStatus, nil -// } -// } diff --git a/internal/provider/dedicated_region_data_source.go b/internal/provider/dedicated_region_data_source.go index b984640..f8d55f0 100644 --- a/internal/provider/dedicated_region_data_source.go +++ b/internal/provider/dedicated_region_data_source.go @@ -8,7 +8,6 @@ import ( "github.com/hashicorp/terraform-plugin-framework/datasource/schema" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-log/tflog" - // "github.com/tidbcloud/terraform-provider-tidbcloud/internal/provider" ) type dedicatedRegion struct { @@ -20,7 +19,7 @@ type dedicatedRegion struct { var _ datasource.DataSource = &dedicatedRegionDataSource{} type dedicatedRegionDataSource struct { - provider *TidbcloudProvider + provider *tidbcloudProvider } func NewDedicatedRegionDataSource() datasource.DataSource { @@ -36,9 +35,9 @@ func (d *dedicatedRegionDataSource) Configure(_ context.Context, req datasource. 
return
 }
 var ok bool
- if d.provider, ok = req.ProviderData.(*TidbcloudProvider); !ok {
+ if d.provider, ok = req.ProviderData.(*tidbcloudProvider); !ok {
 resp.Diagnostics.AddError("Internal provider error",
- fmt.Sprintf("Error in Configure: expected %T but got %T", TidbcloudProvider{}, req.ProviderData))
+ fmt.Sprintf("Error in Configure: expected %T but got %T", tidbcloudProvider{}, req.ProviderData))
 }
 }

diff --git a/internal/provider/dedicated_regions_data_source.go b/internal/provider/dedicated_regions_data_source.go
index 6c75095..51dd999 100644
--- a/internal/provider/dedicated_regions_data_source.go
+++ b/internal/provider/dedicated_regions_data_source.go
@@ -10,21 +10,18 @@ import (
 "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
 "github.com/hashicorp/terraform-plugin-framework/types"
 "github.com/hashicorp/terraform-plugin-log/tflog"
- // "github.com/tidbcloud/terraform-provider-tidbcloud/internal/provider"
 )

 type dedicatedRegionsDataSourceData struct {
- Id types.String `tfsdk:"id"`
- Page types.Int64 `tfsdk:"page"`
- PageSize types.Int64 `tfsdk:"page_size"`
- Items []dedicatedRegion `tfsdk:"items"`
- Total types.Int64 `tfsdk:"total"`
+ Id types.String `tfsdk:"id"`
+ Items []dedicatedRegion `tfsdk:"items"`
+ Total types.Int64 `tfsdk:"total"`
 }

 var _ datasource.DataSource = &dedicatedRegionsDataSource{}

 type dedicatedRegionsDataSource struct {
- provider *TidbcloudProvider
+ provider *tidbcloudProvider
 }

 func NewDedicatedRegionsDataSource() datasource.DataSource {
@@ -40,9 +37,9 @@ func (d *dedicatedRegionsDataSource) Configure(_ context.Context, req datasource
 return
 }
 var ok bool
- if d.provider, ok = req.ProviderData.(*TidbcloudProvider); !ok {
+ if d.provider, ok = req.ProviderData.(*tidbcloudProvider); !ok {
 resp.Diagnostics.AddError("Internal provider error",
- fmt.Sprintf("Error in Configure: expected %T but got %T", TidbcloudProvider{}, req.ProviderData))
+ fmt.Sprintf("Error in Configure: expected %T but got %T", tidbcloudProvider{}, req.ProviderData))
 }
 }

@@ -54,16 +51,6 @@ func (d *dedicatedRegionsDataSource) Schema(_ context.Context, _ datasource.Sche
 MarkdownDescription: "data source ID",
 Computed: true,
 },
- "page": schema.Int64Attribute{
- MarkdownDescription: "Default:1 The page number.",
- Optional: true,
- Computed: true,
- },
- "page_size": schema.Int64Attribute{
- MarkdownDescription: "Default:10 The size of a page.",
- Optional: true,
- Computed: true,
- },
 "items": schema.ListNestedAttribute{
 MarkdownDescription: "The items of regions",
 Computed: true,
diff --git a/internal/provider/import_resource.go b/internal/provider/import_resource.go
index 637cf5b..378dbbe 100644
--- a/internal/provider/import_resource.go
+++ b/internal/provider/import_resource.go
@@ -30,7 +30,7 @@ func NewImportResource() resource.Resource {

 // ImportResource defines the resource implementation.
 type ImportResource struct {
- provider *TidbcloudProvider
+ provider *tidbcloudProvider
 }

 // ImportResourceModel describes the resource data model.
@@ -115,9 +115,9 @@ func (r *ImportResource) Configure(_ context.Context, req resource.ConfigureRequ } var ok bool - if r.provider, ok = req.ProviderData.(*TidbcloudProvider); !ok { + if r.provider, ok = req.ProviderData.(*tidbcloudProvider); !ok { resp.Diagnostics.AddError("Internal provider error", - fmt.Sprintf("Error in Configure: expected %T but got %T", TidbcloudProvider{}, req.ProviderData)) + fmt.Sprintf("Error in Configure: expected %T but got %T", tidbcloudProvider{}, req.ProviderData)) } } diff --git a/internal/provider/projects_data_source.go b/internal/provider/projects_data_source.go index ba26757..19d44bf 100644 --- a/internal/provider/projects_data_source.go +++ b/internal/provider/projects_data_source.go @@ -34,7 +34,7 @@ type project struct { var _ datasource.DataSource = &projectsDataSource{} type projectsDataSource struct { - provider *TidbcloudProvider + provider *tidbcloudProvider } func NewProjectsDataSource() datasource.DataSource { @@ -50,9 +50,9 @@ func (d *projectsDataSource) Configure(_ context.Context, req datasource.Configu return } var ok bool - if d.provider, ok = req.ProviderData.(*TidbcloudProvider); !ok { + if d.provider, ok = req.ProviderData.(*tidbcloudProvider); !ok { resp.Diagnostics.AddError("Internal provider error", - fmt.Sprintf("Error in Configure: expected %T but got %T", TidbcloudProvider{}, req.ProviderData)) + fmt.Sprintf("Error in Configure: expected %T but got %T", tidbcloudProvider{}, req.ProviderData)) } } diff --git a/internal/provider/provider.go b/internal/provider/provider.go index a9baf37..e5545d5 100644 --- a/internal/provider/provider.go +++ b/internal/provider/provider.go @@ -15,7 +15,7 @@ import ( ) // Ensure the implementation satisfies the provider.Provider interface. -var _ provider.Provider = &TidbcloudProvider{} +var _ provider.Provider = &tidbcloudProvider{} // NewClient overrides the NewClientDelegate method for testing. var NewClient = tidbcloud.NewClientDelegate @@ -24,7 +24,7 @@ var NewDedicatedClient = tidbcloud.NewDedicatedClientDelegate // provider satisfies the tfsdk.Provider interface and usually is included // with all Resource and DataSource implementations. -type TidbcloudProvider struct { +type tidbcloudProvider struct { // client can contain the upstream provider SDK or HTTP client used to // communicate with the upstream service. Resource and DataSource // implementations can then make calls using this client. 
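The package-level NewClient and NewDedicatedClient variables are deliberate test seams: because they are plain variables rather than functions, a test can swap in a fake delegate and restore the real one afterwards. A rough sketch, with the fake's construction elided because the delegate signatures live in the tidbcloud package (fakeDedicatedDelegate is a hypothetical test double):

    // swap the dedicated-client constructor for a test double and restore it
    // when the test finishes
    saved := NewDedicatedClient
    defer func() { NewDedicatedClient = saved }()
    NewDedicatedClient = fakeDedicatedDelegate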
@@ -52,12 +52,12 @@ type providerData struct { Sync types.Bool `tfsdk:"sync"` } -func (p *TidbcloudProvider) Metadata(_ context.Context, _ provider.MetadataRequest, resp *provider.MetadataResponse) { +func (p *tidbcloudProvider) Metadata(_ context.Context, _ provider.MetadataRequest, resp *provider.MetadataResponse) { resp.TypeName = "tidbcloud" resp.Version = p.version } -func (p *TidbcloudProvider) Configure(ctx context.Context, req provider.ConfigureRequest, resp *provider.ConfigureResponse) { +func (p *tidbcloudProvider) Configure(ctx context.Context, req provider.ConfigureRequest, resp *provider.ConfigureResponse) { // get providerData var data providerData diags := req.Config.Get(ctx, &data) @@ -139,7 +139,7 @@ func (p *TidbcloudProvider) Configure(ctx context.Context, req provider.Configur resp.DataSourceData = p } -func (p *TidbcloudProvider) Resources(ctx context.Context) []func() resource.Resource { +func (p *tidbcloudProvider) Resources(ctx context.Context) []func() resource.Resource { return []func() resource.Resource{ NewClusterResource, NewBackupResource, @@ -148,7 +148,7 @@ func (p *TidbcloudProvider) Resources(ctx context.Context) []func() resource.Res } } -func (p *TidbcloudProvider) DataSources(ctx context.Context) []func() datasource.DataSource { +func (p *tidbcloudProvider) DataSources(ctx context.Context) []func() datasource.DataSource { return []func() datasource.DataSource{ NewProjectsDataSource, NewClusterSpecsDataSource, @@ -161,7 +161,7 @@ func (p *TidbcloudProvider) DataSources(ctx context.Context) []func() datasource } } -func (p *TidbcloudProvider) Schema(_ context.Context, _ provider.SchemaRequest, resp *provider.SchemaResponse) { +func (p *tidbcloudProvider) Schema(_ context.Context, _ provider.SchemaRequest, resp *provider.SchemaResponse) { resp.Schema = schema.Schema{ Attributes: map[string]schema.Attribute{ "public_key": schema.StringAttribute{ @@ -185,7 +185,7 @@ func (p *TidbcloudProvider) Schema(_ context.Context, _ provider.SchemaRequest, func New(version string) func() provider.Provider { return func() provider.Provider { - return &TidbcloudProvider{ + return &tidbcloudProvider{ version: version, } } diff --git a/internal/provider/restore_resource.go b/internal/provider/restore_resource.go index 6afc0e4..bae98b7 100644 --- a/internal/provider/restore_resource.go +++ b/internal/provider/restore_resource.go @@ -45,7 +45,7 @@ type cluster struct { var _ resource.Resource = &restoreResource{} type restoreResource struct { - provider *TidbcloudProvider + provider *tidbcloudProvider } func NewRestoreResource() resource.Resource { @@ -63,9 +63,9 @@ func (r *restoreResource) Configure(_ context.Context, req resource.ConfigureReq } var ok bool - if r.provider, ok = req.ProviderData.(*TidbcloudProvider); !ok { + if r.provider, ok = req.ProviderData.(*tidbcloudProvider); !ok { resp.Diagnostics.AddError("Internal provider error", - fmt.Sprintf("Error in Configure: expected %T but got %T", TidbcloudProvider{}, req.ProviderData)) + fmt.Sprintf("Error in Configure: expected %T but got %T", tidbcloudProvider{}, req.ProviderData)) } } diff --git a/internal/provider/restores_data_source.go b/internal/provider/restores_data_source.go index fb29fc9..9302a2b 100644 --- a/internal/provider/restores_data_source.go +++ b/internal/provider/restores_data_source.go @@ -36,7 +36,7 @@ type restore struct { var _ datasource.DataSource = &restoresDataSource{} type restoresDataSource struct { - provider *TidbcloudProvider + provider *tidbcloudProvider } func NewRestoresDataSource() 
datasource.DataSource { @@ -52,9 +52,9 @@ func (d *restoresDataSource) Configure(_ context.Context, req datasource.Configu return } var ok bool - if d.provider, ok = req.ProviderData.(*TidbcloudProvider); !ok { + if d.provider, ok = req.ProviderData.(*tidbcloudProvider); !ok { resp.Diagnostics.AddError("Internal provider error", - fmt.Sprintf("Error in Configure: expected %T but got %T", TidbcloudProvider{}, req.ProviderData)) + fmt.Sprintf("Error in Configure: expected %T but got %T", tidbcloudProvider{}, req.ProviderData)) } } From 458410e8d60068fbf37899b4b70ed029b68daa37 Mon Sep 17 00:00:00 2001 From: FingerLeader Date: Fri, 6 Dec 2024 14:16:01 +0800 Subject: [PATCH 5/6] tmp save Signed-off-by: FingerLeader --- .../provider/dedicated_cluster_resource.go | 924 ++++++++++++++++++ internal/provider/provider.go | 2 +- 2 files changed, 925 insertions(+), 1 deletion(-) create mode 100644 internal/provider/dedicated_cluster_resource.go diff --git a/internal/provider/dedicated_cluster_resource.go b/internal/provider/dedicated_cluster_resource.go new file mode 100644 index 0000000..4668ef6 --- /dev/null +++ b/internal/provider/dedicated_cluster_resource.go @@ -0,0 +1,924 @@ +package provider + +import ( + "context" + "fmt" + "net/http" + "sort" + "strings" + "time" + + clusterApi "github.com/c4pt0r/go-tidbcloud-sdk-v1/client/cluster" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/objectplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/tidbcloud/terraform-provider-tidbcloud/tidbcloud" +) + +// const dev = "DEVELOPER" +// const ded = "DEDICATED" + +// // Enum: [AVAILABLE CREATING MODIFYING PAUSED RESUMING UNAVAILABLE IMPORTING MAINTAINING PAUSING] +// type clusterStatus string + +// const ( +// clusterStatusCreating clusterStatus = "CREATING" +// clusterStatusAvailable clusterStatus = "AVAILABLE" +// clusterStatusModifying clusterStatus = "MODIFYING" +// clusterStatusPaused clusterStatus = "PAUSED" +// clusterStatusResuming clusterStatus = "RESUMING" +// clusterStatusUnavailable clusterStatus = "UNAVAILABLE" +// clusterStatusImporting clusterStatus = "IMPORTING" +// clusterStatusMaintaining clusterStatus = "MAINTAINING" +// clusterStatusPausing clusterStatus = "PAUSING" +// ) + +// const ( +// clusterServerlessCreateTimeout = 180 * time.Second +// clusterServerlessCreateInterval = 2 * time.Second +// clusterCreateTimeout = time.Hour +// clusterCreateInterval = 60 * time.Second +// clusterUpdateTimeout = time.Hour +// clusterUpdateInterval = 20 * time.Second +// ) + +type dedicatedClusterResourceData struct { + ProjectId types.String `tfsdk:"project_id"` + ClusterId types.String `tfsdk:"id"` + Name types.String `tfsdk:"name"` + CloudProvider types.String `tfsdk:"cloud_provider"` + RegionId types.String `tfsdk:"region_id"` + Labels map[string]string `tfsdk:"labels"` + RootPassword types.String `tfsdk:"root_password"` + Port types.Int64 `tfsdk:"port"` + Paused types.Bool `tfsdk:"paused"` + PausePlan 
pausePlan `tfsdk:"pause_plan"`
+	State types.String `tfsdk:"state"`
+	Version types.String `tfsdk:"version"`
+	CreatedBy types.String `tfsdk:"created_by"`
+	CreateTime types.String `tfsdk:"create_time"`
+	UpdateTime types.String `tfsdk:"update_time"`
+	RegionDisplayName types.String `tfsdk:"region_display_name"`
+	Annotations map[string]string `tfsdk:"annotations"`
+}
+
+type pausePlan struct {
+	PauseType types.String `tfsdk:"pause_type"`
+	ScheduledResumeTime types.String `tfsdk:"scheduled_resume_time"`
+}
+
+type tidbNodeSetting struct {
+	NodeSpecKey types.String `tfsdk:"node_spec_key"`
+	NodeCount types.Int64 `tfsdk:"node_count"`
+	NodeGroups []nodeGroup `tfsdk:"node_groups"`
+}
+
+type nodeGroup struct {
+	NodeCount types.Int64 `tfsdk:"node_count"`
+	TiDBNodeGroupId types.String `tfsdk:"tidb_node_group_id"`
+	TIDBNodeGroupName types.String `tfsdk:"tidb_node_group_name"`
+	NodeSpecKey types.String `tfsdk:"node_spec_key"`
+	NodeSpecDisplayName types.String `tfsdk:"node_spec_display_name"`
+	IsDefaultGroup types.Bool `tfsdk:"is_default_group"`
+	State types.String `tfsdk:"state"`
+	NodeChangingProgress nodeChangingProgress `tfsdk:"node_changing_progress"`
+}
+
+type nodeChangingProgress struct {
+	MatchingNodeSpecNodeCount types.Int64 `tfsdk:"matching_node_spec_node_count"`
+	RemainingDeletionNodeCount types.Int64 `tfsdk:"remaining_deletion_node_count"`
+}
+
+type dedicatedClusterResource struct {
+	provider *tidbcloudProvider
+}
+
+func NewDedicatedClusterResource() resource.Resource {
+	return &dedicatedClusterResource{}
+}
+
+func (r *clusterResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
+	resp.TypeName = req.ProviderTypeName + "_cluster"
+}
+
+func (r *clusterResource) Configure(_ context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
+	// Prevent panic if the provider has not been configured.
+	if req.ProviderData == nil {
+		return
+	}
+
+	var ok bool
+	if r.provider, ok = req.ProviderData.(*tidbcloudProvider); !ok {
+		resp.Diagnostics.AddError("Internal provider error",
+			fmt.Sprintf("Error in Configure: expected %T but got %T", tidbcloudProvider{}, req.ProviderData))
+	}
+}
+
+func (r *clusterResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
+	resp.Schema = schema.Schema{
+		MarkdownDescription: "cluster resource",
+		Attributes: map[string]schema.Attribute{
+			"project_id": schema.StringAttribute{
+				MarkdownDescription: "The ID of the project.
You can get the project ID from [tidbcloud_projects datasource](../data-sources/projects.md).", + Required: true, + }, + "name": schema.StringAttribute{ + MarkdownDescription: "The name of the cluster.", + Required: true, + }, + "id": schema.StringAttribute{ + Computed: true, + MarkdownDescription: "The ID of the cluster.", + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "cluster_type": schema.StringAttribute{ + MarkdownDescription: "Enum: \"DEDICATED\" \"DEVELOPER\", The cluster type.", + Required: true, + }, + "cloud_provider": schema.StringAttribute{ + MarkdownDescription: "Enum: \"AWS\" \"GCP\", The cloud provider on which your TiDB cluster is hosted.", + Required: true, + }, + "create_timestamp": schema.StringAttribute{ + MarkdownDescription: "The creation time of the cluster in Unix timestamp seconds (epoch time).", + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "region": schema.StringAttribute{ + MarkdownDescription: "the region value should match the cloud provider's region code. You can get the complete list of available regions from the [tidbcloud_cluster_specs datasource](../data-sources/cluster_specs.md).", + Required: true, + }, + "status": schema.SingleNestedAttribute{ + MarkdownDescription: "The status of the cluster.", + Computed: true, + PlanModifiers: []planmodifier.Object{ + clusterResourceStatus(), + }, + Attributes: map[string]schema.Attribute{ + "tidb_version": schema.StringAttribute{ + MarkdownDescription: "TiDB version.", + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "cluster_status": schema.StringAttribute{ + MarkdownDescription: "Status of the cluster.", + Computed: true, + }, + "connection_strings": schema.SingleNestedAttribute{ + MarkdownDescription: "Connection strings.", + Computed: true, + PlanModifiers: []planmodifier.Object{ + objectplanmodifier.UseStateForUnknown(), + }, + Attributes: map[string]schema.Attribute{ + "default_user": schema.StringAttribute{ + MarkdownDescription: "The default TiDB user for connection.", + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "standard": schema.SingleNestedAttribute{ + MarkdownDescription: "Standard connection string.", + Computed: true, + PlanModifiers: []planmodifier.Object{ + objectplanmodifier.UseStateForUnknown(), + }, + Attributes: map[string]schema.Attribute{ + "host": schema.StringAttribute{ + MarkdownDescription: "The host of standard connection.", + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "port": schema.Int64Attribute{ + MarkdownDescription: "The TiDB port for connection. The port must be in the range of 1024-65535 except 10080.", + Computed: true, + PlanModifiers: []planmodifier.Int64{ + int64planmodifier.UseStateForUnknown(), + }, + }, + }, + }, + "vpc_peering": schema.SingleNestedAttribute{ + MarkdownDescription: "VPC peering connection string.", + Computed: true, + PlanModifiers: []planmodifier.Object{ + objectplanmodifier.UseStateForUnknown(), + }, + Attributes: map[string]schema.Attribute{ + "host": schema.StringAttribute{ + MarkdownDescription: "The host of VPC peering connection.", + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "port": schema.Int64Attribute{ + MarkdownDescription: "The TiDB port for connection. 
The port must be in the range of 1024-65535 except 10080.", + Computed: true, + PlanModifiers: []planmodifier.Int64{ + int64planmodifier.UseStateForUnknown(), + }, + }, + }, + }, + }, + }, + }, + }, + "config": schema.SingleNestedAttribute{ + MarkdownDescription: "The configuration of the cluster.", + Required: true, + Attributes: map[string]schema.Attribute{ + "root_password": schema.StringAttribute{ + MarkdownDescription: "The root password to access the cluster. It must be 8-64 characters.", + Optional: true, + }, + "port": schema.Int64Attribute{ + MarkdownDescription: "The TiDB port for connection. The port must be in the range of 1024-65535 except 10080, 4000 in default.\n" + + " - For a Serverless Tier cluster, only port 4000 is available.", + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.Int64{ + int64planmodifier.UseStateForUnknown(), + }, + }, + "paused": schema.BoolAttribute{ + MarkdownDescription: "lag that indicates whether the cluster is paused. true means to pause the cluster, and false means to resume the cluster.\n" + + " - The cluster can be paused only when the cluster_status is \"AVAILABLE\"." + + " - The cluster can be resumed only when the cluster_status is \"PAUSED\".", + Optional: true, + }, + "components": schema.SingleNestedAttribute{ + MarkdownDescription: "The components of the cluster.\n" + + " - For a Serverless Tier cluster, the components value can not be set." + + " - For a Dedicated Tier cluster, the components value must be set.", + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.Object{ + objectplanmodifier.UseStateForUnknown(), + }, + Attributes: map[string]schema.Attribute{ + "tidb": schema.SingleNestedAttribute{ + MarkdownDescription: "The TiDB component of the cluster", + Required: true, + PlanModifiers: []planmodifier.Object{ + objectplanmodifier.UseStateForUnknown(), + }, + Attributes: map[string]schema.Attribute{ + "node_size": schema.StringAttribute{ + Required: true, + MarkdownDescription: "The size of the TiDB component in the cluster, You can get the available node size of each region from the [tidbcloud_cluster_specs datasource](../data-sources/cluster_specs.md).\n" + + " - If the vCPUs of TiDB or TiKV component is 2 or 4, then their vCPUs need to be the same.\n" + + " - If the vCPUs of TiDB or TiKV component is 2 or 4, then the cluster does not support TiFlash.\n" + + " - Can not modify node_size of an existing cluster.", + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "node_quantity": schema.Int64Attribute{ + MarkdownDescription: "The number of nodes in the cluster. 
You can get the minimum and step of a node quantity from the [tidbcloud_cluster_specs datasource](../data-sources/cluster_specs.md).", + Required: true, + PlanModifiers: []planmodifier.Int64{ + int64planmodifier.UseStateForUnknown(), + }, + }, + }, + }, + "tikv": schema.SingleNestedAttribute{ + MarkdownDescription: "The TiKV component of the cluster", + Required: true, + PlanModifiers: []planmodifier.Object{ + objectplanmodifier.UseStateForUnknown(), + }, + Attributes: map[string]schema.Attribute{ + "node_size": schema.StringAttribute{ + MarkdownDescription: "The size of the TiKV component in the cluster, You can get the available node size of each region from the [tidbcloud_cluster_specs datasource](../data-sources/cluster_specs.md).\n" + + " - If the vCPUs of TiDB or TiKV component is 2 or 4, then their vCPUs need to be the same.\n" + + " - If the vCPUs of TiDB or TiKV component is 2 or 4, then the cluster does not support TiFlash.\n" + + " - Can not modify node_size of an existing cluster.", + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "storage_size_gib": schema.Int64Attribute{ + MarkdownDescription: "The storage size of a node in the cluster. You can get the minimum and maximum of storage size from the [tidbcloud_cluster_specs datasource](../data-sources/cluster_specs.md).\n" + + " - Can not modify storage_size_gib of an existing cluster.", + Required: true, + PlanModifiers: []planmodifier.Int64{ + int64planmodifier.UseStateForUnknown(), + }, + }, + "node_quantity": schema.Int64Attribute{ + MarkdownDescription: "The number of nodes in the cluster. You can get the minimum and step of a node quantity from the [tidbcloud_cluster_specs datasource](../data-sources/cluster_specs.md).\n" + + " - TiKV do not support decreasing node quantity.\n" + + " - The node_quantity of TiKV must be a multiple of 3.", + Required: true, + PlanModifiers: []planmodifier.Int64{ + int64planmodifier.UseStateForUnknown(), + }, + }, + }, + }, + "tiflash": schema.SingleNestedAttribute{ + MarkdownDescription: "The TiFlash component of the cluster.", + Optional: true, + PlanModifiers: []planmodifier.Object{ + objectplanmodifier.UseStateForUnknown(), + }, + Attributes: map[string]schema.Attribute{ + "node_size": schema.StringAttribute{ + MarkdownDescription: "The size of the TiFlash component in the cluster, You can get the available node size of each region from the [tidbcloud_cluster_specs datasource](../data-sources/cluster_specs.md).\n" + + " - Can not modify node_size of an existing cluster.", + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "storage_size_gib": schema.Int64Attribute{ + MarkdownDescription: "The storage size of a node in the cluster. You can get the minimum and maximum of storage size from the [tidbcloud_cluster_specs datasource](../data-sources/cluster_specs.md).\n" + + " - Can not modify storage_size_gib of an existing cluster.", + Required: true, + PlanModifiers: []planmodifier.Int64{ + int64planmodifier.UseStateForUnknown(), + }, + }, + "node_quantity": schema.Int64Attribute{ + MarkdownDescription: "The number of nodes in the cluster. 
You can get the minimum and step of a node quantity from the [tidbcloud_cluster_specs datasource](../data-sources/cluster_specs.md).\n" + + " - TiFlash do not support decreasing node quantity.", + Required: true, + PlanModifiers: []planmodifier.Int64{ + int64planmodifier.UseStateForUnknown(), + }, + }, + }, + }, + }, + }, + "ip_access_list": schema.ListNestedAttribute{ + MarkdownDescription: "A list of IP addresses and Classless Inter-Domain Routing (CIDR) addresses that are allowed to access the TiDB Cloud cluster via [standard connection](https://docs.pingcap.com/tidbcloud/connect-to-tidb-cluster#connect-via-standard-connection).", + Optional: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "cidr": schema.StringAttribute{ + MarkdownDescription: "The IP address or CIDR range that you want to add to the cluster's IP access list.", + Required: true, + }, + "description": schema.StringAttribute{ + MarkdownDescription: "Description that explains the purpose of the entry.", + Required: true, + }, + }, + }, + }, + }, + }, + }, + } +} + +func (r clusterResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + if !r.provider.configured { + resp.Diagnostics.AddError( + "Provider not configured", + "The provider hasn't been configured before apply, likely because it depends on an unknown value from another resource. This leads to weird stuff happening, so we'd prefer if you didn't do that. Thanks!", + ) + return + } + + // get data from config + var data clusterResourceData + diags := req.Config.Get(ctx, &data) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + // for Serverless cluster, components is not allowed. or plan and state may be inconsistent + if data.ClusterType == dev { + if data.Config.Components != nil { + resp.Diagnostics.AddError("Create Error", fmt.Sprintf("components is not allowed in %s cluster_type", dev)) + return + } + } + + // for DEDICATED cluster, components is required. + if data.ClusterType == ded { + if data.Config.Components == nil { + resp.Diagnostics.AddError("Create Error", fmt.Sprintf("components is required in %s cluster_type", ded)) + return + } + } + + // write logs using the tflog package + // see https://pkg.go.dev/github.com/hashicorp/terraform-plugin-log/tflog + tflog.Trace(ctx, "created cluster_resource") + createClusterParams := clusterApi.NewCreateClusterParams().WithProjectID(data.ProjectId).WithBody(buildCreateClusterBody(data)) + createClusterResp, err := r.provider.client.CreateCluster(createClusterParams) + if err != nil { + resp.Diagnostics.AddError("Create Error", fmt.Sprintf("Unable to call CreateCluster, got error: %s", err)) + return + } + // set clusterId. 
other computed attributes are not returned by create, they will be set when refresh + clusterId := *createClusterResp.Payload.ID + data.ClusterId = types.StringValue(clusterId) + if r.provider.sync { + var cluster *clusterApi.GetClusterOKBody + if data.ClusterType == dev { + tflog.Info(ctx, "wait serverless cluster ready") + cluster, err = WaitClusterReady(ctx, clusterServerlessCreateTimeout, clusterServerlessCreateInterval, data.ProjectId, clusterId, r.provider.client) + if err != nil { + resp.Diagnostics.AddError( + "Cluster creation failed", + fmt.Sprintf("Cluster is not ready, get error: %s", err), + ) + return + } + } else { + tflog.Info(ctx, "wait dedicated cluster ready") + cluster, err = WaitClusterReady(ctx, clusterCreateTimeout, clusterCreateInterval, data.ProjectId, clusterId, r.provider.client) + if err != nil { + resp.Diagnostics.AddError( + "Cluster creation failed", + fmt.Sprintf("Cluster is not ready, get error: %s", err), + ) + return + } + } + refreshClusterResourceData(ctx, cluster, &data) + } else { + // we refresh in create for any unknown value. if someone has other opinions which is better, he can delete the refresh logic + tflog.Trace(ctx, "read cluster_resource") + getClusterParams := clusterApi.NewGetClusterParams().WithProjectID(data.ProjectId).WithClusterID(data.ClusterId.ValueString()) + getClusterResp, err := r.provider.client.GetCluster(getClusterParams) + if err != nil { + resp.Diagnostics.AddError("Create Error", fmt.Sprintf("Unable to call GetCluster, got error: %s", err)) + return + } + refreshClusterResourceData(ctx, getClusterResp.Payload, &data) + } + + // save into the Terraform state. + diags = resp.State.Set(ctx, &data) + resp.Diagnostics.Append(diags...) +} + +func buildCreateClusterBody(data clusterResourceData) clusterApi.CreateClusterBody { + // required + rootPassWord := data.Config.RootPassword.ValueString() + payload := clusterApi.CreateClusterBody{ + Name: &data.Name, + ClusterType: &data.ClusterType, + CloudProvider: &data.CloudProvider, + Region: &data.Region, + Config: &clusterApi.CreateClusterParamsBodyConfig{ + RootPassword: &rootPassWord, + }, + } + + // optional + if data.Config.Components != nil { + tidb := data.Config.Components.TiDB + tikv := data.Config.Components.TiKV + tiflash := data.Config.Components.TiFlash + + components := &clusterApi.CreateClusterParamsBodyConfigComponents{ + Tidb: &clusterApi.CreateClusterParamsBodyConfigComponentsTidb{ + NodeSize: &tidb.NodeSize, + NodeQuantity: &tidb.NodeQuantity, + }, + Tikv: &clusterApi.CreateClusterParamsBodyConfigComponentsTikv{ + NodeSize: &tikv.NodeSize, + StorageSizeGib: &tikv.StorageSizeGib, + NodeQuantity: &tikv.NodeQuantity, + }, + } + // tiflash is optional + if tiflash != nil { + components.Tiflash = &clusterApi.CreateClusterParamsBodyConfigComponentsTiflash{ + NodeSize: &tiflash.NodeSize, + StorageSizeGib: &tiflash.StorageSizeGib, + NodeQuantity: &tiflash.NodeQuantity, + } + } + + payload.Config.Components = components + } + if data.Config.IPAccessList != nil { + var IPAccessList []*clusterApi.CreateClusterParamsBodyConfigIPAccessListItems0 + for _, key := range data.Config.IPAccessList { + cidr := key.CIDR + IPAccessList = append(IPAccessList, &clusterApi.CreateClusterParamsBodyConfigIPAccessListItems0{ + Cidr: &cidr, + Description: key.Description, + }) + } + payload.Config.IPAccessList = IPAccessList + } + if !data.Config.Port.IsNull() && !data.Config.Port.IsUnknown() { + payload.Config.Port = int32(data.Config.Port.ValueInt64()) + } + + return payload +} + +func (r 
clusterResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + var projectId, clusterId string + + resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root("project_id"), &projectId)...) + resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root("id"), &clusterId)...) + if resp.Diagnostics.HasError() { + return + } + + // call read api + tflog.Trace(ctx, "read cluster_resource") + getClusterParams := clusterApi.NewGetClusterParams().WithProjectID(projectId).WithClusterID(clusterId) + getClusterResp, err := r.provider.client.GetCluster(getClusterParams) + if err != nil { + resp.Diagnostics.AddError("Read Error", fmt.Sprintf("Unable to call GetClusterById, got error: %s", err)) + return + } + + // refresh data with read result + var data clusterResourceData + // root_password, ip_access_list and pause will not return by read api, so we just use state's value even it changed on console! + // use types.String in case ImportState method throw unhandled null value + var rootPassword types.String + var iPAccessList []ipAccess + var paused *bool + resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root("config").AtName("root_password"), &rootPassword)...) + resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root("config").AtName("ip_access_list"), &iPAccessList)...) + resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root("config").AtName("paused"), &paused)...) + data.Config.RootPassword = rootPassword + data.Config.IPAccessList = iPAccessList + data.Config.Paused = paused + refreshClusterResourceData(ctx, getClusterResp.Payload, &data) + + // save into the Terraform state + diags := resp.State.Set(ctx, &data) + resp.Diagnostics.Append(diags...) +} + +func refreshClusterResourceData(ctx context.Context, resp *clusterApi.GetClusterOKBody, data *clusterResourceData) { + // must return + data.Name = resp.Name + data.ClusterId = types.StringValue(*resp.ID) + data.Region = resp.Region + data.ProjectId = *resp.ProjectID + data.ClusterType = resp.ClusterType + data.CloudProvider = resp.CloudProvider + data.CreateTimestamp = types.StringValue(resp.CreateTimestamp) + data.Config.Port = types.Int64Value(int64(resp.Config.Port)) + tidb := resp.Config.Components.Tidb + tikv := resp.Config.Components.Tikv + data.Config.Components = &components{ + TiDB: &componentTiDB{ + NodeSize: *tidb.NodeSize, + NodeQuantity: *tidb.NodeQuantity, + }, + TiKV: &componentTiKV{ + NodeSize: *tikv.NodeSize, + NodeQuantity: *tikv.NodeQuantity, + StorageSizeGib: *tikv.StorageSizeGib, + }, + } + + var standard connectionStandard + var vpcPeering connectionVpcPeering + if resp.Status.ConnectionStrings.Standard != nil { + standard.Host = resp.Status.ConnectionStrings.Standard.Host + standard.Port = resp.Status.ConnectionStrings.Standard.Port + } + if resp.Status.ConnectionStrings.VpcPeering != nil { + vpcPeering.Host = resp.Status.ConnectionStrings.VpcPeering.Host + vpcPeering.Port = resp.Status.ConnectionStrings.VpcPeering.Port + } + data.Status = &clusterStatusDataSource{ + TidbVersion: resp.Status.TidbVersion, + ClusterStatus: types.StringValue(resp.Status.ClusterStatus), + ConnectionStrings: &connection{ + DefaultUser: resp.Status.ConnectionStrings.DefaultUser, + Standard: &standard, + VpcPeering: &vpcPeering, + }, + } + // may return + tiflash := resp.Config.Components.Tiflash + if tiflash != nil { + data.Config.Components.TiFlash = &componentTiFlash{ + NodeSize: *tiflash.NodeSize, + NodeQuantity: *tiflash.NodeQuantity, + StorageSizeGib: *tiflash.StorageSizeGib, 
+		}
+	}
+
+	// not returned
+	// IPAccessList, root_password and paused are not refreshed here because the read API does not return them
+
+}
+
+// Update: since the open API is a patch without checks for invalid parameters, we do a lot of checks here to avoid inconsistency:
+// check the data that can't be updated
+// if plan and state differ, we can execute the update
+func (r clusterResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
+	// get plan
+	var data clusterResourceData
+	diags := req.Plan.Get(ctx, &data)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+	// get state
+	var state clusterResourceData
+	diags = req.State.Get(ctx, &state)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	// Serverless can not be changed now
+	if data.ClusterType == dev {
+		resp.Diagnostics.AddError(
+			"Update error",
+			"Unable to update Serverless cluster",
+		)
+		return
+	}
+
+	// only components and paused can be changed now
+	if data.Name != state.Name || data.ClusterType != state.ClusterType || data.Region != state.Region || data.CloudProvider != state.CloudProvider ||
+		data.ProjectId != state.ProjectId || data.ClusterId != state.ClusterId {
+		resp.Diagnostics.AddError(
+			"Update error",
+			"The name, cluster_type, region, cloud_provider and project_id can not be changed; only components can be changed now",
+		)
+		return
+	}
+	if !data.Config.Port.IsNull() && !data.Config.Port.IsUnknown() && data.Config.Port.ValueInt64() != state.Config.Port.ValueInt64() {
+		resp.Diagnostics.AddError(
+			"Update error",
+			"port can not be changed, only components can be changed now",
+		)
+		return
+	}
+	if data.Config.IPAccessList != nil {
+		// You cannot add an IP access list to an existing cluster without an IP rule.
+		if len(state.Config.IPAccessList) == 0 {
+			resp.Diagnostics.AddError(
+				"Update error",
+				"ip_access_list can not be added to the existing cluster.",
+			)
+			return
+		}
+
+		// You cannot insert or delete IP rule.
+		if len(data.Config.IPAccessList) != len(state.Config.IPAccessList) {
+			resp.Diagnostics.AddError(
+				"Update error",
+				"ip_access_list can not be changed, only components can be changed now",
+			)
+			return
+		}
+
+		// You cannot update the IP rule.
+		newIPAccessList := make([]ipAccess, len(data.Config.IPAccessList))
+		copy(newIPAccessList, data.Config.IPAccessList)
+		sort.Slice(newIPAccessList, func(i, j int) bool {
+			return newIPAccessList[i].CIDR < newIPAccessList[j].CIDR
+		})
+
+		currentIPAccessList := make([]ipAccess, len(state.Config.IPAccessList))
+		copy(currentIPAccessList, state.Config.IPAccessList)
+		sort.Slice(currentIPAccessList, func(i, j int) bool {
+			return currentIPAccessList[i].CIDR < currentIPAccessList[j].CIDR
+		})
+
+		for index, key := range newIPAccessList {
+			if currentIPAccessList[index].CIDR != key.CIDR || currentIPAccessList[index].Description != key.Description {
+				resp.Diagnostics.AddError(
+					"Update error",
+					"ip_access_list can not be changed, only components can be changed now",
+				)
+				return
+			}
+		}
+	} else {
+		// You cannot remove the IP access list.
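+		// (the plan no longer sets ip_access_list while the state still has
+		// entries, so reject the change instead of silently keeping or
+		// dropping the old rules)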
+ if len(state.Config.IPAccessList) > 0 { + resp.Diagnostics.AddError( + "Update error", + "ip_access_list can not be changed, only components can be changed now", + ) + return + } + } + + // check Components + tidb := data.Config.Components.TiDB + tikv := data.Config.Components.TiKV + tiflash := data.Config.Components.TiFlash + tidbState := state.Config.Components.TiDB + tikvState := state.Config.Components.TiKV + tiflashState := state.Config.Components.TiFlash + if tidb.NodeSize != tidbState.NodeSize { + resp.Diagnostics.AddError( + "Update error", + "tidb node_size can't be changed", + ) + return + } + if tikv.NodeSize != tikvState.NodeSize || tikv.StorageSizeGib != tikvState.StorageSizeGib { + resp.Diagnostics.AddError( + "Update error", + "tikv node_size or storage_size_gib can't be changed", + ) + return + } + if tiflash != nil && tiflashState != nil { + // if cluster have tiflash already, then we can't specify NodeSize and StorageSizeGib + if tiflash.NodeSize != tiflashState.NodeSize || tiflash.StorageSizeGib != tiflashState.StorageSizeGib { + resp.Diagnostics.AddError( + "Update error", + "tiflash node_size or storage_size_gib can't be changed", + ) + return + } + } + + // build UpdateClusterBody + var updateClusterBody clusterApi.UpdateClusterBody + updateClusterBody.Config = &clusterApi.UpdateClusterParamsBodyConfig{} + // build paused + if data.Config.Paused != nil { + if state.Config.Paused == nil || *data.Config.Paused != *state.Config.Paused { + updateClusterBody.Config.Paused = data.Config.Paused + } + } + // build components + var isComponentsChanged = false + if tidb.NodeQuantity != tidbState.NodeQuantity || tikv.NodeQuantity != tikvState.NodeQuantity { + isComponentsChanged = true + } + + var componentTiFlash *clusterApi.UpdateClusterParamsBodyConfigComponentsTiflash + if tiflash != nil { + if tiflashState == nil { + isComponentsChanged = true + componentTiFlash = &clusterApi.UpdateClusterParamsBodyConfigComponentsTiflash{ + NodeQuantity: &tiflash.NodeQuantity, + NodeSize: &tiflash.NodeSize, + StorageSizeGib: &tiflash.StorageSizeGib, + } + } else if tiflash.NodeQuantity != tiflashState.NodeQuantity { + isComponentsChanged = true + // NodeSize can't be changed + componentTiFlash = &clusterApi.UpdateClusterParamsBodyConfigComponentsTiflash{ + NodeQuantity: &tiflash.NodeQuantity, + } + } + } + + if isComponentsChanged { + updateClusterBody.Config.Components = &clusterApi.UpdateClusterParamsBodyConfigComponents{ + Tidb: &clusterApi.UpdateClusterParamsBodyConfigComponentsTidb{ + NodeQuantity: &tidb.NodeQuantity, + }, + Tikv: &clusterApi.UpdateClusterParamsBodyConfigComponentsTikv{ + NodeQuantity: &tikv.NodeQuantity, + }, + Tiflash: componentTiFlash, + } + } + + tflog.Trace(ctx, "update cluster_resource") + updateClusterParams := clusterApi.NewUpdateClusterParams().WithProjectID(data.ProjectId).WithClusterID(data.ClusterId.ValueString()).WithBody(updateClusterBody) + _, err := r.provider.client.UpdateCluster(updateClusterParams) + if err != nil { + resp.Diagnostics.AddError("Update Error", fmt.Sprintf("Unable to call UpdateClusterById, got error: %s", err)) + return + } + + if r.provider.sync { + tflog.Info(ctx, "wait cluster ready") + cluster, err := WaitClusterReady(ctx, clusterUpdateTimeout, clusterUpdateInterval, data.ProjectId, data.ClusterId.ValueString(), r.provider.client) + if err != nil { + resp.Diagnostics.AddError( + "Cluster update failed", + fmt.Sprintf("Cluster is not ready, get error: %s", err), + ) + return + } + refreshClusterResourceData(ctx, cluster, &data) 
+ } else { + // we refresh for any unknown value. if someone has other opinions which is better, he can delete the refresh logic + tflog.Trace(ctx, "read cluster_resource") + getClusterResp, err := r.provider.client.GetCluster(clusterApi.NewGetClusterParams().WithProjectID(data.ProjectId).WithClusterID(data.ClusterId.ValueString())) + if err != nil { + resp.Diagnostics.AddError("Update Error", fmt.Sprintf("Unable to call GetClusterById, got error: %s", err)) + return + } + refreshClusterResourceData(ctx, getClusterResp.Payload, &data) + } + + // save into the Terraform state. + diags = resp.State.Set(ctx, &data) + resp.Diagnostics.Append(diags...) +} + +func (r clusterResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + var data clusterResourceData + + diags := req.State.Get(ctx, &data) + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + + tflog.Trace(ctx, "delete cluster_resource") + _, err := r.provider.client.DeleteCluster(clusterApi.NewDeleteClusterParams().WithProjectID(data.ProjectId).WithClusterID(data.ClusterId.ValueString())) + if err != nil { + resp.Diagnostics.AddError("Delete Error", fmt.Sprintf("Unable to call DeleteClusterById, got error: %s", err)) + return + } +} + +func (r clusterResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + idParts := strings.Split(req.ID, ",") + + if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" { + resp.Diagnostics.AddError( + "Unexpected Import Identifier", + fmt.Sprintf("Expected import identifier with format: project_id,cluster_id. Got: %q", req.ID), + ) + return + } + + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("id"), idParts[1])...) +} + +func WaitClusterReady(ctx context.Context, timeout time.Duration, interval time.Duration, projectId, clusterId string, + client tidbcloud.TiDBCloudClient) (*clusterApi.GetClusterOKBody, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{ + string(clusterStatusCreating), + string(clusterStatusModifying), + string(clusterStatusResuming), + string(clusterStatusUnavailable), + string(clusterStatusImporting), + string(clusterStatusPausing), + }, + Target: []string{ + string(clusterStatusAvailable), + string(clusterStatusPaused), + string(clusterStatusMaintaining), + }, + Timeout: timeout, + MinTimeout: 500 * time.Millisecond, + PollInterval: interval, + Refresh: clusterStateRefreshFunc(ctx, projectId, clusterId, client), + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*clusterApi.GetClusterOKBody); ok { + return output, err + } + return nil, err +} + +func clusterStateRefreshFunc(ctx context.Context, projectId, clusterId string, + client tidbcloud.TiDBCloudClient) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + param := clusterApi.NewGetClusterParams().WithProjectID(projectId).WithClusterID(clusterId).WithContext(ctx) + getClusterResp, err := client.GetCluster(param) + if err != nil { + tflog.Warn(ctx, fmt.Sprintf("get cluster error: %s", err)) + if getClusterResp != nil && getClusterResp.Code() < http.StatusInternalServerError { + return nil, "", err + } else { + // regard as not found and retry again. 
Default is 20 times + return nil, "", nil + } + } + return getClusterResp.Payload, getClusterResp.Payload.Status.ClusterStatus, nil + } +} diff --git a/internal/provider/provider.go b/internal/provider/provider.go index e5545d5..250a27f 100644 --- a/internal/provider/provider.go +++ b/internal/provider/provider.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/tidbcloud/terraform-provider-tidbcloud/tidbcloud" - // "github.com/tidbcloud/terraform-provider-tidbcloud/internal/provider/dedicated" + "github.com/tidbcloud/terraform-provider-tidbcloud/internal/service/dedicated" ) // Ensure the implementation satisfies the provider.Provider interface. From 904bfe19379a19230705a5ebefd9d4fc119f391b Mon Sep 17 00:00:00 2001 From: FingerLeader Date: Thu, 19 Dec 2024 11:56:46 +0800 Subject: [PATCH 6/6] tmp save for creating dedicated cluster Signed-off-by: FingerLeader --- docs/data-sources/clusters.md | 1 + go.mod | 5 - internal/provider/clusters_data_source.go | 4 + .../provider/dedicated_cluster_resource.go | 1333 ++++++++--------- internal/provider/provider.go | 5 +- internal/provider/util.go | 11 + tidbcloud/dedicated_api_client.go | 22 + 7 files changed, 691 insertions(+), 690 deletions(-) diff --git a/docs/data-sources/clusters.md b/docs/data-sources/clusters.md index 4767116..3b9f57c 100644 --- a/docs/data-sources/clusters.md +++ b/docs/data-sources/clusters.md @@ -64,6 +64,7 @@ Read-Only: - `create_timestamp` (String) The creation time of the cluster in Unix timestamp seconds (epoch time). - `id` (String) The ID of the cluster. - `name` (String) The name of the cluster. +- `project_id` (String) The ID of the project. - `region` (String) Region of the cluster. - `status` (Attributes) The status of the cluster. 
(see [below for nested schema](#nestedatt--items--status)) diff --git a/go.mod b/go.mod index b26b380..890c53b 100644 --- a/go.mod +++ b/go.mod @@ -14,15 +14,10 @@ require ( github.com/hashicorp/terraform-plugin-framework v1.13.0 github.com/hashicorp/terraform-plugin-go v0.25.0 github.com/hashicorp/terraform-plugin-log v0.9.0 -<<<<<<< HEAD - github.com/hashicorp/terraform-plugin-sdk/v2 v2.34.0 - github.com/icholy/digest v0.1.15 -======= github.com/hashicorp/terraform-plugin-sdk/v2 v2.35.0 github.com/hashicorp/terraform-plugin-testing v1.11.0 github.com/icholy/digest v1.0.1 github.com/stretchr/testify v1.9.0 ->>>>>>> main github.com/tidbcloud/tidbcloud-cli/pkg v0.0.0-20241125120734-8e2a11bc41c5 ) diff --git a/internal/provider/clusters_data_source.go b/internal/provider/clusters_data_source.go index 2c36724..a1b9c6c 100644 --- a/internal/provider/clusters_data_source.go +++ b/internal/provider/clusters_data_source.go @@ -118,6 +118,10 @@ func (d *clustersDataSource) Schema(_ context.Context, _ datasource.SchemaReques MarkdownDescription: "The ID of the cluster.", Computed: true, }, + "project_id": schema.StringAttribute{ + MarkdownDescription: "The ID of the project.", + Computed: true, + }, "name": schema.StringAttribute{ MarkdownDescription: "The name of the cluster.", Computed: true, diff --git a/internal/provider/dedicated_cluster_resource.go b/internal/provider/dedicated_cluster_resource.go index 4668ef6..e0fdaf0 100644 --- a/internal/provider/dedicated_cluster_resource.go +++ b/internal/provider/dedicated_cluster_resource.go @@ -3,23 +3,19 @@ package provider import ( "context" "fmt" - "net/http" - "sort" - "strings" "time" - clusterApi "github.com/c4pt0r/go-tidbcloud-sdk-v1/client/cluster" "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource/schema" - "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64planmodifier" - "github.com/hashicorp/terraform-plugin-framework/resource/schema/objectplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/mapplanmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/tidbcloud/terraform-provider-tidbcloud/tidbcloud" + "github.com/tidbcloud/tidbcloud-cli/pkg/tidbcloud/v1beta1/dedicated" ) // const dev = "DEVELOPER" @@ -28,17 +24,21 @@ import ( // // Enum: [AVAILABLE CREATING MODIFYING PAUSED RESUMING UNAVAILABLE IMPORTING MAINTAINING PAUSING] // type clusterStatus string -// const ( -// clusterStatusCreating clusterStatus = "CREATING" -// clusterStatusAvailable clusterStatus = "AVAILABLE" -// clusterStatusModifying clusterStatus = "MODIFYING" -// clusterStatusPaused clusterStatus = "PAUSED" -// clusterStatusResuming clusterStatus = "RESUMING" -// clusterStatusUnavailable clusterStatus = "UNAVAILABLE" -// clusterStatusImporting clusterStatus = "IMPORTING" -// clusterStatusMaintaining clusterStatus = "MAINTAINING" -// clusterStatusPausing clusterStatus = "PAUSING" -// ) +const ( + dedicatedClusterStatusCreating clusterStatus = "CREATING" + dedicatedClusterStatusDeleting clusterStatus = "DELETING" + dedicatedClusterStatusActive clusterStatus = "ACTIVE" + dedicatedClusterStatusRestoring clusterStatus = 
"RESTORING" + dedicatedClusterStatusMaintenance clusterStatus = "MAINTENANCE" + dedicatedClusterStatusDeleted clusterStatus = "DELETED" + dedicatedClusterStatusInactive clusterStatus = "INACTIVE" + dedicatedClusterStatusUPgrading clusterStatus = "UPGRADING" + dedicatedClusterStatusImporting clusterStatus = "IMPORTING" + dedicatedClusterStatusModifying clusterStatus = "MODIFYING" + dedicatedClusterStatusPausing clusterStatus = "PAUSING" + dedicatedClusterStatusPaused clusterStatus = "PAUSED" + dedicatedClusterStatusResuming clusterStatus = "RESUMING" +) // const ( // clusterServerlessCreateTimeout = 180 * time.Second @@ -50,23 +50,26 @@ import ( // ) type dedicatedClusterResourceData struct { - ProjectId types.String `tfsdk:"project_id"` - ClusterId types.String `tfsdk:"id"` - Name types.String `tfsdk:"name"` - CloudProvider types.String `tfsdk:"cloud_provider"` - RegionId types.String `tfsdk:"region_id"` - Labels map[string]string `tfsdk:"labels"` - RootPassword types.String `tfsdk:"root_password"` - Port types.Int64 `tfsdk:"port"` - Paused types.Bool `tfsdk:"paused"` - PausePlan pausePlan `tfsdk:"pause_plan"` - State types.String `tfsdk:"state"` - Version types.String `tfsdk:"version"` - CreatedBy types.String `tfsdk:"created_by"` - CreateTime types.String `tfsdk:"create_time"` - UpdateTime types.String `tfsdk:"update_time"` - RegionDisplayName types.String `tfsdk:"region_display_name"` - Annotations map[string]string `tfsdk:"annotations"` + ProjectId types.String `tfsdk:"project_id"` + ClusterId types.String `tfsdk:"id"` + Name types.String `tfsdk:"name"` + CloudProvider types.String `tfsdk:"cloud_provider"` + RegionId types.String `tfsdk:"region_id"` + Labels map[string]string `tfsdk:"labels"` + RootPassword types.String `tfsdk:"root_password"` + Port types.Int64 `tfsdk:"port"` + Paused types.Bool `tfsdk:"paused"` + PausePlan *pausePlan `tfsdk:"pause_plan"` + State types.String `tfsdk:"state"` + Version types.String `tfsdk:"version"` + CreatedBy types.String `tfsdk:"created_by"` + CreateTime types.String `tfsdk:"create_time"` + UpdateTime types.String `tfsdk:"update_time"` + RegionDisplayName types.String `tfsdk:"region_display_name"` + Annotations map[string]string `tfsdk:"annotations"` + TiDBNodeSetting tidbNodeSetting `tfsdk:"tidb_node_setting"` + TiKVNodeSetting tikvNodeSetting `tfsdk:"tikv_node_setting"` + TiFlashNodeSetting *tiflashNodeSetting `tfsdk:"tiflash_node_setting"` } type pausePlan struct { @@ -81,14 +84,14 @@ type tidbNodeSetting struct { } type nodeGroup struct { - NodeCount types.Int64 `tfsdk:"node_count"` - TiDBNodeGroupId types.String `tfsdk:"tidb_node_group_id"` - TIDBNodeGroupName types.String `tfsdk:"tidb_node_group_name"` - NodeSpecKey types.String `tfsdk:"node_spec_key"` - NodeSpecDisplayName types.String `tfsdk:"node_spec_display_name"` - IsDefaultGroup types.Bool `tfsdk:"is_default_group"` - State types.String `tfsdk:"state"` - NodeChangingProgress nodeChangingProgress `tfsdk:"node_changing_progress"` + NodeSpecKey types.String `tfsdk:"node_spec_key"` + NodeCount types.Int64 `tfsdk:"node_count"` + NodeGroupId types.String `tfsdk:"node_group_id"` + NodeGroupDisplayName types.String `tfsdk:"node_group_display_name"` + NodeSpecDisplayName types.String `tfsdk:"node_spec_display_name"` + IsDefaultGroup types.Bool `tfsdk:"is_default_group"` + State types.String `tfsdk:"state"` + NodeChangingProgress *nodeChangingProgress `tfsdk:"node_changing_progress"` } type nodeChangingProgress struct { @@ -96,6 +99,24 @@ type nodeChangingProgress struct { 
RemainingDeletionNodeCount types.Int64 `tfsdk:"remaining_deletion_node_count"` } +type tikvNodeSetting struct { + NodeSpecKey types.String `tfsdk:"node_spec_key"` + NodeCount types.Int64 `tfsdk:"node_count"` + StorageSizeGi types.Int64 `tfsdk:"storage_size_gi"` + StorageType types.String `tfsdk:"storage_type"` + NodeSpecDisplayName types.String `tfsdk:"node_spec_display_name"` + NodeChangingProgress *nodeChangingProgress `tfsdk:"node_changing_progress"` +} + +type tiflashNodeSetting struct { + NodeSpecKey types.String `tfsdk:"node_spec_key"` + NodeCount types.Int64 `tfsdk:"node_count"` + StorageSizeGi types.Int64 `tfsdk:"storage_size_gi"` + StorageType types.String `tfsdk:"storage_type"` + NodeSpecDisplayName types.String `tfsdk:"node_spec_display_name"` + NodeChangingProgress *nodeChangingProgress `tfsdk:"node_changing_progress"` +} + type dedicatedClusterResource struct { provider *tidbcloudProvider } @@ -104,11 +125,11 @@ func NewDedicatedClusterResource() resource.Resource { return &dedicatedClusterResource{} } -func (r *clusterResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { - resp.TypeName = req.ProviderTypeName + "_cluster" +func (r *dedicatedClusterResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_dedicated_cluster" } -func (r *clusterResource) Configure(_ context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { +func (r *dedicatedClusterResource) Configure(_ context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { // Prevent panic if the provider has not been configured. if req.ProviderData == nil { return @@ -121,118 +142,157 @@ func (r *clusterResource) Configure(_ context.Context, req resource.ConfigureReq } } -func (r *clusterResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { +func (r *dedicatedClusterResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { resp.Schema = schema.Schema{ - MarkdownDescription: "cluster resource", + MarkdownDescription: "dedicated cluster resource", Attributes: map[string]schema.Attribute{ "project_id": schema.StringAttribute{ - MarkdownDescription: "The ID of the project. 
You can get the project ID from [tidbcloud_projects datasource](../data-sources/projects.md).", - Required: true, - }, - "name": schema.StringAttribute{ - MarkdownDescription: "The name of the cluster.", - Required: true, + MarkdownDescription: "The ID of the project.", + Computed: true, }, "id": schema.StringAttribute{ - Computed: true, MarkdownDescription: "The ID of the cluster.", + Computed: true, PlanModifiers: []planmodifier.String{ stringplanmodifier.UseStateForUnknown(), }, }, - "cluster_type": schema.StringAttribute{ - MarkdownDescription: "Enum: \"DEDICATED\" \"DEVELOPER\", The cluster type.", + "name": schema.StringAttribute{ + MarkdownDescription: "The name of the cluster.", Required: true, }, "cloud_provider": schema.StringAttribute{ - MarkdownDescription: "Enum: \"AWS\" \"GCP\", The cloud provider on which your TiDB cluster is hosted.", + MarkdownDescription: "The cloud provider on which your cluster is hosted.", + Computed: true, + }, + "region_id": schema.StringAttribute{ + MarkdownDescription: "The region where the cluster is deployed.", Required: true, }, - "create_timestamp": schema.StringAttribute{ - MarkdownDescription: "The creation time of the cluster in Unix timestamp seconds (epoch time).", + "labels": schema.MapAttribute{ + MarkdownDescription: "A map of labels assigned to the cluster.", + Optional: true, Computed: true, - PlanModifiers: []planmodifier.String{ - stringplanmodifier.UseStateForUnknown(), + PlanModifiers: []planmodifier.Map{ + mapplanmodifier.UseStateForUnknown(), }, + ElementType: types.StringType, }, - "region": schema.StringAttribute{ - MarkdownDescription: "the region value should match the cloud provider's region code. You can get the complete list of available regions from the [tidbcloud_cluster_specs datasource](../data-sources/cluster_specs.md).", - Required: true, + "root_password": schema.StringAttribute{ + MarkdownDescription: "The root password to access the cluster.", + Optional: true, + }, + "port": schema.Int64Attribute{ + MarkdownDescription: "The port used for accessing the cluster.", + Optional: true, + }, + "paused": schema.BoolAttribute{ + MarkdownDescription: "Whether the cluster is paused.", + Optional: true, + }, + "pause_plan": schema.SingleNestedAttribute{ + MarkdownDescription: "Pause plan details for the cluster.", + Optional: true, + Attributes: map[string]schema.Attribute{ + "pause_type": schema.StringAttribute{ + MarkdownDescription: "The type of pause.", + Optional: true, + }, + "scheduled_resume_time": schema.StringAttribute{ + MarkdownDescription: "The scheduled time for resuming the cluster.", + Optional: true, + }, + }, + }, + "state": schema.StringAttribute{ + MarkdownDescription: "The current state of the cluster.", + Computed: true, + }, + "version": schema.StringAttribute{ + MarkdownDescription: "The version of the cluster.", + Computed: true, }, - "status": schema.SingleNestedAttribute{ - MarkdownDescription: "The status of the cluster.", + "created_by": schema.StringAttribute{ + MarkdownDescription: "The creator of the cluster.", Computed: true, - PlanModifiers: []planmodifier.Object{ - clusterResourceStatus(), + }, + "create_time": schema.StringAttribute{ + MarkdownDescription: "The creation time of the cluster.", + Computed: true, + }, + "update_time": schema.StringAttribute{ + MarkdownDescription: "The last update time of the cluster.", + Computed: true, + }, + "region_display_name": schema.StringAttribute{ + MarkdownDescription: "The display name of the region.", + Computed: true, + }, + "annotations": 
schema.MapAttribute{ + MarkdownDescription: "A map of annotations for the cluster.", + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.Map{ + mapplanmodifier.UseStateForUnknown(), }, + ElementType: types.StringType, + }, + "tidb_node_setting": schema.SingleNestedAttribute{ + MarkdownDescription: "Settings for TiDB nodes.", + Required: true, Attributes: map[string]schema.Attribute{ - "tidb_version": schema.StringAttribute{ - MarkdownDescription: "TiDB version.", - Computed: true, - PlanModifiers: []planmodifier.String{ - stringplanmodifier.UseStateForUnknown(), - }, + "node_spec_key": schema.StringAttribute{ + MarkdownDescription: "The node specification key.", + Required: true, }, - "cluster_status": schema.StringAttribute{ - MarkdownDescription: "Status of the cluster.", - Computed: true, + "node_count": schema.Int64Attribute{ + MarkdownDescription: "The number of nodes in the cluster.", + Required: true, }, - "connection_strings": schema.SingleNestedAttribute{ - MarkdownDescription: "Connection strings.", - Computed: true, - PlanModifiers: []planmodifier.Object{ - objectplanmodifier.UseStateForUnknown(), - }, - Attributes: map[string]schema.Attribute{ - "default_user": schema.StringAttribute{ - MarkdownDescription: "The default TiDB user for connection.", - Computed: true, - PlanModifiers: []planmodifier.String{ - stringplanmodifier.UseStateForUnknown(), + "node_groups": schema.ListNestedAttribute{ + MarkdownDescription: "List of node groups.", + Optional: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "node_spec_key": schema.StringAttribute{ + MarkdownDescription: "The node specification key.", + Computed: true, }, - }, - "standard": schema.SingleNestedAttribute{ - MarkdownDescription: "Standard connection string.", - Computed: true, - PlanModifiers: []planmodifier.Object{ - objectplanmodifier.UseStateForUnknown(), + "node_count": schema.Int64Attribute{ + MarkdownDescription: "The number of nodes in the group.", + Required: true, }, - Attributes: map[string]schema.Attribute{ - "host": schema.StringAttribute{ - MarkdownDescription: "The host of standard connection.", - Computed: true, - PlanModifiers: []planmodifier.String{ - stringplanmodifier.UseStateForUnknown(), - }, - }, - "port": schema.Int64Attribute{ - MarkdownDescription: "The TiDB port for connection. 
The port must be in the range of 1024-65535 except 10080.", - Computed: true, - PlanModifiers: []planmodifier.Int64{ - int64planmodifier.UseStateForUnknown(), - }, - }, + "node_group_id": schema.StringAttribute{ + MarkdownDescription: "The ID of the TiDB node group.", + Computed: true, }, - }, - "vpc_peering": schema.SingleNestedAttribute{ - MarkdownDescription: "VPC peering connection string.", - Computed: true, - PlanModifiers: []planmodifier.Object{ - objectplanmodifier.UseStateForUnknown(), + "node_group_display_name": schema.StringAttribute{ + MarkdownDescription: "The display name of the TiDB node group.", + Computed: true, + }, + "node_spec_display_name": schema.StringAttribute{ + MarkdownDescription: "The display name of the node spec.", + Computed: true, }, - Attributes: map[string]schema.Attribute{ - "host": schema.StringAttribute{ - MarkdownDescription: "The host of VPC peering connection.", - Computed: true, - PlanModifiers: []planmodifier.String{ - stringplanmodifier.UseStateForUnknown(), + "is_default_group": schema.BoolAttribute{ + MarkdownDescription: "Indicates if this is the default group.", + Computed: true, + }, + "state": schema.StringAttribute{ + MarkdownDescription: "The state of the node group.", + Computed: true, + }, + "node_changing_progress": schema.SingleNestedAttribute{ + MarkdownDescription: "Details of node change progress.", + Computed: true, + Attributes: map[string]schema.Attribute{ + "matching_node_spec_node_count": schema.Int64Attribute{ + MarkdownDescription: "Count of nodes matching the specification.", + Computed: true, }, - }, - "port": schema.Int64Attribute{ - MarkdownDescription: "The TiDB port for connection. The port must be in the range of 1024-65535 except 10080.", - Computed: true, - PlanModifiers: []planmodifier.Int64{ - int64planmodifier.UseStateForUnknown(), + "remaining_deletion_node_count": schema.Int64Attribute{ + MarkdownDescription: "Count of nodes remaining to be deleted.", + Computed: true, }, }, }, @@ -241,149 +301,81 @@ func (r *clusterResource) Schema(_ context.Context, _ resource.SchemaRequest, re }, }, }, - "config": schema.SingleNestedAttribute{ - MarkdownDescription: "The configuration of the cluster.", + "tikv_node_setting": schema.SingleNestedAttribute{ + MarkdownDescription: "Settings for TiKV nodes.", Required: true, Attributes: map[string]schema.Attribute{ - "root_password": schema.StringAttribute{ - MarkdownDescription: "The root password to access the cluster. It must be 8-64 characters.", - Optional: true, + "node_spec_key": schema.StringAttribute{ + MarkdownDescription: "The node specification key.", + Required: true, }, - "port": schema.Int64Attribute{ - MarkdownDescription: "The TiDB port for connection. The port must be in the range of 1024-65535 except 10080, 4000 in default.\n" + - " - For a Serverless Tier cluster, only port 4000 is available.", - Optional: true, - Computed: true, - PlanModifiers: []planmodifier.Int64{ - int64planmodifier.UseStateForUnknown(), - }, + "node_count": schema.Int64Attribute{ + MarkdownDescription: "The number of nodes in the cluster.", + Required: true, }, - "paused": schema.BoolAttribute{ - MarkdownDescription: "lag that indicates whether the cluster is paused. true means to pause the cluster, and false means to resume the cluster.\n" + - " - The cluster can be paused only when the cluster_status is \"AVAILABLE\"." 
+ - " - The cluster can be resumed only when the cluster_status is \"PAUSED\".", - Optional: true, + "storage_size_gi": schema.Int64Attribute{ + MarkdownDescription: "The storage size in GiB.", + Required: true, }, - "components": schema.SingleNestedAttribute{ - MarkdownDescription: "The components of the cluster.\n" + - " - For a Serverless Tier cluster, the components value can not be set." + - " - For a Dedicated Tier cluster, the components value must be set.", - Optional: true, - Computed: true, - PlanModifiers: []planmodifier.Object{ - objectplanmodifier.UseStateForUnknown(), - }, + "storage_type": schema.StringAttribute{ + MarkdownDescription: "The storage type.", + Required: true, + }, + "node_spec_display_name": schema.StringAttribute{ + MarkdownDescription: "The display name of the node spec.", + Computed: true, + }, + "node_changing_progress": schema.SingleNestedAttribute{ + MarkdownDescription: "Details of node change progress.", + Computed: true, Attributes: map[string]schema.Attribute{ - "tidb": schema.SingleNestedAttribute{ - MarkdownDescription: "The TiDB component of the cluster", - Required: true, - PlanModifiers: []planmodifier.Object{ - objectplanmodifier.UseStateForUnknown(), - }, - Attributes: map[string]schema.Attribute{ - "node_size": schema.StringAttribute{ - Required: true, - MarkdownDescription: "The size of the TiDB component in the cluster, You can get the available node size of each region from the [tidbcloud_cluster_specs datasource](../data-sources/cluster_specs.md).\n" + - " - If the vCPUs of TiDB or TiKV component is 2 or 4, then their vCPUs need to be the same.\n" + - " - If the vCPUs of TiDB or TiKV component is 2 or 4, then the cluster does not support TiFlash.\n" + - " - Can not modify node_size of an existing cluster.", - PlanModifiers: []planmodifier.String{ - stringplanmodifier.UseStateForUnknown(), - }, - }, - "node_quantity": schema.Int64Attribute{ - MarkdownDescription: "The number of nodes in the cluster. You can get the minimum and step of a node quantity from the [tidbcloud_cluster_specs datasource](../data-sources/cluster_specs.md).", - Required: true, - PlanModifiers: []planmodifier.Int64{ - int64planmodifier.UseStateForUnknown(), - }, - }, - }, - }, - "tikv": schema.SingleNestedAttribute{ - MarkdownDescription: "The TiKV component of the cluster", - Required: true, - PlanModifiers: []planmodifier.Object{ - objectplanmodifier.UseStateForUnknown(), - }, - Attributes: map[string]schema.Attribute{ - "node_size": schema.StringAttribute{ - MarkdownDescription: "The size of the TiKV component in the cluster, You can get the available node size of each region from the [tidbcloud_cluster_specs datasource](../data-sources/cluster_specs.md).\n" + - " - If the vCPUs of TiDB or TiKV component is 2 or 4, then their vCPUs need to be the same.\n" + - " - If the vCPUs of TiDB or TiKV component is 2 or 4, then the cluster does not support TiFlash.\n" + - " - Can not modify node_size of an existing cluster.", - Required: true, - PlanModifiers: []planmodifier.String{ - stringplanmodifier.UseStateForUnknown(), - }, - }, - "storage_size_gib": schema.Int64Attribute{ - MarkdownDescription: "The storage size of a node in the cluster. 
You can get the minimum and maximum of storage size from the [tidbcloud_cluster_specs datasource](../data-sources/cluster_specs.md).\n" + - " - Can not modify storage_size_gib of an existing cluster.", - Required: true, - PlanModifiers: []planmodifier.Int64{ - int64planmodifier.UseStateForUnknown(), - }, - }, - "node_quantity": schema.Int64Attribute{ - MarkdownDescription: "The number of nodes in the cluster. You can get the minimum and step of a node quantity from the [tidbcloud_cluster_specs datasource](../data-sources/cluster_specs.md).\n" + - " - TiKV do not support decreasing node quantity.\n" + - " - The node_quantity of TiKV must be a multiple of 3.", - Required: true, - PlanModifiers: []planmodifier.Int64{ - int64planmodifier.UseStateForUnknown(), - }, - }, - }, + "matching_node_spec_node_count": schema.Int64Attribute{ + MarkdownDescription: "Count of nodes matching the specification.", + Computed: true, }, - "tiflash": schema.SingleNestedAttribute{ - MarkdownDescription: "The TiFlash component of the cluster.", - Optional: true, - PlanModifiers: []planmodifier.Object{ - objectplanmodifier.UseStateForUnknown(), - }, - Attributes: map[string]schema.Attribute{ - "node_size": schema.StringAttribute{ - MarkdownDescription: "The size of the TiFlash component in the cluster, You can get the available node size of each region from the [tidbcloud_cluster_specs datasource](../data-sources/cluster_specs.md).\n" + - " - Can not modify node_size of an existing cluster.", - Required: true, - PlanModifiers: []planmodifier.String{ - stringplanmodifier.UseStateForUnknown(), - }, - }, - "storage_size_gib": schema.Int64Attribute{ - MarkdownDescription: "The storage size of a node in the cluster. You can get the minimum and maximum of storage size from the [tidbcloud_cluster_specs datasource](../data-sources/cluster_specs.md).\n" + - " - Can not modify storage_size_gib of an existing cluster.", - Required: true, - PlanModifiers: []planmodifier.Int64{ - int64planmodifier.UseStateForUnknown(), - }, - }, - "node_quantity": schema.Int64Attribute{ - MarkdownDescription: "The number of nodes in the cluster. 
You can get the minimum and step of a node quantity from the [tidbcloud_cluster_specs datasource](../data-sources/cluster_specs.md).\n" + - " - TiFlash do not support decreasing node quantity.", - Required: true, - PlanModifiers: []planmodifier.Int64{ - int64planmodifier.UseStateForUnknown(), - }, - }, - }, + "remaining_deletion_node_count": schema.Int64Attribute{ + MarkdownDescription: "Count of nodes remaining to be deleted.", + Computed: true, }, }, }, - "ip_access_list": schema.ListNestedAttribute{ - MarkdownDescription: "A list of IP addresses and Classless Inter-Domain Routing (CIDR) addresses that are allowed to access the TiDB Cloud cluster via [standard connection](https://docs.pingcap.com/tidbcloud/connect-to-tidb-cluster#connect-via-standard-connection).", - Optional: true, - NestedObject: schema.NestedAttributeObject{ - Attributes: map[string]schema.Attribute{ - "cidr": schema.StringAttribute{ - MarkdownDescription: "The IP address or CIDR range that you want to add to the cluster's IP access list.", - Required: true, - }, - "description": schema.StringAttribute{ - MarkdownDescription: "Description that explains the purpose of the entry.", - Required: true, - }, + }, + }, + "tiflash_node_setting": schema.SingleNestedAttribute{ + MarkdownDescription: "Settings for TiFlash nodes.", + Optional: true, + Attributes: map[string]schema.Attribute{ + "node_spec_key": schema.StringAttribute{ + MarkdownDescription: "The node specification key.", + Required: true, + }, + "node_count": schema.Int64Attribute{ + MarkdownDescription: "The number of nodes in the cluster.", + Required: true, + }, + "storage_size_gi": schema.Int64Attribute{ + MarkdownDescription: "The storage size in GiB.", + Required: true, + }, + "storage_type": schema.StringAttribute{ + MarkdownDescription: "The storage type.", + Required: true, + }, + "node_spec_display_name": schema.StringAttribute{ + MarkdownDescription: "The display name of the node spec.", + Computed: true, + }, + "node_changing_progress": schema.SingleNestedAttribute{ + MarkdownDescription: "Details of node change progress.", + Computed: true, + Attributes: map[string]schema.Attribute{ + "matching_node_spec_node_count": schema.Int64Attribute{ + MarkdownDescription: "Count of nodes matching the specification.", + Computed: true, + }, + "remaining_deletion_node_count": schema.Int64Attribute{ + MarkdownDescription: "Count of nodes remaining to be deleted.", + Computed: true, }, }, }, @@ -393,7 +385,7 @@ func (r *clusterResource) Schema(_ context.Context, _ resource.SchemaRequest, re } } -func (r clusterResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { +func (r dedicatedClusterResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { if !r.provider.configured { resp.Diagnostics.AddError( "Provider not configured", @@ -403,75 +395,34 @@ func (r clusterResource) Create(ctx context.Context, req resource.CreateRequest, } // get data from config - var data clusterResourceData + var data dedicatedClusterResourceData diags := req.Config.Get(ctx, &data) resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { return } - // for Serverless cluster, components is not allowed. or plan and state may be inconsistent - if data.ClusterType == dev { - if data.Config.Components != nil { - resp.Diagnostics.AddError("Create Error", fmt.Sprintf("components is not allowed in %s cluster_type", dev)) - return - } - } - - // for DEDICATED cluster, components is required. 
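+	// Note: the old cluster_type checks (serverless components forbidden,
+	// dedicated components required) are intentionally dropped below; this
+	// resource only manages Dedicated clusters, so no type branching is needed.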
-	if data.ClusterType == ded {
-		if data.Config.Components == nil {
-			resp.Diagnostics.AddError("Create Error", fmt.Sprintf("components is required in %s cluster_type", ded))
-			return
-		}
-	}
-
-	// write logs using the tflog package
-	// see https://pkg.go.dev/github.com/hashicorp/terraform-plugin-log/tflog
-	tflog.Trace(ctx, "created cluster_resource")
-	createClusterParams := clusterApi.NewCreateClusterParams().WithProjectID(data.ProjectId).WithBody(buildCreateClusterBody(data))
-	createClusterResp, err := r.provider.client.CreateCluster(createClusterParams)
+	tflog.Trace(ctx, "created dedicated_cluster_resource")
+	body := buildCreateDedicatedClusterBody(data)
+	cluster, err := r.provider.DedicatedClient.CreateCluster(ctx, &body)
 	if err != nil {
 		resp.Diagnostics.AddError("Create Error", fmt.Sprintf("Unable to call CreateCluster, got error: %s", err))
 		return
 	}
-	// set clusterId. other computed attributes are not returned by create, they will be set when refresh
-	clusterId := *createClusterResp.Payload.ID
+	// set clusterId; the other computed attributes are not returned by create and will be set on refresh
+	clusterId := *cluster.ClusterId
 	data.ClusterId = types.StringValue(clusterId)
 	if r.provider.sync {
-		var cluster *clusterApi.GetClusterOKBody
-		if data.ClusterType == dev {
-			tflog.Info(ctx, "wait serverless cluster ready")
-			cluster, err = WaitClusterReady(ctx, clusterServerlessCreateTimeout, clusterServerlessCreateInterval, data.ProjectId, clusterId, r.provider.client)
-			if err != nil {
-				resp.Diagnostics.AddError(
-					"Cluster creation failed",
-					fmt.Sprintf("Cluster is not ready, get error: %s", err),
-				)
-				return
-			}
-		} else {
-			tflog.Info(ctx, "wait dedicated cluster ready")
-			cluster, err = WaitClusterReady(ctx, clusterCreateTimeout, clusterCreateInterval, data.ProjectId, clusterId, r.provider.client)
-			if err != nil {
-				resp.Diagnostics.AddError(
-					"Cluster creation failed",
-					fmt.Sprintf("Cluster is not ready, get error: %s", err),
-				)
-				return
-			}
-		}
-		refreshClusterResourceData(ctx, cluster, &data)
-	} else {
-		// we refresh in create for any unknown value. if someone has other opinions which is better, he can delete the refresh logic
-		tflog.Trace(ctx, "read cluster_resource")
-		getClusterParams := clusterApi.NewGetClusterParams().WithProjectID(data.ProjectId).WithClusterID(data.ClusterId.ValueString())
-		getClusterResp, err := r.provider.client.GetCluster(getClusterParams)
+		tflog.Info(ctx, "wait dedicated cluster ready")
+		cluster, err = WaitDedicatedClusterReady(ctx, clusterCreateTimeout, clusterCreateInterval, clusterId, r.provider.DedicatedClient)
 		if err != nil {
-			resp.Diagnostics.AddError("Create Error", fmt.Sprintf("Unable to call GetCluster, got error: %s", err))
+			resp.Diagnostics.AddError(
+				"Cluster creation failed",
+				fmt.Sprintf("Cluster is not ready, get error: %s", err),
+			)
 			return
 		}
-		refreshClusterResourceData(ctx, getClusterResp.Payload, &data)
+		refreshDedicatedClusterResourceData(ctx, cluster, &data)
+	} else {
+		// keep the old resource's behavior and refresh in create for any unknown
+		// value; otherwise the computed attributes stay unknown after apply
+		cluster, err = r.provider.DedicatedClient.GetCluster(ctx, clusterId)
+		if err != nil {
+			resp.Diagnostics.AddError("Create Error", fmt.Sprintf("Unable to call GetCluster, got error: %s", err))
+			return
+		}
+		refreshDedicatedClusterResourceData(ctx, cluster, &data)
 	}

 	// save into the Terraform state.
 	diags = resp.State.Set(ctx, &data)
 	resp.Diagnostics.Append(diags...)
 }

-func buildCreateClusterBody(data clusterResourceData) clusterApi.CreateClusterBody {
-	// required
-	rootPassWord := data.Config.RootPassword.ValueString()
-	payload := clusterApi.CreateClusterBody{
-		Name:          &data.Name,
-		ClusterType:   &data.ClusterType,
-		CloudProvider: &data.CloudProvider,
-		Region:        &data.Region,
-		Config: &clusterApi.CreateClusterParamsBodyConfig{
-			RootPassword: &rootPassWord,
-		},
-	}
+func (r dedicatedClusterResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
+	var clusterId string

-	// optional
-	if data.Config.Components != nil {
-		tidb := data.Config.Components.TiDB
-		tikv := data.Config.Components.TiKV
-		tiflash := data.Config.Components.TiFlash
-
-		components := &clusterApi.CreateClusterParamsBodyConfigComponents{
-			Tidb: &clusterApi.CreateClusterParamsBodyConfigComponentsTidb{
-				NodeSize:     &tidb.NodeSize,
-				NodeQuantity: &tidb.NodeQuantity,
-			},
-			Tikv: &clusterApi.CreateClusterParamsBodyConfigComponentsTikv{
-				NodeSize:       &tikv.NodeSize,
-				StorageSizeGib: &tikv.StorageSizeGib,
-				NodeQuantity:   &tikv.NodeQuantity,
-			},
-		}
-		// tiflash is optional
-		if tiflash != nil {
-			components.Tiflash = &clusterApi.CreateClusterParamsBodyConfigComponentsTiflash{
-				NodeSize:       &tiflash.NodeSize,
-				StorageSizeGib: &tiflash.StorageSizeGib,
-				NodeQuantity:   &tiflash.NodeQuantity,
-			}
-		}
-
-		payload.Config.Components = components
-	}
-	if data.Config.IPAccessList != nil {
-		var IPAccessList []*clusterApi.CreateClusterParamsBodyConfigIPAccessListItems0
-		for _, key := range data.Config.IPAccessList {
-			cidr := key.CIDR
-			IPAccessList = append(IPAccessList, &clusterApi.CreateClusterParamsBodyConfigIPAccessListItems0{
-				Cidr:        &cidr,
-				Description: key.Description,
-			})
-		}
-		payload.Config.IPAccessList = IPAccessList
-	}
-	if !data.Config.Port.IsNull() && !data.Config.Port.IsUnknown() {
-		payload.Config.Port = int32(data.Config.Port.ValueInt64())
-	}
-
-	return payload
-}
-
-func (r clusterResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
-	var projectId, clusterId string
-
-	resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root("project_id"), &projectId)...)
-	resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root("id"), &clusterId)...)
+	resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root("cluster_id"), &clusterId)...)
 	if resp.Diagnostics.HasError() {
 		return
 	}

 	// call read api
-	tflog.Trace(ctx, "read cluster_resource")
-	getClusterParams := clusterApi.NewGetClusterParams().WithProjectID(projectId).WithClusterID(clusterId)
-	getClusterResp, err := r.provider.client.GetCluster(getClusterParams)
+	tflog.Trace(ctx, "read dedicated_cluster_resource")
+	cluster, err := r.provider.DedicatedClient.GetCluster(ctx, clusterId)
 	if err != nil {
-		resp.Diagnostics.AddError("Read Error", fmt.Sprintf("Unable to call GetClusterById, got error: %s", err))
+		resp.Diagnostics.AddError("Read Error", fmt.Sprintf("Unable to call GetCluster, got error: %s", err))
 		return
 	}

 	// refresh data with read result
-	var data clusterResourceData
+	var data dedicatedClusterResourceData
-	// root_password, ip_access_list and pause will not return by read api, so we just use state's value even it changed on console!
-	// use types.String in case ImportState method throw unhandled null value
+	// root_password and paused are not returned by the read API, so we keep the
+	// state's values even if they were changed in the console!
+	// use types.String in case the ImportState method throws an unhandled null value
 	var rootPassword types.String
-	var iPAccessList []ipAccess
 	var paused *bool
-	resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root("config").AtName("root_password"), &rootPassword)...)
-	resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root("config").AtName("ip_access_list"), &iPAccessList)...)
-	resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root("config").AtName("paused"), &paused)...)
+	// the new schema keeps root_password and paused at the top level, so the
+	// old path.Root("config") lookups no longer resolve
+	resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root("root_password"), &rootPassword)...)
+	resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root("paused"), &paused)...)
-	data.Config.RootPassword = rootPassword
-	data.Config.IPAccessList = iPAccessList
-	data.Config.Paused = paused
+	data.RootPassword = rootPassword
+	if paused != nil {
+		// guard the dereference: paused is optional and may be absent from state
+		data.Paused = types.BoolValue(*paused)
+	}
+	refreshDedicatedClusterResourceData(ctx, cluster, &data)

 	// save into the Terraform state
 	diags := resp.State.Set(ctx, &data)
 	resp.Diagnostics.Append(diags...)
 }

-func refreshClusterResourceData(ctx context.Context, resp *clusterApi.GetClusterOKBody, data *clusterResourceData) {
+func refreshDedicatedClusterResourceData(ctx context.Context, resp *dedicated.TidbCloudOpenApidedicatedv1beta1Cluster, data *dedicatedClusterResourceData) {
 	// must return
-	data.Name = resp.Name
-	data.ClusterId = types.StringValue(*resp.ID)
-	data.Region = resp.Region
-	data.ProjectId = *resp.ProjectID
-	data.ClusterType = resp.ClusterType
-	data.CloudProvider = resp.CloudProvider
-	data.CreateTimestamp = types.StringValue(resp.CreateTimestamp)
-	data.Config.Port = types.Int64Value(int64(resp.Config.Port))
-	tidb := resp.Config.Components.Tidb
-	tikv := resp.Config.Components.Tikv
-	data.Config.Components = &components{
-		TiDB: &componentTiDB{
-			NodeSize:     *tidb.NodeSize,
-			NodeQuantity: *tidb.NodeQuantity,
-		},
-		TiKV: &componentTiKV{
-			NodeSize:       *tikv.NodeSize,
-			NodeQuantity:   *tikv.NodeQuantity,
-			StorageSizeGib: *tikv.StorageSizeGib,
-		},
-	}
-
-	var standard connectionStandard
-	var vpcPeering connectionVpcPeering
-	if resp.Status.ConnectionStrings.Standard != nil {
-		standard.Host = resp.Status.ConnectionStrings.Standard.Host
-		standard.Port = resp.Status.ConnectionStrings.Standard.Port
-	}
-	if resp.Status.ConnectionStrings.VpcPeering != nil {
-		vpcPeering.Host = resp.Status.ConnectionStrings.VpcPeering.Host
-		vpcPeering.Port = resp.Status.ConnectionStrings.VpcPeering.Port
-	}
-	data.Status = &clusterStatusDataSource{
-		TidbVersion:   resp.Status.TidbVersion,
-		ClusterStatus: types.StringValue(resp.Status.ClusterStatus),
-		ConnectionStrings: &connection{
-			DefaultUser: resp.Status.ConnectionStrings.DefaultUser,
-			Standard:    &standard,
-			VpcPeering:  &vpcPeering,
-		},
-	}
-	// may return
-	tiflash := resp.Config.Components.Tiflash
-	if tiflash != nil {
-		data.Config.Components.TiFlash = &componentTiFlash{
-			NodeSize:       *tiflash.NodeSize,
-			NodeQuantity:   *tiflash.NodeQuantity,
-			StorageSizeGib: *tiflash.StorageSizeGib,
-		}
-	}
-
-	// not return
-	// IPAccessList, and password and pause will not update for it will not return by read api
-
-}
-
-// Update since open api is patch without check for the invalid parameter. we do a lot of check here to avoid inconsistency
-// check the date can't be updated
-// if plan and state is different, we can execute updated
-func (r clusterResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
-	// get plan
-	var data clusterResourceData
-	diags := req.Plan.Get(ctx, &data)
-	resp.Diagnostics.Append(diags...)
-	if resp.Diagnostics.HasError() {
-		return
-	}
-	// get state
-	var state clusterResourceData
-	diags = req.State.Get(ctx, &state)
-	resp.Diagnostics.Append(diags...)
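+	// NOTE: the dereferences below (*resp.ClusterId, *resp.Version,
+	// *g.NodeSpecKey, ...) assume the dedicated API always populates these
+	// pointer fields in the generated model; a nil value from the server
+	// would panic here.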
- if resp.Diagnostics.HasError() { - return - } - - // Severless can not be changed now - if data.ClusterType == dev { - resp.Diagnostics.AddError( - "Update error", - "Unable to update Serverless cluster", - ) - return - } - - // only components and paused can be changed now - if data.Name != state.Name || data.ClusterType != state.ClusterType || data.Region != state.Region || data.CloudProvider != state.CloudProvider || - data.ProjectId != state.ProjectId || data.ClusterId != state.ClusterId { - resp.Diagnostics.AddError( - "Update error", - "You may update the name,cluster_type,region,cloud_provider or projectId. They can not be changed, only components can be changed now", - ) - return - } - if !data.Config.Port.IsNull() && !data.Config.Port.IsNull() && data.Config.Port.ValueInt64() != state.Config.Port.ValueInt64() { - resp.Diagnostics.AddError( - "Update error", - "port can not be changed, only components can be changed now", - ) - return - } - if data.Config.IPAccessList != nil { - // You cannot add an IP access list to an existing cluster without an IP rule. - if len(state.Config.IPAccessList) == 0 { - resp.Diagnostics.AddError( - "Update error", - "ip_access_list can not be added to the existing cluster.", - ) - return - } - - // You cannot insert or delete IP rule. - if len(data.Config.IPAccessList) != len(state.Config.IPAccessList) { - resp.Diagnostics.AddError( - "Update error", - "ip_access_list can not be changed, only components can be changed now", - ) - return - } - - // You cannot update the IP rule. - newIPAccessList := make([]ipAccess, len(data.Config.IPAccessList)) - copy(newIPAccessList, data.Config.IPAccessList) - sort.Slice(newIPAccessList, func(i, j int) bool { - return newIPAccessList[i].CIDR < newIPAccessList[j].CIDR - }) - - currentIPAccessList := make([]ipAccess, len(state.Config.IPAccessList)) - copy(currentIPAccessList, state.Config.IPAccessList) - sort.Slice(currentIPAccessList, func(i, j int) bool { - return currentIPAccessList[i].CIDR < currentIPAccessList[j].CIDR - }) - - for index, key := range newIPAccessList { - if currentIPAccessList[index].CIDR != key.CIDR || currentIPAccessList[index].Description != key.Description { - resp.Diagnostics.AddError( - "Update error", - "ip_access_list can not be changed, only components can be changed now", - ) - return + data.ClusterId = types.StringValue(*resp.ClusterId) + data.Name = types.StringValue(resp.DisplayName) + data.CloudProvider = types.StringValue(string(*resp.CloudProvider)) + data.RegionId = types.StringValue(resp.RegionId) + data.Labels = *resp.Labels + data.Port = types.Int64Value(int64(resp.Port)) + data.State = types.StringValue(string(*resp.State)) + data.Version = types.StringValue(*resp.Version) + data.CreatedBy = types.StringValue(*resp.CreatedBy) + data.CreateTime = types.StringValue(resp.CreateTime.String()) + data.UpdateTime = types.StringValue(resp.UpdateTime.String()) + data.RegionDisplayName = types.StringValue(*resp.RegionDisplayName) + data.Annotations = *resp.Annotations + + // tidb node setting + var tidbNodeCounts int64 + var dataNodeGroups []nodeGroup + for _, g := range resp.TidbNodeSetting.TidbNodeGroups { + var tidbNodeChangingProgress *nodeChangingProgress + if g.NodeChangingProgress != nil { + tidbNodeChangingProgress = &nodeChangingProgress{ + MatchingNodeSpecNodeCount: convertInt32PtrToInt64(g.NodeChangingProgress.MatchingNodeSpecNodeCount), + RemainingDeletionNodeCount: convertInt32PtrToInt64(g.NodeChangingProgress.RemainingDeletionNodeCount), } } - } else { - // You cannot 
remove the IP access list. - if len(state.Config.IPAccessList) > 0 { - resp.Diagnostics.AddError( - "Update error", - "ip_access_list can not be changed, only components can be changed now", - ) - return - } - } - - // check Components - tidb := data.Config.Components.TiDB - tikv := data.Config.Components.TiKV - tiflash := data.Config.Components.TiFlash - tidbState := state.Config.Components.TiDB - tikvState := state.Config.Components.TiKV - tiflashState := state.Config.Components.TiFlash - if tidb.NodeSize != tidbState.NodeSize { - resp.Diagnostics.AddError( - "Update error", - "tidb node_size can't be changed", - ) - return - } - if tikv.NodeSize != tikvState.NodeSize || tikv.StorageSizeGib != tikvState.StorageSizeGib { - resp.Diagnostics.AddError( - "Update error", - "tikv node_size or storage_size_gib can't be changed", - ) - return + dataNodeGroups = append(dataNodeGroups, nodeGroup{ + NodeSpecKey: types.StringValue(*g.NodeSpecKey), + NodeCount: types.Int64Value(int64(g.NodeCount)), + NodeGroupId: types.StringValue(*g.TidbNodeGroupId), + NodeGroupDisplayName: types.StringValue(*g.DisplayName), + NodeSpecDisplayName: types.StringValue(*g.NodeSpecDisplayName), + IsDefaultGroup: types.BoolValue(bool(*g.IsDefaultGroup)), + State: types.StringValue(string(*g.State)), + NodeChangingProgress: tidbNodeChangingProgress, + }) + tidbNodeCounts += int64(g.NodeCount) } - if tiflash != nil && tiflashState != nil { - // if cluster have tiflash already, then we can't specify NodeSize and StorageSizeGib - if tiflash.NodeSize != tiflashState.NodeSize || tiflash.StorageSizeGib != tiflashState.StorageSizeGib { - resp.Diagnostics.AddError( - "Update error", - "tiflash node_size or storage_size_gib can't be changed", - ) - return - } + data.TiDBNodeSetting = tidbNodeSetting{ + NodeSpecKey: types.StringValue(resp.TidbNodeSetting.NodeSpecKey), + NodeCount: types.Int64Value(tidbNodeCounts), + NodeGroups: dataNodeGroups, } - // build UpdateClusterBody - var updateClusterBody clusterApi.UpdateClusterBody - updateClusterBody.Config = &clusterApi.UpdateClusterParamsBodyConfig{} - // build paused - if data.Config.Paused != nil { - if state.Config.Paused == nil || *data.Config.Paused != *state.Config.Paused { - updateClusterBody.Config.Paused = data.Config.Paused + // tikv node setting + var tikvNodeChangingProgress *nodeChangingProgress + if resp.TikvNodeSetting.NodeChangingProgress != nil { + tikvNodeChangingProgress = &nodeChangingProgress{ + MatchingNodeSpecNodeCount: convertInt32PtrToInt64(resp.TikvNodeSetting.NodeChangingProgress.MatchingNodeSpecNodeCount), + RemainingDeletionNodeCount: convertInt32PtrToInt64(resp.TikvNodeSetting.NodeChangingProgress.RemainingDeletionNodeCount), } } - // build components - var isComponentsChanged = false - if tidb.NodeQuantity != tidbState.NodeQuantity || tikv.NodeQuantity != tikvState.NodeQuantity { - isComponentsChanged = true + data.TiKVNodeSetting = tikvNodeSetting{ + NodeSpecKey: types.StringValue(resp.TikvNodeSetting.NodeSpecKey), + NodeCount: types.Int64Value(int64(resp.TikvNodeSetting.NodeCount)), + StorageSizeGi: types.Int64Value(int64(resp.TikvNodeSetting.StorageSizeGi)), + StorageType: types.StringValue(string(resp.TikvNodeSetting.StorageType)), + NodeSpecDisplayName: types.StringValue(*resp.TikvNodeSetting.NodeSpecDisplayName), + NodeChangingProgress: tikvNodeChangingProgress, } - var componentTiFlash *clusterApi.UpdateClusterParamsBodyConfigComponentsTiflash - if tiflash != nil { - if tiflashState == nil { - isComponentsChanged = true - componentTiFlash = 
&clusterApi.UpdateClusterParamsBodyConfigComponentsTiflash{ - NodeQuantity: &tiflash.NodeQuantity, - NodeSize: &tiflash.NodeSize, - StorageSizeGib: &tiflash.StorageSizeGib, - } - } else if tiflash.NodeQuantity != tiflashState.NodeQuantity { - isComponentsChanged = true - // NodeSize can't be changed - componentTiFlash = &clusterApi.UpdateClusterParamsBodyConfigComponentsTiflash{ - NodeQuantity: &tiflash.NodeQuantity, + // may return + // tiflash node setting + if resp.TiflashNodeSetting != nil { + var tiflashNodeChangingProgress *nodeChangingProgress + if resp.TiflashNodeSetting.NodeChangingProgress != nil { + tiflashNodeChangingProgress = &nodeChangingProgress{ + MatchingNodeSpecNodeCount: convertInt32PtrToInt64(resp.TiflashNodeSetting.NodeChangingProgress.MatchingNodeSpecNodeCount), + RemainingDeletionNodeCount: convertInt32PtrToInt64(resp.TiflashNodeSetting.NodeChangingProgress.RemainingDeletionNodeCount), } } - } - - if isComponentsChanged { - updateClusterBody.Config.Components = &clusterApi.UpdateClusterParamsBodyConfigComponents{ - Tidb: &clusterApi.UpdateClusterParamsBodyConfigComponentsTidb{ - NodeQuantity: &tidb.NodeQuantity, - }, - Tikv: &clusterApi.UpdateClusterParamsBodyConfigComponentsTikv{ - NodeQuantity: &tikv.NodeQuantity, - }, - Tiflash: componentTiFlash, - } - } - - tflog.Trace(ctx, "update cluster_resource") - updateClusterParams := clusterApi.NewUpdateClusterParams().WithProjectID(data.ProjectId).WithClusterID(data.ClusterId.ValueString()).WithBody(updateClusterBody) - _, err := r.provider.client.UpdateCluster(updateClusterParams) - if err != nil { - resp.Diagnostics.AddError("Update Error", fmt.Sprintf("Unable to call UpdateClusterById, got error: %s", err)) - return - } - - if r.provider.sync { - tflog.Info(ctx, "wait cluster ready") - cluster, err := WaitClusterReady(ctx, clusterUpdateTimeout, clusterUpdateInterval, data.ProjectId, data.ClusterId.ValueString(), r.provider.client) - if err != nil { - resp.Diagnostics.AddError( - "Cluster update failed", - fmt.Sprintf("Cluster is not ready, get error: %s", err), - ) - return - } - refreshClusterResourceData(ctx, cluster, &data) - } else { - // we refresh for any unknown value. if someone has other opinions which is better, he can delete the refresh logic - tflog.Trace(ctx, "read cluster_resource") - getClusterResp, err := r.provider.client.GetCluster(clusterApi.NewGetClusterParams().WithProjectID(data.ProjectId).WithClusterID(data.ClusterId.ValueString())) - if err != nil { - resp.Diagnostics.AddError("Update Error", fmt.Sprintf("Unable to call GetClusterById, got error: %s", err)) - return + data.TiFlashNodeSetting = &tiflashNodeSetting{ + NodeSpecKey: types.StringValue(resp.TiflashNodeSetting.NodeSpecKey), + NodeCount: types.Int64Value(int64(resp.TiflashNodeSetting.NodeCount)), + StorageSizeGi: types.Int64Value(int64(resp.TiflashNodeSetting.StorageSizeGi)), + StorageType: types.StringValue(string(resp.TiflashNodeSetting.StorageType)), + NodeSpecDisplayName: types.StringValue(*resp.TiflashNodeSetting.NodeSpecDisplayName), + NodeChangingProgress: tiflashNodeChangingProgress, } - refreshClusterResourceData(ctx, getClusterResp.Payload, &data) } - // save into the Terraform state. - diags = resp.State.Set(ctx, &data) - resp.Diagnostics.Append(diags...) 
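+	// TODO: project_id is declared Computed in the schema but is never
+	// populated here, so it would remain unknown after apply; it needs to be
+	// filled from the API response (e.g. via the cluster labels) or dropped
+	// from the schema.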
+	// not returned by the read API:
+	// root_password and paused are never updated here, they are kept from state
 }

-func (r clusterResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
-	var data clusterResourceData
+// Update is not implemented for dedicated clusters yet. The open API is a
+// PATCH without server-side validation of immutable parameters, so the ported
+// implementation will need the same plan/state checks the old cluster
+// resource performed (see the removed Update above) to avoid inconsistency.
+func (r dedicatedClusterResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
+	// TODO: port the old cluster_resource Update logic (immutable-field checks,
+	// paused handling, node-count changes) to the dedicated API.
+	panic("not implemented")
+}

-	diags := req.State.Get(ctx, &data)
-	resp.Diagnostics.Append(diags...)
+func (r dedicatedClusterResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
+	var clusterId string
+	resp.Diagnostics.Append(req.State.GetAttribute(ctx, path.Root("cluster_id"), &clusterId)...)
 	if resp.Diagnostics.HasError() {
 		return
 	}

-	tflog.Trace(ctx, "delete cluster_resource")
-	_, err := r.provider.client.DeleteCluster(clusterApi.NewDeleteClusterParams().WithProjectID(data.ProjectId).WithClusterID(data.ClusterId.ValueString()))
+	tflog.Trace(ctx, "delete dedicated_cluster_resource")
+	_, err := r.provider.DedicatedClient.DeleteCluster(ctx, clusterId)
 	if err != nil {
-		resp.Diagnostics.AddError("Delete Error", fmt.Sprintf("Unable to call DeleteClusterById, got error: %s", err))
+		resp.Diagnostics.AddError("Delete Error", fmt.Sprintf("Unable to call DeleteCluster, got error: %s", err))
 		return
 	}
 }

-func (r clusterResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
-	idParts := strings.Split(req.ID, ",")
+// func (r clusterResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
+// 	idParts := strings.Split(req.ID, ",")

-	if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" {
-		resp.Diagnostics.AddError(
-			"Unexpected Import Identifier",
-			fmt.Sprintf("Expected import identifier with format: project_id,cluster_id. Got: %q", req.ID),
-		)
-		return
-	}
+// 	if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" {
+// 		resp.Diagnostics.AddError(
+// 			"Unexpected Import Identifier",
+// 			fmt.Sprintf("Expected import identifier with format: project_id,cluster_id. Got: %q", req.ID),
+// 		)
+// 		return
+// 	}

-	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...)
-	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("id"), idParts[1])...)
-}
+// 	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("project_id"), idParts[0])...)
+// 	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("id"), idParts[1])...)
+// }

-func WaitClusterReady(ctx context.Context, timeout time.Duration, interval time.Duration, projectId, clusterId string,
-	client tidbcloud.TiDBCloudClient) (*clusterApi.GetClusterOKBody, error) {
+func WaitDedicatedClusterReady(ctx context.Context, timeout time.Duration, interval time.Duration, clusterId string,
+	client tidbcloud.TiDBCloudDedicatedClient) (*dedicated.TidbCloudOpenApidedicatedv1beta1Cluster, error) {
 	stateConf := &retry.StateChangeConf{
 		Pending: []string{
-			string(clusterStatusCreating),
-			string(clusterStatusModifying),
-			string(clusterStatusResuming),
-			string(clusterStatusUnavailable),
-			string(clusterStatusImporting),
-			string(clusterStatusPausing),
+			string(dedicatedClusterStatusCreating),
+			string(dedicatedClusterStatusModifying),
+			string(dedicatedClusterStatusResuming),
+			string(dedicatedClusterStatusImporting),
+			string(dedicatedClusterStatusPausing),
+			string(dedicatedClusterStatusUpgrading),
 		},
 		Target: []string{
-			string(clusterStatusAvailable),
-			string(clusterStatusPaused),
-			string(clusterStatusMaintaining),
+			string(dedicatedClusterStatusActive),
+			string(dedicatedClusterStatusPaused),
+			string(dedicatedClusterStatusMaintenance),
 		},
 		Timeout:      timeout,
 		MinTimeout:   500 * time.Millisecond,
 		PollInterval: interval,
-		Refresh:      clusterStateRefreshFunc(ctx, projectId, clusterId, client),
+		Refresh:      dedicatedClusterStateRefreshFunc(ctx, clusterId, client),
 	}

 	outputRaw, err := stateConf.WaitForStateContext(ctx)

-	if output, ok := outputRaw.(*clusterApi.GetClusterOKBody); ok {
+	if output, ok := outputRaw.(*dedicated.TidbCloudOpenApidedicatedv1beta1Cluster); ok {
 		return output, err
 	}
 	return nil, err
}

-func clusterStateRefreshFunc(ctx context.Context, projectId, clusterId string,
-	client tidbcloud.TiDBCloudClient) retry.StateRefreshFunc {
+func dedicatedClusterStateRefreshFunc(ctx context.Context, clusterId string,
+	client tidbcloud.TiDBCloudDedicatedClient) retry.StateRefreshFunc {
 	return func() (interface{}, string, error) {
-		param := clusterApi.NewGetClusterParams().WithProjectID(projectId).WithClusterID(clusterId).WithContext(ctx)
-		getClusterResp, err := client.GetCluster(param)
+		tflog.Trace(ctx, "Waiting for dedicated cluster ready")
+		cluster, err := client.GetCluster(ctx, clusterId)
 		if err != nil {
-			tflog.Warn(ctx, fmt.Sprintf("get cluster error: %s", err))
-			if getClusterResp != nil && getClusterResp.Code() < http.StatusInternalServerError {
-				return nil, "", err
-			} else {
-				// regard as not found and retry again.
Default is 20 times
-				return nil, "", nil
-			}
+			return nil, "", err
 		}
-		return getClusterResp.Payload, getClusterResp.Payload.Status.ClusterStatus, nil
+		return cluster, string(*cluster.State), nil
+	}
+}
+
+func buildCreateDedicatedClusterBody(data dedicatedClusterResourceData) dedicated.TidbCloudOpenApidedicatedv1beta1Cluster {
+	displayName := data.Name.ValueString()
+	regionId := data.RegionId.ValueString()
+	rootPassword := data.RootPassword.ValueString()
+	version := data.Version.ValueString()
+
+	// tidb node groups
+	var nodeGroups []dedicated.Dedicatedv1beta1TidbNodeGroup
+	for _, group := range data.TiDBNodeSetting.NodeGroups {
+		groupDisplayName := group.NodeGroupDisplayName.ValueString()
+		nodeGroups = append(nodeGroups, dedicated.Dedicatedv1beta1TidbNodeGroup{
+			NodeCount:   int32(group.NodeCount.ValueInt64()),
+			DisplayName: &groupDisplayName,
+		})
+	}
+
+	// tidb node setting
+	tidbNodeSpecKey := data.TiDBNodeSetting.NodeSpecKey.ValueString()
+	tidbNodeSetting := dedicated.V1beta1ClusterTidbNodeSetting{
+		NodeSpecKey:    tidbNodeSpecKey,
+		TidbNodeGroups: nodeGroups,
+	}
+
+	// tikv node setting
+	tikvNodeSpecKey := data.TiKVNodeSetting.NodeSpecKey.ValueString()
+	tikvNodeCount := int32(data.TiKVNodeSetting.NodeCount.ValueInt64())
+	tikvStorageSizeGi := int32(data.TiKVNodeSetting.StorageSizeGi.ValueInt64())
+	tikvStorageType := dedicated.ClusterStorageNodeSettingStorageType(data.TiKVNodeSetting.StorageType.ValueString())
+	tikvNodeSetting := dedicated.V1beta1ClusterStorageNodeSetting{
+		NodeSpecKey:   tikvNodeSpecKey,
+		NodeCount:     tikvNodeCount,
+		StorageSizeGi: tikvStorageSizeGi,
+		StorageType:   tikvStorageType,
+	}
+
+	var tiflashNodeSetting *dedicated.V1beta1ClusterStorageNodeSetting
+	// tiflash node setting: size the TiFlash group from the TiFlash setting,
+	// not the TiKV one
+	if data.TiFlashNodeSetting != nil {
+		tiflashNodeSpecKey := data.TiFlashNodeSetting.NodeSpecKey.ValueString()
+		tiflashNodeCount := int32(data.TiFlashNodeSetting.NodeCount.ValueInt64())
+		tiflashStorageSizeGi := int32(data.TiFlashNodeSetting.StorageSizeGi.ValueInt64())
+		tiflashStorageType := dedicated.ClusterStorageNodeSettingStorageType(data.TiFlashNodeSetting.StorageType.ValueString())
+		tiflashNodeSetting = &dedicated.V1beta1ClusterStorageNodeSetting{
+			NodeSpecKey:   tiflashNodeSpecKey,
+			NodeCount:     tiflashNodeCount,
+			StorageSizeGi: tiflashStorageSizeGi,
+			StorageType:   tiflashStorageType,
+		}
+	}
+
+	return dedicated.TidbCloudOpenApidedicatedv1beta1Cluster{
+		DisplayName:        displayName,
+		RegionId:           regionId,
+		Labels:             &data.Labels,
+		TidbNodeSetting:    tidbNodeSetting,
+		TikvNodeSetting:    tikvNodeSetting,
+		TiflashNodeSetting: tiflashNodeSetting,
+		Port:               int32(data.Port.ValueInt64()),
+		RootPassword:       &rootPassword,
+		Version:            &version,
+	}
+}
diff --git a/internal/provider/provider.go b/internal/provider/provider.go
index 6a629e8..0724772 100644
--- a/internal/provider/provider.go
+++ b/internal/provider/provider.go
@@ -11,7 +11,6 @@ import (
 	"github.com/hashicorp/terraform-plugin-framework/resource"
 	"github.com/hashicorp/terraform-plugin-framework/types"
 	"github.com/tidbcloud/terraform-provider-tidbcloud/tidbcloud"
-	"github.com/tidbcloud/terraform-provider-tidbcloud/internal/service/dedicated"
 )

 // Ensure the implementation satisfies the provider.Provider interface.
@@ -137,6 +136,7 @@ func (p *tidbcloudProvider) Resources(ctx context.Context) []func() resource.Res
 		NewBackupResource,
 		NewRestoreResource,
 		NewImportResource,
+		NewDedicatedClusterResource,
 	}
 }

@@ -150,10 +150,7 @@ func (p *tidbcloudProvider) DataSources(ctx context.Context) []func() datasource
 		NewDedicatedRegionsDataSource,
 		NewDedicatedRegionDataSource,
-<<<<<<< HEAD
-=======
 		NewDedicatedCloudProvidersDataSource,
->>>>>>> main
 	}
 }
diff --git a/internal/provider/util.go b/internal/provider/util.go
index 846341c..771083a 100644
--- a/internal/provider/util.go
+++ b/internal/provider/util.go
@@ -3,6 +3,8 @@ package provider
 import (
 	cryptorand "crypto/rand"
 	"math/big"
+
+	"github.com/hashicorp/terraform-plugin-framework/types"
 )

 const (
@@ -49,3 +53,10 @@ type Knowable interface {
 func IsKnown(t Knowable) bool {
 	return !t.IsUnknown() && !t.IsNull()
 }
+
+func convertInt32PtrToInt64(v *int32) types.Int64 {
+	if v == nil {
+		return types.Int64Null()
+	}
+	return types.Int64Value(int64(*v))
+}
diff --git a/tidbcloud/dedicated_api_client.go b/tidbcloud/dedicated_api_client.go
index 6903cc7..11c9e7c 100644
--- a/tidbcloud/dedicated_api_client.go
+++ b/tidbcloud/dedicated_api_client.go
@@ -19,6 +19,9 @@ type TiDBCloudDedicatedClient interface {
 	ListRegions(ctx context.Context, cloudProvider string, projectId string) ([]dedicated.Commonv1beta1Region, error)
 	GetRegion(ctx context.Context, regionId string) (*dedicated.Commonv1beta1Region, error)
 	ListCloudProviders(ctx context.Context, projectId string) ([]dedicated.V1beta1RegionCloudProvider, error)
+	CreateCluster(ctx context.Context, body *dedicated.TidbCloudOpenApidedicatedv1beta1Cluster) (*dedicated.TidbCloudOpenApidedicatedv1beta1Cluster, error)
+	GetCluster(ctx context.Context, clusterId string) (*dedicated.TidbCloudOpenApidedicatedv1beta1Cluster, error)
+	DeleteCluster(ctx context.Context, clusterId string) (*dedicated.TidbCloudOpenApidedicatedv1beta1Cluster, error)
 }

 func NewDedicatedApiClient(rt http.RoundTripper, dedicatedEndpoint string, userAgent string) (*dedicated.APIClient, error) {
@@ -89,6 +92,25 @@ func (d *DedicatedClientDelegate) ListCloudProviders(ctx context.Context, projec
 	return resp.CloudProviders, parseError(err, h)
 }

+func (d *DedicatedClientDelegate) CreateCluster(ctx context.Context, body *dedicated.TidbCloudOpenApidedicatedv1beta1Cluster) (*dedicated.TidbCloudOpenApidedicatedv1beta1Cluster, error) {
+	r := d.dc.ClusterServiceAPI.ClusterServiceCreateCluster(ctx)
+	if body != nil {
+		r = r.Cluster(*body)
+	}
+	c, h, err := r.Execute()
+	return c, parseError(err, h)
+}
+
+func (d *DedicatedClientDelegate) GetCluster(ctx context.Context, clusterId string) (*dedicated.TidbCloudOpenApidedicatedv1beta1Cluster, error) {
+	resp, h, err := d.dc.ClusterServiceAPI.ClusterServiceGetCluster(ctx, clusterId).Execute()
+	return resp, parseError(err, h)
+}
+
+func (d *DedicatedClientDelegate) DeleteCluster(ctx context.Context, clusterId string) (*dedicated.TidbCloudOpenApidedicatedv1beta1Cluster, error) {
+	resp, h, err := d.dc.ClusterServiceAPI.ClusterServiceDeleteCluster(ctx, clusterId).Execute()
+	return resp, parseError(err, h)
+}
+
 func parseError(err error, resp *http.Response) error {
 	defer func() {
 		if resp != nil {