fix: delete cluster from console #239

Merged
12 commits merged on Nov 14, 2023
2 changes: 1 addition & 1 deletion Makefile
@@ -37,7 +37,7 @@ test:

 testacc:
 	@echo "==> Running acceptance tests"
-	TF_ACC=1 go test ./castai/... '-run=^TestAcc' -v -timeout 10m
+	TF_ACC=1 go test ./castai/... '-run=^TestAcc' -v -timeout 16m
 
 validate-terraform-examples:
 	for examples in examples/eks examples/gke examples/aks ; do \
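The testacc target's '-run=^TestAcc' pattern selects only Go tests whose names start with TestAcc, and the terraform-plugin-sdk harness skips them unless TF_ACC is set, which the recipe exports; the timeout bump presumably gives the longer delete flow below room to finish. As a rough, hedged sketch of what such a test looks like (the resource config and the providerFactories helper are placeholders, not code from this PR):

```go
package castai

import (
	"testing"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
)

// TestAccExampleCluster is a hypothetical skeleton: resource.Test only runs
// when TF_ACC is set, so `TF_ACC=1 go test -run=^TestAcc` drives it.
func TestAccExampleCluster(t *testing.T) {
	resource.Test(t, resource.TestCase{
		ProviderFactories: providerFactories, // assumed test helper defined elsewhere in the package
		Steps: []resource.TestStep{
			{
				Config: `resource "castai_eks_cluster" "test" {}`, // placeholder config
				Check:  resource.TestCheckResourceAttrSet("castai_eks_cluster.test", "id"),
			},
		},
	})
}
```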
43 changes: 28 additions & 15 deletions castai/cluster.go
@@ -37,20 +37,41 @@ func resourceCastaiClusterDelete(ctx context.Context, data *schema.ResourceData,
 		agentStatus := *clusterResponse.JSON200.AgentStatus
 		log.Printf("[INFO] Current cluster status=%s, agent_status=%s", clusterStatus, agentStatus)
 
-		if clusterStatus == sdk.ClusterStatusDeleted || clusterStatus == sdk.ClusterStatusArchived {
+		if clusterStatus == sdk.ClusterStatusArchived {
 			log.Printf("[INFO] Cluster is already deleted, removing from state.")
 			data.SetId("")
 			return nil
 		}
 
+		triggerDisconnect := func() *retry.RetryError {
+			response, err := client.ExternalClusterAPIDisconnectClusterWithResponse(ctx, clusterId, sdk.ExternalClusterAPIDisconnectClusterJSONRequestBody{
+				DeleteProvisionedNodes: getOptionalBool(data, FieldDeleteNodesOnDisconnect, false),
+				KeepKubernetesResources: toPtr(true),
+			})
+			if checkErr := sdk.CheckOKResponse(response, err); checkErr != nil {
+				return retry.NonRetryableError(err)
+			}
+
+			return retry.RetryableError(fmt.Errorf("triggered agent disconnection cluster status %s agent status %s", clusterStatus, agentStatus))
+		}
+
 		triggerDelete := func() *retry.RetryError {
 			log.Printf("[INFO] Deleting cluster.")
-			if err := sdk.CheckResponseNoContent(client.ExternalClusterAPIDeleteClusterWithResponse(ctx, clusterId)); err != nil {
-				return retry.NonRetryableError(err)
+			res, err := client.ExternalClusterAPIDeleteClusterWithResponse(ctx, clusterId)
+			if res.StatusCode() == 400 {
+				return triggerDisconnect()
 			}
+
+			if checkErr := sdk.CheckResponseNoContent(res, err); checkErr != nil {
+				return retry.NonRetryableError(fmt.Errorf("error when deleting cluster status %s agent status %s error: %w", clusterStatus, agentStatus, err))
+			}
 			return retry.RetryableError(fmt.Errorf("triggered cluster deletion"))
 		}
 
+		if agentStatus == sdk.ClusterAgentStatusDisconnected || clusterStatus == sdk.ClusterStatusDeleted {
+			return triggerDelete()
+		}
+
 		// If cluster doesn't have credentials we have to call delete cluster instead of disconnect because disconnect
 		// will do nothing on cluster with empty credentials.
 		if toString(clusterResponse.JSON200.CredentialsId) == "" {
@@ -62,31 +83,23 @@ func resourceCastaiClusterDelete(ctx context.Context, data *schema.ResourceData,
 		}
 
 		if agentStatus == sdk.ClusterAgentStatusDisconnecting {
-			return retry.RetryableError(fmt.Errorf("agent is disconnecting"))
+			return retry.RetryableError(fmt.Errorf("agent is disconnecting cluster status %s agent status %s", clusterStatus, agentStatus))
 		}
 
 		if clusterStatus == sdk.ClusterStatusDeleting {
-			return retry.RetryableError(fmt.Errorf("cluster is deleting"))
+			return retry.RetryableError(fmt.Errorf("cluster is deleting cluster status %s agent status %s", clusterStatus, agentStatus))
 		}
 
 		if toString(clusterResponse.JSON200.CredentialsId) != "" && agentStatus != sdk.ClusterAgentStatusDisconnected {
 			log.Printf("[INFO] Disconnecting cluster.")
-			response, err := client.ExternalClusterAPIDisconnectClusterWithResponse(ctx, clusterId, sdk.ExternalClusterAPIDisconnectClusterJSONRequestBody{
-				DeleteProvisionedNodes: getOptionalBool(data, FieldDeleteNodesOnDisconnect, false),
-				KeepKubernetesResources: toPtr(true),
-			})
-			if checkErr := sdk.CheckOKResponse(response, err); checkErr != nil {
-				return retry.NonRetryableError(err)
-			}
-
-			return retry.RetryableError(fmt.Errorf("triggered agent disconnection"))
+			return triggerDisconnect()
 		}
 
 		if agentStatus == sdk.ClusterAgentStatusDisconnected && clusterStatus != sdk.ClusterStatusDeleted {
 			return triggerDelete()
 		}
 
-		return retry.RetryableError(fmt.Errorf("retrying"))
+		return retry.RetryableError(fmt.Errorf("retrying cluster status %s agent status %s", clusterStatus, agentStatus))
 	})
 
 	if err != nil {
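The hunks above show only the inside of the delete retry callback; the surrounding retry loop is hidden in the collapsed lines. For orientation, here is a minimal sketch of how a callback like this is typically driven by the terraform-plugin-sdk retry helper for the duration of the resource's delete timeout. The deleteCluster function is a hypothetical stand-in for the castai SDK call, and the 6*time.Minute literal stands in for the resource's configured delete timeout; this is not the provider's actual wiring.

```go
// Sketch only: illustrates the retry.RetryContext pattern used around the
// delete callback. All names below except the retry helper are assumptions.
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
)

// deleteCluster is a hypothetical stand-in for the real castai API call.
func deleteCluster(ctx context.Context, clusterID string) (int, error) {
	// Pretend the API accepted the delete request.
	return 204, nil
}

func main() {
	ctx := context.Background()
	clusterID := "example-cluster-id"

	// The callback is re-invoked until it returns nil or a NonRetryableError,
	// or until the timeout (the 6m delete timeout in this PR) elapses.
	err := retry.RetryContext(ctx, 6*time.Minute, func() *retry.RetryError {
		code, err := deleteCluster(ctx, clusterID)
		if err != nil {
			return retry.NonRetryableError(err)
		}
		if code == 400 {
			// The change above falls back to disconnecting the agent on a 400 response.
			return retry.RetryableError(fmt.Errorf("delete rejected, retrying after disconnect"))
		}
		return nil // deletion accepted
	})
	if err != nil {
		fmt.Println("cluster deletion did not complete:", err)
	}
}
```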
2 changes: 1 addition & 1 deletion castai/resource_aks_cluster.go
@@ -36,7 +36,7 @@ func resourceAKSCluster() *schema.Resource {
 		Timeouts: &schema.ResourceTimeout{
 			Create: schema.DefaultTimeout(5 * time.Minute),
 			Update: schema.DefaultTimeout(1 * time.Minute),
-			Delete: schema.DefaultTimeout(2 * time.Minute),
+			Delete: schema.DefaultTimeout(6 * time.Minute),
 		},
 
 		Schema: map[string]*schema.Schema{
2 changes: 1 addition & 1 deletion castai/resource_eks_cluster.go
@@ -33,7 +33,7 @@ func resourceEKSCluster() *schema.Resource {
 		Timeouts: &schema.ResourceTimeout{
 			Create: schema.DefaultTimeout(5 * time.Minute),
 			Update: schema.DefaultTimeout(1 * time.Minute),
-			Delete: schema.DefaultTimeout(5 * time.Minute),
+			Delete: schema.DefaultTimeout(6 * time.Minute),
 		},
 
 		Schema: map[string]*schema.Schema{