diff --git a/castai/resource_eks_cluster_id.go b/castai/resource_eks_cluster_id.go
index c7324990..fb166697 100644
--- a/castai/resource_eks_cluster_id.go
+++ b/castai/resource_eks_cluster_id.go
@@ -16,6 +16,7 @@ func resourceEKSClusterID() *schema.Resource {
 		CreateContext: resourceEKSClusterIDCreate,
 		ReadContext:   resourceEKSClusterIDRead,
 		DeleteContext: resourceEKSClusterIDDelete,
+		Description:   "Retrieve the CAST AI cluster ID.",
 		Schema: map[string]*schema.Schema{
 			"account_id": {
 				Type: schema.TypeString,
diff --git a/castai/resource_eks_cluster_userarn.go b/castai/resource_eks_cluster_userarn.go
index 9f6e3e00..c25bbb93 100644
--- a/castai/resource_eks_cluster_userarn.go
+++ b/castai/resource_eks_cluster_userarn.go
@@ -17,6 +17,7 @@ func resourceEKSClusterUserARN() *schema.Resource {
 		ReadContext:   resourceEKSUserARNRead,
 		CreateContext: resourceEKSUserARNCreate,
 		DeleteContext: resourceEKSUserARNDelete,
+		Description:   "Retrieve the EKS cluster user ARN.",
 		Schema: map[string]*schema.Schema{
 			EKSClusterUserARNFieldClusterID: {
 				Type: schema.TypeString,
diff --git a/castai/resource_eviction_config.go b/castai/resource_eviction_config.go
index ad716d87..140e271f 100644
--- a/castai/resource_eviction_config.go
+++ b/castai/resource_eviction_config.go
@@ -126,14 +126,17 @@ func resourceEvictionConfig() *schema.Resource {
 					FieldEvictionOptionDisabled: {
 						Type:     schema.TypeBool,
 						Optional: true,
+						Description: "Mark matched pods as removal-disabled.",
 					},
 					FieldEvictionOptionAggressive: {
 						Type:        schema.TypeBool,
+						Description: "Apply the Evictor's aggressive mode to matched pods.",
 						Optional:    true,
 					},
 					FieldEvictionOptionDisposable: {
 						Type:     schema.TypeBool,
 						Optional: true,
+						Description: "Mark matched nodes as disposable.",
 					},
 				},
 			},
diff --git a/examples/data-sources/castai_eks_settings/data-source.tf b/examples/data-sources/castai_eks_settings/data-source.tf
new file mode 100644
index 00000000..6b0ea282
--- /dev/null
+++ b/examples/data-sources/castai_eks_settings/data-source.tf
@@ -0,0 +1,6 @@
+data "castai_eks_settings" "current" {
+  account_id = data.aws_caller_identity.current.account_id
+  region     = var.cluster_region
+  cluster    = var.cluster_name
+  vpc        = var.vpc
+}
diff --git a/examples/data-sources/castai_gke_user_policies/data-source.tf b/examples/data-sources/castai_gke_user_policies/data-source.tf
new file mode 100644
index 00000000..ba342eba
--- /dev/null
+++ b/examples/data-sources/castai_gke_user_policies/data-source.tf
@@ -0,0 +1 @@
+data "castai_gke_user_policies" "gke" {}
diff --git a/examples/data-sources/castai_organization/data-source.tf b/examples/data-sources/castai_organization/data-source.tf
new file mode 100644
index 00000000..2b822b79
--- /dev/null
+++ b/examples/data-sources/castai_organization/data-source.tf
@@ -0,0 +1,3 @@
+data "castai_organization" "dev" {
+  name = var.castai_dev_organization_name
+}
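Note: the castai_eks_settings example above only declares the data source; its value comes from the computed outputs. A minimal consumer sketch, assuming the data source exports an iam_user_policy_json attribute (an assumption to verify against the generated docs, not something this diff establishes):

    # Hypothetical consumer: wrap the computed CAST AI user policy JSON in an IAM policy.
    # The iam_user_policy_json attribute name is assumed, not confirmed by this diff.
    resource "aws_iam_policy" "castai_user" {
      name   = "CastEKSUserPolicy"
      policy = data.castai_eks_settings.current.iam_user_policy_json
    }
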
diff --git a/examples/resources/castai_autoscaler/resource.tf b/examples/resources/castai_autoscaler/resource.tf
new file mode 100644
index 00000000..89d51907
--- /dev/null
+++ b/examples/resources/castai_autoscaler/resource.tf
@@ -0,0 +1,82 @@
+resource "castai_autoscaler" "castai_autoscaler_policy" {
+  cluster_id = castai_eks_cluster.castai_cluster.id
+
+  autoscaler_policies_json = var.autoscaler_policies_json
+
+  autoscaler_settings {
+    enabled                                 = true
+    is_scoped_mode                          = false
+    node_templates_partial_matching_enabled = false
+
+    unschedulable_pods {
+      enabled                  = true
+      custom_instances_enabled = false
+
+      headroom {
+        enabled           = true
+        cpu_percentage    = 20
+        memory_percentage = 30
+      }
+
+      headroom_spot {
+        enabled           = true
+        cpu_percentage    = 15
+        memory_percentage = 25
+      }
+
+      node_constraints {
+        enabled       = true
+        min_cpu_cores = 2
+        max_cpu_cores = 8
+        min_ram_mib   = 2048
+        max_ram_mib   = 8192
+      }
+    }
+
+    cluster_limits {
+      enabled = true
+
+      cpu {
+        min_cores = 1
+        max_cores = 10
+      }
+    }
+
+    spot_instances {
+      enabled                             = true
+      max_reclaim_rate                    = 50
+      spot_diversity_enabled              = true
+      spot_diversity_price_increase_limit = 20
+
+      spot_backups {
+        enabled                          = true
+        spot_backup_restore_rate_seconds = 300
+      }
+
+      spot_interruption_predictions {
+        enabled                            = true
+        spot_interruption_predictions_type = "history"
+      }
+    }
+
+    node_downscaler {
+      enabled = true
+
+      empty_nodes {
+        enabled       = true
+        delay_seconds = 60
+      }
+
+      evictor {
+        enabled                                = true
+        dry_run                                = false
+        aggressive_mode                        = false
+        scoped_mode                            = false
+        cycle_interval                         = 300
+        node_grace_period_minutes              = 10
+        pod_eviction_failure_back_off_interval = 30
+        ignore_pod_disruption_budgets          = false
+      }
+    }
+  }
+}
diff --git a/examples/resources/castai_eks_clusterid/resource.tf b/examples/resources/castai_eks_clusterid/resource.tf
new file mode 100644
index 00000000..a51979d2
--- /dev/null
+++ b/examples/resources/castai_eks_clusterid/resource.tf
@@ -0,0 +1,5 @@
+resource "castai_eks_clusterid" "cluster_id" {
+  account_id   = data.aws_caller_identity.current.account_id
+  region       = var.cluster_region
+  cluster_name = var.cluster_name
+}
diff --git a/examples/resources/castai_eks_user_arn/resource.tf b/examples/resources/castai_eks_user_arn/resource.tf
new file mode 100644
index 00000000..f0a8a958
--- /dev/null
+++ b/examples/resources/castai_eks_user_arn/resource.tf
@@ -0,0 +1,3 @@
+resource "castai_eks_user_arn" "castai_user_arn" {
+  cluster_id = castai_eks_clusterid.cluster_id.id
+}
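Note: the castai_eks_clusterid and castai_eks_user_arn examples above chain together: the first computes the cluster ID, the second consumes it. A hedged sketch of how the resulting ARN is typically carried onward into an aws-auth mapUsers entry; the arn attribute name and the system:masters group are assumptions for illustration only:

    # Hypothetical wiring: expose the CAST AI user ARN for an aws-auth mapUsers entry.
    locals {
      castai_map_users = [
        {
          userarn  = castai_eks_user_arn.castai_user_arn.arn # attribute name assumed
          username = "castai"
          groups   = ["system:masters"] # group choice is illustrative
        }
      ]
    }
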
diff --git a/examples/resources/castai_evictor_advanced_config/resource.tf b/examples/resources/castai_evictor_advanced_config/resource.tf
new file mode 100644
index 00000000..0b264d32
--- /dev/null
+++ b/examples/resources/castai_evictor_advanced_config/resource.tf
@@ -0,0 +1,26 @@
+resource "castai_evictor_advanced_config" "config" {
+  cluster_id = castai_eks_cluster.castai_cluster.id
+  evictor_advanced_config = [
+    {
+      pod_selector = {
+        kind      = "Job"
+        namespace = "castai"
+        match_labels = {
+          "app.kubernetes.io/name" = "castai-node"
+        }
+      },
+      aggressive = true
+    },
+    {
+      node_selector = {
+        match_expressions = [
+          {
+            key      = "pod.cast.ai/flag"
+            operator = "Exists"
+          }
+        ]
+      },
+      disposable = true
+    }
+  ]
+}
diff --git a/examples/resources/castai_node_template/resource.tf b/examples/resources/castai_node_template/resource.tf
new file mode 100644
index 00000000..643ae3c9
--- /dev/null
+++ b/examples/resources/castai_node_template/resource.tf
@@ -0,0 +1,64 @@
+resource "castai_node_template" "this" {
+  cluster_id = castai_eks_cluster.castai_cluster.id
+
+  name             = "my-node-template"
+  is_default       = false
+  is_enabled       = true
+  configuration_id = "config-id-123"
+  should_taint     = true
+
+  custom_labels = {
+    env = "production"
+  }
+
+  custom_taints {
+    key    = "dedicated"
+    value  = "backend"
+    effect = "NoSchedule"
+  }
+
+  constraints {
+    compute_optimized                           = true
+    storage_optimized                           = false
+    compute_optimized_state                     = "on"
+    storage_optimized_state                     = "off"
+    is_gpu_only                                 = false
+    spot                                        = true
+    on_demand                                   = false
+    use_spot_fallbacks                          = true
+    fallback_restore_rate_seconds               = 300
+    enable_spot_diversity                       = true
+    spot_diversity_price_increase_limit_percent = 20
+    spot_interruption_predictions_enabled       = true
+    spot_interruption_predictions_type          = "history"
+    min_cpu                                     = 2
+    max_cpu                                     = 8
+    min_memory                                  = 4096
+    max_memory                                  = 16384
+    architectures                               = ["amd64"]
+    azs                                         = ["us-east-1a", "us-east-1b"]
+    burstable_instances                         = false
+    customer_specific                           = false
+
+    instance_families {
+      include = ["m5", "m6i"]
+      exclude = ["m4"]
+    }
+
+    gpu {
+      manufacturers = ["nvidia"]
+      include_names = ["p2"]
+      exclude_names = ["p3"]
+      min_count     = 1
+      max_count     = 4
+    }
+
+    custom_priority {
+      instance_families = ["m5", "m6i"]
+      spot              = true
+      on_demand         = false
+    }
+  }
+
+  depends_on = [castai_autoscaler.castai_autoscaler_policies]
+}
diff --git a/examples/resources/castai_organization_members/resource.tf b/examples/resources/castai_organization_members/resource.tf
new file mode 100644
index 00000000..6bc138b8
--- /dev/null
+++ b/examples/resources/castai_organization_members/resource.tf
@@ -0,0 +1,19 @@
+data "castai_organization" "dev" {
+  name = var.castai_dev_organization_name
+}
+
+resource "castai_organization_members" "dev" {
+  organization_id = data.castai_organization.dev.id
+
+  owners = [
+    "owner@test.ai",
+  ]
+
+  members = [
+    "member@test.ai",
+  ]
+
+  viewers = []
+}
+
+
diff --git a/examples/resources/castai_workload_scaling_policy/resource.tf b/examples/resources/castai_workload_scaling_policy/resource.tf
index 6f5b1afb..e90e4ee5 100644
--- a/examples/resources/castai_workload_scaling_policy/resource.tf
+++ b/examples/resources/castai_workload_scaling_policy/resource.tf
@@ -1,6 +1,6 @@
 resource "castai_workload_scaling_policy" "services" {
   name              = "services"
-  cluster_id        = castai_gke_cluster.dev.id
+  cluster_id        = castai_eks_cluster.dev.id
   apply_type        = "IMMEDIATE"
   management_option = "MANAGED"
   cpu {
@@ -14,4 +14,4 @@
     overhead        = 0.35
     apply_threshold = 0.2
   }
-}
\ No newline at end of file
+}
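Note: the corrected workload scaling policy now points cluster_id at castai_eks_cluster.dev, which no example in this diff declares. A minimal sketch of the cluster resource that reference assumes (argument names follow the castai_eks_cluster schema; the values reuse variables from the examples above):

    # Hypothetical cluster the scaling policy example attaches to.
    resource "castai_eks_cluster" "dev" {
      account_id = data.aws_caller_identity.current.account_id
      region     = var.cluster_region
      name       = var.cluster_name
    }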