diff --git a/Makefile b/Makefile index 732ee844..f8c084d1 100644 --- a/Makefile +++ b/Makefile @@ -3,7 +3,7 @@ # To re-generate a bundle for another specific version without changing the standard setup, you can: # - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.8) # - use environment variables to overwrite this value (e.g export VERSION=0.0.8) -VERSION ?= 0.5.0 +VERSION ?= 0.5.1 # CHANNELS define the bundle channels used in the bundle. # Add a new line here if you would like to change its default config. (E.g CHANNELS = "candidate,fast,stable") @@ -299,13 +299,22 @@ run-automation-cloning: fi; \ go test $(DEFAULT_CLONING_ROOT)$$folders -v -timeout 90m -.PHONY: run-automation-provisioning +.PHONY: run-automation-provisioning-si DEFAULT_PROVISIONING_ROOT := ./automation/tests/provisioning/ -PROVISIONING_FOLDERS := ... -run-automation-provisioning: +# change this list to every folder but the pg-ha_test one +SI_PROVISIONING_FOLDERS := ... +run-automation-provisioning-si: @read -p "Enter the test directories with spacing to run (mongo-si_test mssql-si_test mysql-si_test pg-si_test). Else all directories will be run: " folders; \ if [ -z "$$folders" ]; then \ - folders="$(PROVISIONING_FOLDERS)"; \ + folders="$(SI_PROVISIONING_FOLDERS)"; \ fi; \ go test $(DEFAULT_PROVISIONING_ROOT)$$folders -v -timeout 90m +.PHONY: run-automation-provisioning-ha +HA_PROVISIONING_FOLDERS := pg-ha_test +run-automation-provisioning-ha: + @read -p "Enter the test directories with spacing to run (pg-ha_test). Else all directories will be run: " folders; \ + if [ -z "$$folders" ]; then \ + folders="$(HA_PROVISIONING_FOLDERS)"; \ + fi; \ + go test $(DEFAULT_PROVISIONING_ROOT)$$folders -v -timeout 90m diff --git a/README.md b/README.md index 958e4e2c..30df93f6 100644 --- a/README.md +++ b/README.md @@ -156,7 +156,9 @@ spec: size: 10 timezone: "UTC" type: postgres - + # isHighAvailability is an optional parameter. In case nothing is specified, it is set to false + isHighAvailability: false + # You can specify any (or none) of these types of profiles: compute, software, network, dbParam # If not specified, the corresponding Out-of-Box (OOB) profile will be used wherever applicable # Name is case-sensitive. ID is the UUID of the profile. Profile should be in the "READY" state @@ -214,6 +216,9 @@ spec: # Cluster id of the cluster where the Database has to be provisioned # Can be fetched from the GET /clusters endpoint clusterId: "Nutanix Cluster Id" + # isHighAvailability is an optional parameter. In case nothing is specified, it is set to false + isHighAvailability: false + # You can specify any (or none) of these types of profiles: compute, software, network, dbParam # If not specified, the corresponding Out-of-Box (OOB) profile will be used wherever applicable # Name is case-sensitive. ID is the UUID of the profile. Profile should be in the "READY" state @@ -285,6 +290,26 @@ additionalArguments: windows_domain_profile_id: # NO Default. Must specify vm_db_server_user. vm_db_server_user: # NO Default. Must specify windows_domain_profile_id. vm_win_license_key: # NO Default. 
+ +# Postgres High Availability +additionalArguments: + listener_port: "1111" # Default: "5432" + failover_mode: "Manual" # Default: "Automatic" + proxy_read_port: "1111" # Default: "5001" + proxy_write_port: "1111" # Default: "5000" + enable_synchronous_mode: "true" # Default: "true" + auto_tune_staging_drive: "false" # Default: "true" + backup_policy: "primary_only" # Default: "primary_only" + provision_virtual_ip: "true" # Default: "true" + deploy_haproxy: "true" # Default: "true" + node_type: "haproxy" # Default: "database" + allocate_pg_hugepage: "false" # Default: "true" + cluster_database: "false" # Default: "true" + archive_wal_expire_days: "7" # Default: "-1" + enable_peer_auth: "false" # Default: "true" + cluster_name: "" + patroni_cluster_name: "" ``` Cloning Additional Arguments: diff --git a/api/v1alpha1/database_types.go b/api/v1alpha1/database_types.go index 06247563..e13e484e 100644 --- a/api/v1alpha1/database_types.go +++ b/api/v1alpha1/database_types.go @@ -107,6 +107,24 @@ type Instance struct { // +optional // Additional database engine specific arguments AdditionalArguments map[string]string `json:"additionalArguments"` + // +optional + IsHighAvailability bool `json:"isHighAvailability"` + // +optional + Nodes []*Node `json:"nodes,omitempty"` +} + +type Node struct { + // +optional + VmName string `json:"vmName"` + Properties NodeProperties `json:"properties"` +} + +type NodeProperties struct { + NodeType string `json:"node_type"` + // +optional + Role string `json:"role"` + // +optional + FailoverMode string `json:"failover_mode"` } type Clone struct { diff --git a/api/v1alpha1/database_webhook.go b/api/v1alpha1/database_webhook.go index 6553d4f1..44208df3 100644 --- a/api/v1alpha1/database_webhook.go +++ b/api/v1alpha1/database_webhook.go @@ -97,13 +97,13 @@ func (r *Database) ValidateDelete() (admission.Warnings, error) { } /* Checks if configured additional arguments are valid or not and returns the corresponding additional arguments. If error is nil valid, else invalid */ -func additionalArgumentsValidationCheck(isClone bool, dbType string, specifiedAdditionalArguments map[string]string) error { +func additionalArgumentsValidationCheck(isClone bool, dbType string, isHA bool, specifiedAdditionalArguments map[string]string) error { // Empty additionalArguments is always valid if specifiedAdditionalArguments == nil { return nil } - allowedAdditionalArguments, err := util.GetAllowedAdditionalArguments(isClone, dbType) + allowedAdditionalArguments, err := util.GetAllowedAdditionalArguments(isClone, dbType, isHA) // Invalid type returns error if err != nil { diff --git a/api/v1alpha1/node_helpers.go b/api/v1alpha1/node_helpers.go new file mode 100644 index 00000000..282752f9 --- /dev/null +++ b/api/v1alpha1/node_helpers.go @@ -0,0 +1,59 @@ +package v1alpha1 + +// Validates that the Node and NodeProperties passed in are valid, +// e.g. that vmNames are unique and properties are correctly defined.
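+// For example (sketch of a node list this validation accepts): three or more nodes of
+// node_type "database" (role "primary" or "secondary", failover_mode "Automatic" or "Manual"),
+// optionally additional "haproxy" nodes, and a unique vmName on every node.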
+// TODO: move to common/util one day + +import ( + "fmt" +) + +var ( + typeOptions = map[string]bool{"database": true, "haproxy": true} + roleOptions = map[string]bool{"primary": true, "secondary": true} + failoverOptions = map[string]bool{"Automatic": true, "Manual": true} +) + +func ValidateNodes(nodes []*Node, isHighAvailability bool) error { + if !isHighAvailability { + return nil + } + + databaseNodeCount := 0 + vmNames := make(map[string]bool) // for validating that vmNames are unique + for _, node := range nodes { + if node.Properties.NodeType == "database" { + databaseNodeCount++ + } + + if err := ValidateNodeProperties(node.Properties); err != nil { + return err + } + + if _, ok := vmNames[node.VmName]; ok { + return fmt.Errorf("vmName %s is already specified", node.VmName) + } + vmNames[node.VmName] = true + } + + if databaseNodeCount < 3 { + return fmt.Errorf("high availability requires at least 3 database nodes") + } + return nil +} + +func ValidateNodeProperties(np NodeProperties) error { + if !typeOptions[np.NodeType] { + return fmt.Errorf("invalid NodeType in Node Properties: %s", np.NodeType) + } + + if !roleOptions[np.Role] { + return fmt.Errorf("invalid Role in Node Properties: %s", np.Role) + } + + if !failoverOptions[np.FailoverMode] { + return fmt.Errorf("invalid FailoverMode in Node Properties: %s", np.FailoverMode) + } + + return nil +} diff --git a/api/v1alpha1/webhook_helpers.go b/api/v1alpha1/webhook_helpers.go index 45559d8f..8e1cbff3 100644 --- a/api/v1alpha1/webhook_helpers.go +++ b/api/v1alpha1/webhook_helpers.go @@ -91,9 +91,12 @@ func (v *CloningWebhookHandler) validateCreate(spec *DatabaseSpec, errors *field } } - if err := additionalArgumentsValidationCheck(spec.IsClone, clone.Type, clone.AdditionalArguments); err != nil { + // HA is not supported when cloning + isHighAvailability := false + if err := additionalArgumentsValidationCheck(spec.IsClone, clone.Type, isHighAvailability, clone.AdditionalArguments); err != nil { *errors = append(*errors, field.Invalid(clonePath.Child("additionalArguments"), clone.AdditionalArguments, err.Error())) } + databaselog.Info("Exiting validateCreate for clone") } @@ -230,10 +233,15 @@ func (v *ProvisioningWebhookHandler) validateCreate(spec *DatabaseSpec, errors * )) } - if err := additionalArgumentsValidationCheck(spec.IsClone, instance.Type, instance.AdditionalArguments); err != nil { + if err := additionalArgumentsValidationCheck(spec.IsClone, instance.Type, instance.IsHighAvailability, instance.AdditionalArguments); err != nil { *errors = append(*errors, field.Invalid(instancePath.Child("additionalArguments"), instance.AdditionalArguments, err.Error())) } + // Validate nodes for HA + if err := ValidateNodes(instance.Nodes, instance.IsHighAvailability); err != nil { + *errors = append(*errors, field.Invalid(instancePath.Child("nodes"), instance.Nodes, err.Error())) + } + databaselog.Info("Exiting validateCreate for provisioning") } diff --git a/api/v1alpha1/webhook_suite_test.go b/api/v1alpha1/webhook_suite_test.go index 2173c2db..dcf9bc26 100644 --- a/api/v1alpha1/webhook_suite_test.go +++ b/api/v1alpha1/webhook_suite_test.go @@ -60,6 +60,7 @@ const ( CREDENTIAL_SECRET = "database-secret" TIMEZONE = "UTC" SIZE = 10 + HA = false ) func TestAPIs(t *testing.T) { @@ -353,6 +354,139 @@ var _ = Describe("Webhook Tests", func() { Expect(errMsg).To(ContainSubstring(fmt.Sprintf("additional arguments validation for type: %s failed!", common.DATABASE_TYPE_MSSQL))) }) }) + + When("Postgres specified with IsHighAvailability",
func() { + It("Should have zero nodes and IsHighAvailability set to true", func() { + db := createDefaultDatabase("db15") + db.Spec.Instance.AdditionalArguments = map[string]string{ + "failover_mode": "Automatic", + "proxy_read_port": "5001", + "listener_port": "5432", + "proxy_write_port": "5000", + "enable_synchronous_mode": "true", + "auto_tune_staging_drive": "true", + "backup_policy": "primary_only", + "provision_virtual_ip": "true", + "deploy_haproxy": "true", + "node_type": "database", + "allocate_pg_hugepage": "false", + "cluster_database": "false", + "archive_wal_expire_days": "-1", + "enable_peer_auth": "false", + "cluster_name": "psqlcluster", + "patroni_cluster_name": "patroni", + } + db.Spec.Instance.IsHighAvailability = true + db.Spec.Instance.Nodes = nil + + err := k8sClient.Create(context.Background(), db) + Expect(err).To(HaveOccurred()) + }) + + It("Should have 5 nodes and IsHighAvailability set to true", func() { + db := createDefaultDatabase("db16") + db.Spec.Instance.AdditionalArguments = map[string]string{ + "failover_mode": "Automatic", + "proxy_read_port": "5001", + "listener_port": "5432", + "proxy_write_port": "5000", + "enable_synchronous_mode": "true", + "auto_tune_staging_drive": "true", + "backup_policy": "primary_only", + "provision_virtual_ip": "true", + "deploy_haproxy": "true", + "node_type": "database", + "allocate_pg_hugepage": "false", + "cluster_database": "false", + "archive_wal_expire_days": "-1", + "enable_peer_auth": "false", + "cluster_name": "psqlcluster", + "patroni_cluster_name": "patroni", + } + primaryProp := createDefaultNodeProperties("database", "primary") + secondaryProp := createDefaultNodeProperties("database", "secondary") + proxyProp := createDefaultNodeProperties("haproxy", "secondary") + db.Spec.Instance.IsHighAvailability = true + db.Spec.Instance.Nodes = []*Node{ + { + VmName: "VM1", + Properties: *primaryProp, + }, + { + VmName: "VM2", + Properties: *secondaryProp, + }, + { + VmName: "VM3", + Properties: *secondaryProp, + }, + { + VmName: "VM4", + Properties: *proxyProp, + }, + { + VmName: "VM5", + Properties: *proxyProp, + }, + } + + err := k8sClient.Create(context.Background(), db) + Expect(err).ToNot(HaveOccurred()) + }) + + It("Should throw error when given 2 nodes", func() { + db := createDefaultDatabase("db17") + db.Spec.Instance.AdditionalArguments = map[string]string{ + "failover_mode": "Automatic", + "proxy_read_port": "5001", + "listener_port": "5432", + "proxy_write_port": "5000", + "enable_synchronous_mode": "true", + "auto_tune_staging_drive": "true", + "backup_policy": "primary_only", + "provision_virtual_ip": "true", + "deploy_haproxy": "true", + "node_type": "database", + "allocate_pg_hugepage": "false", + "cluster_database": "false", + "archive_wal_expire_days": "-1", + "enable_peer_auth": "false", + "cluster_name": "psqlcluster", + "patroni_cluster_name": "patroni", + } + primaryProp := createDefaultNodeProperties("database", "primary") + secondaryProp := createDefaultNodeProperties("database", "secondary") + db.Spec.Instance.IsHighAvailability = true + db.Spec.Instance.Nodes = []*Node{ + { + VmName: "VM1", + Properties: *primaryProp, + }, + { + VmName: "VM2", + Properties: *secondaryProp, + }, + } + + err := k8sClient.Create(context.Background(), db) + Expect(err).To(HaveOccurred()) + }) + + It("Should error out for invalid Postgres additionalArguments and IsHighAvailability set to true", func() { + db := createDefaultDatabase("db18") + db.Spec.Instance.AdditionalArguments = map[string]string{ + 
"listener_port": "5432", + "invalid": "invalid", + } + db.Spec.Instance.IsHighAvailability = true + + err := k8sClient.Create(context.Background(), db) + Expect(err).To(HaveOccurred()) + errMsg := err.(*errors.StatusError).ErrStatus.Message + Expect(errMsg).To(ContainSubstring(fmt.Sprintf("additional arguments validation for type: %s failed!", common.DATABASE_TYPE_POSTGRES))) + }) + }) + }) Context("Clone checks", func() { @@ -595,6 +729,7 @@ var _ = Describe("Webhook Tests", func() { Expect(errMsg).To(ContainSubstring(fmt.Sprintf("additional arguments validation for type: %s failed!", common.DATABASE_TYPE_MSSQL))) }) }) + }) }) @@ -615,6 +750,7 @@ func createDefaultDatabase(metadataName string) *Database { Type: common.DATABASE_TYPE_POSTGRES, Profiles: &(Profiles{}), AdditionalArguments: map[string]string{}, + IsHighAvailability: HA, }, }, } @@ -643,3 +779,11 @@ func createDefaultClone(metadataName string) *Database { }, } } + +func createDefaultNodeProperties(node_type, role string) *NodeProperties { + return &NodeProperties{ + NodeType: node_type, + Role: role, + FailoverMode: "Automatic", + } +} diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 2a061e22..ff63d5f3 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -39,6 +39,17 @@ func (in *Clone) DeepCopyInto(out *Clone) { (*out)[key] = val } } + if in.Nodes != nil { + in, out := &in.Nodes, &out.Nodes + *out = make([]*Node, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Node) + **out = **in + } + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Clone. @@ -190,6 +201,17 @@ func (in *Instance) DeepCopyInto(out *Instance) { (*out)[key] = val } } + if in.Nodes != nil { + in, out := &in.Nodes, &out.Nodes + *out = make([]*Node, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Node) + **out = **in + } + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Instance. @@ -314,6 +336,37 @@ func (in *NDBServerStatus) DeepCopy() *NDBServerStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Node) DeepCopyInto(out *Node) { + *out = *in + out.Properties = in.Properties +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Node. +func (in *Node) DeepCopy() *Node { + if in == nil { + return nil + } + out := new(Node) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeProperties) DeepCopyInto(out *NodeProperties) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeProperties. +func (in *NodeProperties) DeepCopy() *NodeProperties { + if in == nil { + return nil + } + out := new(NodeProperties) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Profile) DeepCopyInto(out *Profile) { *out = *in diff --git a/automation/tests/provisioning/pg-ha_test/config/database.yaml b/automation/tests/provisioning/pg-ha_test/config/database.yaml new file mode 100644 index 00000000..675e1e96 --- /dev/null +++ b/automation/tests/provisioning/pg-ha_test/config/database.yaml @@ -0,0 +1,49 @@ +apiVersion: ndb.nutanix.com/v1alpha1 +kind: Database +metadata: + name: db-pg-ha +spec: + ndbRef: ndb-pg + databaseInstance: + Name: db-pg-ha + databaseNames: + - database_one + - database_two + - database_three + clusterId: + credentialSecret: db-secret-pg-ha + size: 10 + timezone: "UTC" + type: postgres + isHighAvailability: true + profiles: {} + timeMachine: + name: db-pg-ha_TM + description: "TM provisioned by operator" + sla : "DEFAULT_OOB_GOLD_SLA" + dailySnapshotTime: "12:34:56" + snapshotsPerDay: 4 + logCatchUpFrequency: 90 + weeklySnapshotDay: "WEDNESDAY" + monthlySnapshotDay: 24 + quarterlySnapshotMonth: "Jan" + additionalArguments: # Optional block, can specify additional arguments that are unique to database engines. + listener_port: "5432" + failover_mode: "Automatic" + deploy_haproxy: "false" + nodes: + - vmName: "test1" + properties: + node_type: database + role: Primary + failover_mode: Automatic + - vmName: "test2" + properties: + node_type: database + role: Secondary + failover_mode: Automatic + - vmName: "test3" + properties: + node_type: database + role: Secondary + failover_mode: Automatic diff --git a/automation/tests/provisioning/pg-ha_test/config/db-secret.yaml b/automation/tests/provisioning/pg-ha_test/config/db-secret.yaml new file mode 100644 index 00000000..84c54aa7 --- /dev/null +++ b/automation/tests/provisioning/pg-ha_test/config/db-secret.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Secret +metadata: + name: db-secret-pg-ha +type: Opaque +stringData: + password: + ssh_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCwyAhpllp2WwrUB1aO/0/DN5nIWNXJWQ3ybhuEG4U+kHl8xFFKnPOTDQtTK8UwByoSf6wqIfTr10ESAoHySOpxHk2gyVHVmUmRZ1WFiNR5tW3Q4qbq1qKpIVy1jH9ZRoTJwzg0J33W9W8SZzhM8Nj0nwuDqp6FS8ui7q9H3tgM+9bYYxETTg52NEw7jTVQx6KaZgG+p/8armoYPKh9DGhBYGY3oCmGiOYlm/phSlj3R63qghZIsBXKxeJDEs4cLolQ+9QYoRqqusdEGVCp7Ba/GtUPdBPYdTy+xuXGiALEpsCrqyUstxypHZVJEQfmqS8uy9UB8KFg2YepwhPgX1oN noname diff --git a/automation/tests/provisioning/pg-ha_test/config/ndb-secret.yaml b/automation/tests/provisioning/pg-ha_test/config/ndb-secret.yaml new file mode 100644 index 00000000..f3ac03b0 --- /dev/null +++ b/automation/tests/provisioning/pg-ha_test/config/ndb-secret.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Secret +metadata: + name: ndb-secret-pg-ha +type: Opaque +stringData: +# username and password for the test database + username: user1 + password: user1/pwd + ca_certificate: "" diff --git a/automation/tests/provisioning/pg-ha_test/config/ndb.yaml b/automation/tests/provisioning/pg-ha_test/config/ndb.yaml new file mode 100644 index 00000000..c0857802 --- /dev/null +++ b/automation/tests/provisioning/pg-ha_test/config/ndb.yaml @@ -0,0 +1,8 @@ +apiVersion: ndb.nutanix.com/v1alpha1 +kind: NDBServer +metadata: + name: ndb-pg +spec: + credentialSecret: ndb-secret-pg-ha + server: :8443/era/v0.9> + skipCertificateVerification: true diff --git a/automation/tests/provisioning/pg-ha_test/config/pod.yaml b/automation/tests/provisioning/pg-ha_test/config/pod.yaml new file mode 100644 index 00000000..24056a89 --- /dev/null +++ b/automation/tests/provisioning/pg-ha_test/config/pod.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Pod +metadata: + name: app-pg-ha + labels: + 
app: app-pg-ha +spec: + containers: + - name: best-app + image: manavrajvanshinx/best-app:latest + resources: + limits: + memory: 512Mi + cpu: "1" + env: + - name: DBHOST + value: db-pg-ha-svc + - name: DBPORT + value: '80' + - name: PASSWORD + valueFrom: + secretKeyRef: + name: db-secret-pg-ha + key: password + ports: + - containerPort: 3000 + initContainers: + - name: init-db + image: busybox:1.28 + command: ['sh', '-c', "until nslookup db-pg-ha-svc.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo waiting for database service; sleep 2; done"] diff --git a/automation/tests/provisioning/pg-ha_test/pg-ha_test.go b/automation/tests/provisioning/pg-ha_test/pg-ha_test.go new file mode 100644 index 00000000..fcea9964 --- /dev/null +++ b/automation/tests/provisioning/pg-ha_test/pg-ha_test.go @@ -0,0 +1,213 @@ +package postgres_provisoning_ha + +// Basic imports +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/nutanix-cloud-native/ndb-operator/automation" + clientsetv1alpha1 "github.com/nutanix-cloud-native/ndb-operator/automation/clientset/v1alpha1" + util "github.com/nutanix-cloud-native/ndb-operator/automation/util" + "github.com/nutanix-cloud-native/ndb-operator/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" +) + +// A test suite is a collection of related test cases that are grouped together for testing a specific package or functionality. +// The testify package builds on top of Go's built-in testing package and enhances it with additional features like assertions and test suite management. +// PostgresProvisioningHightAvailabilityInstanceTestSuite is a test suite struct that embeds testify's suite.Suite +type PostgresProvisioningHighAvailabilityTestSuite struct { + suite.Suite + ctx context.Context + setupTypes *util.SetupTypes + v1alpha1ClientSet *clientsetv1alpha1.V1alpha1Client + clientset *kubernetes.Clientset + tsm util.TestSuiteManager +} + +// SetupSuite is called once before running the tests in the suite +func (suite *PostgresProvisioningHighAvailabilityTestSuite) SetupSuite() { + var err error + var config *rest.Config + var ctx context.Context + var v1alpha1ClientSet *clientsetv1alpha1.V1alpha1Client + var clientset *kubernetes.Clientset + var tsm util.TestSuiteManager + + // Setup logger and context + logger, err := util.SetupLogger(fmt.Sprintf("%s/pg-provisioning-ha_test.log", automation.PROVISIONING_LOG_PATH), "pg-provisioning-ha: ") + if err != nil { + suite.T().FailNow() + } + ctx = util.SetupContext(context.Background(), logger) + + logger.Println("SetupSuite() starting...") + errBaseMsg := "Error: SetupSuite() ended" + + // Setup env + if err = util.CheckRequiredEnv(ctx); err != nil { + logger.Printf("%s! %s\n", errBaseMsg, err) + suite.T().FailNow() + } + + // Setup kubeconfig + config, err = util.SetupKubeconfig(ctx) + if err != nil { + logger.Printf("%s! %s\n", errBaseMsg, err) + suite.T().FailNow() + } + + // Setup scheme and clientsets + if v1alpha1ClientSet, clientset, err = util.SetupSchemeAndClientSet(ctx, config); err != nil { + logger.Printf("%s! %s\n", errBaseMsg, err) + suite.T().FailNow() + } + + // Setup yaml types + setupTypes, err := util.SetupTypeTemplates(ctx) + if err != nil { + logger.Printf("%s! 
%s\n", errBaseMsg, err) + suite.T().FailNow() + } + + // Get test suite manager + tsm = util.GetTestSuiteManager(ctx, *setupTypes) + + // Provision database and wait for database and pod to be ready + if err := tsm.Setup(ctx, setupTypes, clientset, v1alpha1ClientSet, suite.T()); err != nil { + logger.Printf("%s! %s\n", errBaseMsg, err) + suite.T().FailNow() + } + + // Set variables for the entire suite + suite.ctx = ctx + suite.setupTypes = setupTypes + suite.v1alpha1ClientSet = v1alpha1ClientSet + suite.clientset = clientset + suite.tsm = tsm + + logger.Println("SetupSuite() ended!") +} + +// TearDownSuite is called once after running the tests in the suite +func (suite *PostgresProvisioningHighAvailabilityTestSuite) TearDownSuite() { + var err error + + logger := util.GetLogger(suite.ctx) + logger.Println("TearDownSuite() starting...") + errBaseMsg := "Error: SetupSuite() ended" + + // Setup yaml types + setupTypes, err := util.SetupTypeTemplates(suite.ctx) + if err != nil { + logger.Printf("%s! %s\n", errBaseMsg, err) + suite.T().FailNow() + } + + // Delete resources and de-provision database + if err = suite.tsm.TearDown(suite.ctx, setupTypes, suite.clientset, suite.v1alpha1ClientSet, suite.T()); err != nil { + logger.Printf("%s! %s\n", errBaseMsg, err) + suite.T().FailNow() + } + + logger.Println("HA TearDownSuite() completed!") +} + +// This will run right before the test starts and receives the suite and test names as input +func (suite *PostgresProvisioningHighAvailabilityTestSuite) BeforeTest(suiteName, testName string) { + util.GetLogger(suite.ctx).Printf("******************** RUNNING HA TEST %s %s ********************\n", suiteName, testName) +} + +// This will run after test finishes and receives the suite and test names as input +func (suite *PostgresProvisioningHighAvailabilityTestSuite) AfterTest(suiteName, testName string) { + util.GetLogger(suite.ctx).Printf("******************** END HA TEST %s %s ********************\n", suiteName, testName) +} + +// Tests if provisioning is succesful by checking if database status is 'READY' +func (suite *PostgresProvisioningHighAvailabilityTestSuite) TestProvisioningSuccess() { + logger := util.GetLogger(suite.ctx) + + databaseResponse, err := suite.tsm.GetDatabaseOrCloneResponse(suite.ctx, suite.setupTypes, suite.clientset, suite.v1alpha1ClientSet) + if err != nil { + logger.Printf("Error: TestProvisioningSuccess() failed! %v", err) + } else { + logger.Println("Database response retrieved.") + } + + assert := assert.New(suite.T()) + assert.Equal(common.DATABASE_CR_STATUS_READY, databaseResponse.Status, "The database status should be ready.") + assert.Greater(len(databaseResponse.DatabaseNodes), 1) +} + +// Tests if app is able to connect to database via GET request +func (suite *PostgresProvisioningHighAvailabilityTestSuite) TestAppConnectivity() { + logger := util.GetLogger(suite.ctx) + + resp, err := suite.tsm.GetAppResponse(suite.ctx, suite.setupTypes, suite.clientset, automation.POSTGRES_SI_PROVISONING_LOCAL_PORT) + if err != nil { + logger.Printf("Error: TestAppConnectivity failed! 
%v", err) + } else { + logger.Println("App response retrieved.") + } + + assert := assert.New(suite.T()) + assert.Equal(200, resp.StatusCode, "The response status should be 200.") +} + +// Tests if creation of time machine is succesful +func (suite *PostgresProvisioningHighAvailabilityTestSuite) TestTimeMachineSuccess() { + logger := util.GetLogger(suite.ctx) + assert := assert.New(suite.T()) + + if suite.setupTypes.Database.Spec.Instance.TMInfo.SLAName == "" || suite.setupTypes.Database.Spec.Instance.TMInfo.SLAName == "NONE" { + logger.Println("No time machine specified, test automatically passing.") + return + } + + tm, err := suite.tsm.GetTimemachineResponseByDatabaseId(suite.ctx, suite.setupTypes, suite.clientset, suite.v1alpha1ClientSet) + if err != nil { + logger.Printf("Error: TestTimeMachineSuccess() failed! %v", err) + assert.FailNow("Error: TestTimeMachineSuccess() failed! %v", err) + } else { + logger.Println("Timemachine response retrieved.") + } + + err = util.CheckTmInfo(suite.ctx, suite.setupTypes.Database, tm) + if err != nil { + logger.Printf("Error: TestTimeMachineSuccess() failed! %v", err) + assert.FailNow("Error: TestTimeMachineSuccess() failed! %v", err) + } else { + logger.Println("CheckTmInfo succesful") + } + + assert.Equal(common.DATABASE_CR_STATUS_READY, tm.Status, "The tm status should be ready.") +} + +// In order for 'go test' to run this suite, we need to create +// a normal test function and pass our suite to suite.Run +func TestPostgresProvisioningHighAvailabilityTestSuite(t *testing.T) { + suite.Run(t, new(PostgresProvisioningHighAvailabilityTestSuite)) +} + +// BeforeTestLogTime will run right before the test starts and logs the start time of the test. +func (suite *PostgresProvisioningHighAvailabilityTestSuite) BeforeTestLogTime(suiteName, testName string) { + logger := util.GetLogger(suite.ctx) + startTime := time.Now() + // Store the start time in the context for use in AfterTestLogTime + ctx := context.WithValue(suite.ctx, "startTime", startTime) + suite.ctx = ctx + logger.Printf("******************** STARTING HA TEST %s %s at %v ********************\n", suiteName, testName, startTime) +} + +// AfterTestLogTime will run after the test finishes and calculates the duration of the test. +func (suite *PostgresProvisioningHighAvailabilityTestSuite) AfterTestLogTime(suiteName, testName string) { + logger := util.GetLogger(suite.ctx) + startTime := suite.ctx.Value("startTime").(time.Time) + endTime := time.Now() + duration := endTime.Sub(startTime) + logger.Printf("******************** ENDING HA TEST %s %s at %v (Duration: %v) ********************\n", suiteName, testName, endTime, duration) +} diff --git a/common/util/additionalArguments.go b/common/util/additionalArguments.go index 74ffa5fb..6be911ac 100644 --- a/common/util/additionalArguments.go +++ b/common/util/additionalArguments.go @@ -10,11 +10,12 @@ import ( // 1. A map where the keys are the allowed additional arguments for the database type, and the corresponding values indicates whether the key is an action argument (where true=yes and false=no). // Currently, all additional arguments are action arguments but this might not always be the case, thus this distinction is made so actual action arguments are appended to the appropriate provisioning body property. // 2. An error if there is no allowed additional arguments for the corresponding type, in other words, if the dbType is not MSSQL, MongoDB, PostGres, or MYSQL. Else nil. 
-func GetAllowedAdditionalArguments(isClone bool, dbType string) (map[string]bool, error) { + +func GetAllowedAdditionalArguments(isClone bool, dbType string, isHa bool) (map[string]bool, error) { if isClone { return GetAllowedAdditionalArgumentsForClone(dbType) } else { - return GetAllowedAdditionalArgumentsForDatabase(dbType) + return GetAllowedAdditionalArgumentsForDatabase(dbType, isHa) } } @@ -79,7 +80,7 @@ func GetAllowedAdditionalArgumentsForClone(dbType string) (map[string]bool, erro } } -func GetAllowedAdditionalArgumentsForDatabase(dbType string) (map[string]bool, error) { +func GetAllowedAdditionalArgumentsForDatabase(dbType string, isHA bool) (map[string]bool, error) { switch dbType { case common.DATABASE_TYPE_MSSQL: return map[string]bool{ @@ -104,10 +105,34 @@ func GetAllowedAdditionalArgumentsForDatabase(dbType string) (map[string]bool, e "journal_size": true, }, nil case common.DATABASE_TYPE_POSTGRES: - return map[string]bool{ - /* Has a default */ - "listener_port": true, - }, nil + if isHA { + return map[string]bool{ + /* Has a default */ + "listener_port": true, + "proxy_read_port": true, + "proxy_write_port": true, + "enable_synchronous_mode": true, + "auto_tune_staging_drive": true, + "backup_policy": true, + "db_password": true, + "database_names": true, + "provision_virtual_ip": true, + "deploy_haproxy": true, + "failover_mode": true, + "node_type": true, + "allocate_pg_hugepage": true, + "cluster_database": true, + "archive_wal_expire_days": true, + "enable_peer_auth": true, + "cluster_name": true, + "patroni_cluster_name": true, + }, nil + } else { + return map[string]bool{ + /* Has a default */ + "listener_port": true, + }, nil + } case common.DATABASE_TYPE_MYSQL: return map[string]bool{ "listener_port": true, diff --git a/config/crd/bases/ndb.nutanix.com_databases.yaml b/config/crd/bases/ndb.nutanix.com_databases.yaml index 67749d0e..12c424ac 100644 --- a/config/crd/bases/ndb.nutanix.com_databases.yaml +++ b/config/crd/bases/ndb.nutanix.com_databases.yaml @@ -69,9 +69,31 @@ spec: description: description: Description of the clone instance type: string + isHighAvailability: + type: boolean name: description: Name of the clone instance type: string + nodes: + items: + properties: + properties: + properties: + failover_mode: + type: string + node_type: + type: string + role: + type: string + required: + - node_type + type: object + vmName: + type: string + required: + - properties + type: object + type: array profiles: properties: compute: @@ -155,9 +177,31 @@ spec: description: description: Description of the database instance type: string + isHighAvailability: + type: boolean name: description: Name of the database instance type: string + nodes: + items: + properties: + properties: + properties: + failover_mode: + type: string + node_type: + type: string + role: + type: string + required: + - node_type + type: object + vmName: + type: string + required: + - properties + type: object + type: array profiles: properties: compute: diff --git a/config/default/manager_auth_proxy_patch.yaml b/config/default/manager_auth_proxy_patch.yaml index bd5c11b8..b82a310c 100644 --- a/config/default/manager_auth_proxy_patch.yaml +++ b/config/default/manager_auth_proxy_patch.yaml @@ -16,7 +16,7 @@ spec: capabilities: drop: - "ALL" - image: gcr.io/kubebuilder/kube-rbac-proxy:v0.15.0 + image: gcr.io/kubebuilder/kube-rbac-proxy:v0.16.0 args: - "--secure-listen-address=0.0.0.0:8443" - "--upstream=http://127.0.0.1:8080/" diff --git a/config/manager/kustomization.yaml 
b/config/manager/kustomization.yaml index 4410e1b2..e3356666 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -13,4 +13,4 @@ kind: Kustomization images: - name: controller newName: ghcr.io/nutanix-cloud-native/ndb-operator/controller - newTag: v0.5.0 + newTag: v0.5.1 diff --git a/controller_adapters/database.go b/controller_adapters/database.go index c2bc4506..46bde5e3 100644 --- a/controller_adapters/database.go +++ b/controller_adapters/database.go @@ -145,6 +145,21 @@ func (d *Database) GetInstanceSize() int { return d.Spec.Instance.Size } +func (d *Database) GetInstanceIsHighAvailability() bool { + if d.IsClone() { + // Clone doesn't currently support HA + return false + } + return d.Spec.Instance.IsHighAvailability +} + +func (d *Database) GetInstanceNodes() []*v1alpha1.Node { + return d.Spec.Instance.Nodes +} + // Returns basic details about the Time Machine if provided in the // underlying database, else returns defaults like: // TM Name: _TM diff --git a/controller_adapters/database_test.go b/controller_adapters/database_test.go index 60a91bde..a0de35a0 100644 --- a/controller_adapters/database_test.go +++ b/controller_adapters/database_test.go @@ -231,6 +231,30 @@ func TestDatabase_GetInstanceSize(t *testing.T) { }) } +// Tests that the GetInstanceIsHighAvailability() function retrieves IsHighAvailability correctly: +func TestDatabase_GetInstanceIsHighAvailability(t *testing.T) { + + name := "Contains IsHighAvailability" + database := Database{ + Database: v1alpha1.Database{ + Spec: v1alpha1.DatabaseSpec{ + Instance: &v1alpha1.Instance{ + IsHighAvailability: true, + }, + }, + }, + } + wantIsHighAvailability := true + + t.Run(name, func(t *testing.T) { + + gotIsHighAvailability := database.GetInstanceIsHighAvailability() + if gotIsHighAvailability != wantIsHighAvailability { + t.Errorf("Database.GetInstanceIsHighAvailability() gotIsHighAvailability = %v, want %v", gotIsHighAvailability, wantIsHighAvailability) + } + }) +} + // Tests the GetClusterId() function retrieves ClusterId correctly: func TestDatabase_GetClusterId(t *testing.T) { diff --git a/ndb_api/clone_helpers.go b/ndb_api/clone_helpers.go index 85848ad3..70fb4376 100644 --- a/ndb_api/clone_helpers.go +++ b/ndb_api/clone_helpers.go @@ -85,7 +85,7 @@ func GenerateCloningRequest(ctx context.Context, ndb_client ndb_client.NDBClient NetworkProfileId: profilesMap[common.PROFILE_TYPE_NETWORK].Id, NewDbServerTimeZone: "", NxClusterId: database.GetClusterId(), - Properties: make([]string, 0), + Properties: make([]map[string]string, 0), }, }, // Added by request appenders as per the engine @@ -96,8 +96,11 @@ func GenerateCloningRequest(ctx context.Context, ndb_client ndb_client.NDBClient NetworkProfileId: profilesMap[common.PROFILE_TYPE_NETWORK].Id, DatabaseParameterProfileId: profilesMap[common.PROFILE_TYPE_DATABASE_PARAMETER].Id, } + // boolean for high availability; unavailable for cloning + isHighAvailability := false + // Appending request body based on database type - appender, err := GetRequestAppender(databaseType) + appender, err := GetRequestAppender(databaseType, isHighAvailability) if err != nil { log.Error(err, "Error while getting a request appender") return @@ -210,6 +213,36 @@ func (a *PostgresRequestAppender) appendCloningRequest(req *DatabaseCloneRequest, database DatabaseInterface, reqData map[string]interface{}) (*DatabaseCloneRequest, error) { return req, nil } +func (a *PostgresHARequestAppender) appendCloningRequest(req *DatabaseCloneRequest,
error) { + req.SSHPublicKey = reqData[common.NDB_PARAM_SSH_PUBLIC_KEY].(string) + dbPassword := reqData[common.NDB_PARAM_PASSWORD].(string) + + req.NodeCount = len(database.GetInstanceNodes()) + + // Default action arguments + actionArguments := map[string]string{ + /* Non-Configurable from additionalArguments*/ + "vm_name": database.GetName(), + "dbserver_description": "DB Server VM for " + database.GetName(), + "db_password": dbPassword, + } + + // Appending/overwriting database actionArguments to actionArguments + if err := setConfiguredActionArguments(database, actionArguments); err != nil { + return nil, err + } + + // Converting action arguments map to list and appending to req.ActionArguments + req.ActionArguments = append(req.ActionArguments, convertMapToActionArguments(actionArguments)...) + + // Appending LCMConfig Details if specified + if err := appendLCMConfigDetailsToRequest(req, database.GetAdditionalArguments()); err != nil { + return nil, err + } + + return req, nil +} + func (a *MySqlRequestAppender) appendCloningRequest(req *DatabaseCloneRequest, database DatabaseInterface, reqData map[string]interface{}) (*DatabaseCloneRequest, error) { req.SSHPublicKey = reqData[common.NDB_PARAM_SSH_PUBLIC_KEY].(string) dbPassword := reqData[common.NDB_PARAM_PASSWORD].(string) diff --git a/ndb_api/common_helpers.go b/ndb_api/common_helpers.go index 9eb1d8b7..3d2cbd48 100644 --- a/ndb_api/common_helpers.go +++ b/ndb_api/common_helpers.go @@ -134,12 +134,16 @@ func GetDatabasePortByType(dbType string) int32 { } // Get specific implementation of the DBProvisionRequestAppender interface based on the provided databaseType -func GetRequestAppender(databaseType string) (requestAppender RequestAppender, err error) { +func GetRequestAppender(databaseType string, isHighAvailability bool) (requestAppender RequestAppender, err error) { switch databaseType { case common.DATABASE_TYPE_MYSQL: requestAppender = &MySqlRequestAppender{} case common.DATABASE_TYPE_POSTGRES: - requestAppender = &PostgresRequestAppender{} + if isHighAvailability { + requestAppender = &PostgresHARequestAppender{} + } else { + requestAppender = &PostgresRequestAppender{} + } case common.DATABASE_TYPE_MONGODB: requestAppender = &MongoDbRequestAppender{} case common.DATABASE_TYPE_MSSQL: diff --git a/ndb_api/common_helpers_test.go b/ndb_api/common_helpers_test.go index bc95c376..e3cc4964 100644 --- a/ndb_api/common_helpers_test.go +++ b/ndb_api/common_helpers_test.go @@ -242,7 +242,7 @@ func TestGetRequestAppender(t *testing.T) { } for _, tc := range testCases { - result, err := GetRequestAppender(tc.databaseType) + result, err := GetRequestAppender(tc.databaseType, false) if tc.expectedResult { assert.NotNil(t, result) assert.NoError(t, err) diff --git a/ndb_api/common_types.go b/ndb_api/common_types.go index 26c57a91..1dae7ed0 100644 --- a/ndb_api/common_types.go +++ b/ndb_api/common_types.go @@ -88,12 +88,12 @@ type ActionArgument struct { } type Node struct { - VmName string `json:"vmName"` - ComputeProfileId string `json:"computeProfileId,omitempty"` - NetworkProfileId string `json:"networkProfileId,omitempty"` - NewDbServerTimeZone string `json:"newDbServerTimeZone,omitempty"` - NxClusterId string `json:"nxClusterId,omitempty"` - Properties []string `json:"properties"` + VmName string `json:"vmName"` + ComputeProfileId string `json:"computeProfileId,omitempty"` + NetworkProfileId string `json:"networkProfileId,omitempty"` + NewDbServerTimeZone string `json:"newDbServerTimeZone,omitempty"` + NxClusterId string 
`json:"nxClusterId,omitempty"` + Properties []map[string]string `json:"properties"` } type Property struct { diff --git a/ndb_api/db_helpers.go b/ndb_api/db_helpers.go index 0af5412a..0011fbad 100644 --- a/ndb_api/db_helpers.go +++ b/ndb_api/db_helpers.go @@ -20,6 +20,7 @@ import ( "context" "errors" "fmt" + "github.com/nutanix-cloud-native/ndb-operator/api/v1alpha1" "strconv" "github.com/nutanix-cloud-native/ndb-operator/common" @@ -92,7 +93,7 @@ func GenerateProvisioningRequest(ctx context.Context, ndb_client *ndb_client.NDB }, Nodes: []Node{ { - Properties: make([]string, 0), + Properties: make([]map[string]string, 0), VmName: database.GetName() + "_VM", }, }, @@ -109,7 +110,7 @@ func GenerateProvisioningRequest(ctx context.Context, ndb_client *ndb_client.NDB } // Appending request body based on database type - appender, err := GetRequestAppender(database.GetInstanceType()) + appender, err := GetRequestAppender(database.GetInstanceType(), database.GetInstanceIsHighAvailability()) if err != nil { log.Error(err, "Error while appending provisioning request") return @@ -189,7 +190,7 @@ func setConfiguredActionArguments(database DatabaseInterface, actionArguments ma return fmt.Errorf("%s! Action arguments cannot be nil", errMsgRoot) } - allowedAdditionalArguments, err := util.GetAllowedAdditionalArguments(database.IsClone(), database.GetInstanceType()) + allowedAdditionalArguments, err := util.GetAllowedAdditionalArguments(database.IsClone(), database.GetInstanceType(), database.GetInstanceIsHighAvailability()) if err != nil { return fmt.Errorf("%s! %s", errMsgRoot, err.Error()) } @@ -304,6 +305,206 @@ func (a *PostgresRequestAppender) appendProvisioningRequest(req *DatabaseProvisi return req, nil } +func (a *PostgresHARequestAppender) appendProvisioningRequest(req *DatabaseProvisionRequest, database DatabaseInterface, reqData map[string]interface{}) (*DatabaseProvisionRequest, error) { + dbPassword := reqData[common.NDB_PARAM_PASSWORD].(string) + databaseNames := database.GetInstanceDatabaseNames() + req.SSHPublicKey = reqData[common.NDB_PARAM_SSH_PUBLIC_KEY].(string) + // Set the number of nodes to 5, 3 Postgres nodes + 2 HA Proxy nodes + err := setNodesParameters(req, database) + if err != nil { + return nil, err + } + + req.Clustered = true + + // Default action arguments + actionArguments := defaultActionArgumentsforHAProvisioning(database, dbPassword, databaseNames) + + // Appending/overwriting database actionArguments to actionArguments + if err := setConfiguredActionArguments(database, actionArguments); err != nil { + return nil, err + } + // Converting action arguments map to list and appending to req.ActionArguments + req.ActionArguments = append(req.ActionArguments, convertMapToActionArguments(actionArguments)...) 
+ + return req, nil +} + +func setNodesParameters(req *DatabaseProvisionRequest, database DatabaseInterface) (nodeErrors error) { + // Clear the original req.Nodes array + req.Nodes = []Node{} + if database.GetAdditionalArguments()["cluster_name"] == "" { + database.GetAdditionalArguments()["cluster_name"] = "postgresHaCluster" + } + // Validate node counts + nodesRequested := database.GetInstanceNodes() + if len(nodesRequested) == 0 { + nodesRequested = createDefaultNodes(database) + } + proxyNodeCount := 0 + req.NodeCount = len(nodesRequested) + primaryNodeCount, databaseNodeCount := getNodeCounts(nodesRequested) + if primaryNodeCount > 1 { + return fmt.Errorf("invalid nodes: HA instance can only have one primary node") + } + const MinReqDatabaseNodes = 3 + if databaseNodeCount < MinReqDatabaseNodes { + return fmt.Errorf("invalid node count: HA instance needs at least %d nodes, given: %d", MinReqDatabaseNodes, databaseNodeCount) + } + + for i := 0; i < req.NodeCount; i++ { + currentNode := nodesRequested[i] + + if currentNode.Properties.NodeType != "database" && currentNode.Properties.NodeType != "haproxy" { + return fmt.Errorf("invalid node type: %s", currentNode.Properties.NodeType) + } + if currentNode.Properties.NodeType == "database" { + if databaseNodeCount == 0 && primaryNodeCount == 0 && currentNode.Properties.Role == "" { + currentNode.Properties.Role = "Primary" + } + databaseNodeCount++ + if currentNode.VmName == "" { + defaultDatabaseName := database.GetAdditionalArguments()["cluster_name"] + "-" + strconv.Itoa(databaseNodeCount+1) + currentNode.VmName = defaultDatabaseName + } + if currentNode.Properties.Role == "" { + currentNode.Properties.Role = "Secondary" + } + } + if currentNode.Properties.NodeType == "haproxy" { + proxyNodeCount++ + if currentNode.VmName == "" { + defaultDatabaseName := database.GetAdditionalArguments()["cluster_name"] + "_haproxy" + strconv.Itoa(proxyNodeCount+1) + currentNode.VmName = defaultDatabaseName + } + } + isPrimaryNode := currentNode.Properties.NodeType == "database" && currentNode.Properties.Role == "Primary" + if isPrimaryNode { + primaryNodeCount += 1 + } + + props := make([]map[string]string, 4) + props[0] = map[string]string{ + "name": "role", + "value": currentNode.Properties.Role, + } + props[1] = map[string]string{ + "name": "failover_mode", + "value": currentNode.Properties.FailoverMode, + } + props[2] = map[string]string{ + "name": "node_type", + "value": currentNode.Properties.NodeType, + } + props[3] = map[string]string{ + "name": "remove_archive_destination", + "value": database.GetAdditionalArguments()["remove_archive_destination"], + } + req.Nodes = append(req.Nodes, Node{ + Properties: props, + VmName: currentNode.VmName, + NxClusterId: database.GetClusterId(), + NetworkProfileId: req.NetworkProfileId, + ComputeProfileId: req.ComputeProfileId, + }) + } + return nil +} + +func createDefaultNodes(database DatabaseInterface) []*v1alpha1.Node { + nodes := make([]*v1alpha1.Node, 0) + deployProxy := database.GetAdditionalArguments()["deploy_haproxy"] == "" || database.GetAdditionalArguments()["deploy_haproxy"] == "true" + if deployProxy { + nodes = append(nodes, &v1alpha1.Node{ + VmName: database.GetAdditionalArguments()["cluster_name"] + "_haproxy1", + Properties: v1alpha1.NodeProperties{ + NodeType: "haproxy", + }, + }) + nodes = append(nodes, &v1alpha1.Node{ + VmName: database.GetAdditionalArguments()["cluster_name"] + "_haproxy2", + Properties: v1alpha1.NodeProperties{ + NodeType: "haproxy", + }, + }) + } + nodes = 
append(nodes, &v1alpha1.Node{ + VmName: database.GetAdditionalArguments()["cluster_name"] + "-1", + Properties: v1alpha1.NodeProperties{ + NodeType: "database", + Role: "Primary", + FailoverMode: "Automatic", + }, + }) + nodes = append(nodes, &v1alpha1.Node{ + VmName: database.GetAdditionalArguments()["cluster_name"] + "-2", + Properties: v1alpha1.NodeProperties{ + NodeType: "database", + Role: "Secondary", + FailoverMode: "Automatic", + }, + }) + nodes = append(nodes, &v1alpha1.Node{ + VmName: database.GetAdditionalArguments()["cluster_name"] + "-3", + Properties: v1alpha1.NodeProperties{ + NodeType: "database", + Role: "Secondary", + FailoverMode: "Automatic", + }, + }) + return nodes +} + +func getNodeCounts(nodesRequested []*v1alpha1.Node) (primaryCount int, databaseCount int) { + + for _, node := range nodesRequested { + if node.Properties.Role == "Primary" { + primaryCount++ + } + if node.Properties.NodeType == "database" { + databaseCount++ + } + } + return primaryCount, databaseCount +} + +func defaultActionArgumentsforHAProvisioning(database DatabaseInterface, dbPassword string, databaseNames string) map[string]string { + defaults := map[string]string{ + "failover_mode": "Automatic", + "proxy_read_port": "5001", + "listener_port": "5432", + "proxy_write_port": "5000", + "enable_synchronous_mode": "true", + "auto_tune_staging_drive": "true", + "backup_policy": "primary_only", + "provision_virtual_ip": "true", + "deploy_haproxy": "true", + "node_type": "database", + "allocate_pg_hugepage": "false", + "cluster_database": "false", + "archive_wal_expire_days": "-1", + "enable_peer_auth": "false", + "cluster_name": "psqlcluster", + "patroni_cluster_name": "patroni", + } + + additionalArguments := map[string]string{ + "db_password": dbPassword, + "database_names": databaseNames, + } + originalAdditionalArguments := database.GetAdditionalArguments() + for key, defaultValue := range defaults { + value := originalAdditionalArguments[key] + if value == "" { + additionalArguments[key] = defaultValue + } else { + additionalArguments[key] = value + } + } + + return additionalArguments +} + func (a *MySqlRequestAppender) appendProvisioningRequest(req *DatabaseProvisionRequest, database DatabaseInterface, reqData map[string]interface{}) (*DatabaseProvisionRequest, error) { dbPassword := reqData[common.NDB_PARAM_PASSWORD].(string) databaseNames := database.GetInstanceDatabaseNames() diff --git a/ndb_api/db_helpers_test.go b/ndb_api/db_helpers_test.go index f39af280..c13fdbd5 100644 --- a/ndb_api/db_helpers_test.go +++ b/ndb_api/db_helpers_test.go @@ -3,6 +3,7 @@ package ndb_api import ( "context" "errors" + "github.com/nutanix-cloud-native/ndb-operator/api/v1alpha1" "reflect" "sort" "testing" @@ -85,28 +86,38 @@ func TestGetRequestAppenderByType(t *testing.T) { // test data map tests := []struct { - databaseType string - expected interface{} + databaseType string + isHighAvailability bool + expected interface{} }{ {databaseType: common.DATABASE_TYPE_POSTGRES, - expected: &PostgresRequestAppender{}, + isHighAvailability: false, + expected: &PostgresRequestAppender{}, + }, + {databaseType: common.DATABASE_TYPE_POSTGRES, + isHighAvailability: true, + expected: &PostgresHARequestAppender{}, }, {databaseType: common.DATABASE_TYPE_MYSQL, - expected: &MySqlRequestAppender{}, + isHighAvailability: false, + expected: &MySqlRequestAppender{}, }, {databaseType: common.DATABASE_TYPE_MSSQL, - expected: &MSSQLRequestAppender{}, + isHighAvailability: false, + expected: &MSSQLRequestAppender{}, }, {databaseType: 
common.DATABASE_TYPE_MONGODB, - expected: &MongoDbRequestAppender{}, + isHighAvailability: false, + expected: &MongoDbRequestAppender{}, }, {databaseType: "test", - expected: nil, + isHighAvailability: false, + expected: nil, }, } for _, tc := range tests { - got, _ := GetRequestAppender(tc.databaseType) + got, _ := GetRequestAppender(tc.databaseType, tc.isHighAvailability) if !reflect.DeepEqual(tc.expected, got) { t.Fatalf("expected: %v, got: %v", tc.expected, got) } @@ -130,6 +141,7 @@ func TestPostgresProvisionRequestAppender_withoutAdditionalArguments_positiveWor mockDatabase.On("GetInstanceType").Return(common.DATABASE_TYPE_POSTGRES) mockDatabase.On("GetAdditionalArguments").Return(map[string]string{}) mockDatabase.On("IsClone").Return(false) + mockDatabase.On("GetInstanceIsHighAvailability").Return(false) expectedActionArgs := []ActionArgument{ { Name: "proxy_read_port", @@ -166,7 +178,7 @@ func TestPostgresProvisionRequestAppender_withoutAdditionalArguments_positiveWor } // Get specific implementation of RequestAppender - requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_POSTGRES) + requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_POSTGRES, false) // Call function being tested resultRequest, err := requestAppender.appendProvisioningRequest(baseRequest, mockDatabase, reqData) @@ -211,6 +223,7 @@ func TestPostgresProvisionRequestAppender_withAdditionalArguments_positiveWorkfl "listener_port": "0000", }) mockDatabase.On("IsClone").Return(false) + mockDatabase.On("GetInstanceIsHighAvailability").Return(false) expectedActionArgs := []ActionArgument{ { @@ -248,7 +261,7 @@ func TestPostgresProvisionRequestAppender_withAdditionalArguments_positiveWorkfl } // Get specific implementation of RequestAppender - requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_POSTGRES) + requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_POSTGRES, false) // Call function being tested resultRequest, err := requestAppender.appendProvisioningRequest(baseRequest, mockDatabase, reqData) @@ -293,8 +306,301 @@ func TestPostgresProvisionRequestAppender_withAdditionalArguments_negativeWorkfl "invalid-key": "invalid-value", }) mockDatabase.On("IsClone").Return(false) + mockDatabase.On("GetInstanceIsHighAvailability").Return(false) + // Get specific implementation of RequestAppender + requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_POSTGRES, false) + + // Call function being tested + resultRequest, err := requestAppender.appendProvisioningRequest(baseRequest, mockDatabase, reqData) + + // Checks if error was returned + if err == nil { + t.Errorf("Should have errored. Expected: Setting configured action arguments failed! invalid-key is not an allowed additional argument, Got: %v", err) + } + // Checks if resultRequestIsNil + if resultRequest != nil { + t.Errorf("Should have errored. 
Expected: resultRequest to be nil, Got: %v", resultRequest) + } + + // Verify that the mock method was called with the expected arguments + mockDatabase.AssertCalled(t, "GetInstanceDatabaseNames") +} + +// Tests PostgresHAProvisionRequestAppender(), without additional arguments, positive workflow +func TestPostgresHAProvisionRequestAppender_withoutAdditionalArguments_positiveWorkflow(t *testing.T) { + + baseRequest := &DatabaseProvisionRequest{} + // Create a mock implementation of DatabaseInterface + mockDatabase := &MockDatabaseInterface{} + + reqData := map[string]interface{}{ + common.NDB_PARAM_SSH_PUBLIC_KEY: TEST_SSHKEY, + common.NDB_PARAM_PASSWORD: TEST_PASSWORD, + } + emptyNodes := make([]*v1alpha1.Node, 0) + + // Mock required Mock Database Interface methods + mockDatabase.On("GetInstanceDatabaseNames").Return(TEST_DB_NAMES) + mockDatabase.On("GetName").Return("TestPostgresHADB") + mockDatabase.On("GetInstanceType").Return(common.DATABASE_TYPE_POSTGRES) + mockDatabase.On("GetAdditionalArguments").Return(map[string]string{}) + mockDatabase.On("GetInstanceNodes").Return(emptyNodes) + mockDatabase.On("GetClusterId").Return(TEST_CLUSTER_ID) + mockDatabase.On("IsClone").Return(false) + mockDatabase.On("GetInstanceIsHighAvailability").Return(true) + expectedActionArgs := []ActionArgument{ + { + Name: "proxy_read_port", + Value: "5001", + }, + { + Name: "listener_port", + Value: "5432", + }, + { + Name: "proxy_write_port", + Value: "5000", + }, + { + Name: "enable_synchronous_mode", + Value: "true", + }, + { + Name: "auto_tune_staging_drive", + Value: "true", + }, + { + Name: "backup_policy", + Value: "primary_only", + }, + { + Name: "db_password", + Value: TEST_PASSWORD, + }, + { + Name: "database_names", + Value: TEST_DB_NAMES, + }, + { + Name: "provision_virtual_ip", + Value: "true", + }, + { + Name: "deploy_haproxy", + Value: "true", + }, + { + Name: "failover_mode", + Value: "Automatic", + }, + { + Name: "node_type", + Value: "database", + }, + { + Name: "allocate_pg_hugepage", + Value: "false", + }, + { + Name: "cluster_database", + Value: "false", + }, + { + Name: "archive_wal_expire_days", + Value: "-1", + }, + { + Name: "enable_peer_auth", + Value: "false", + }, + { + Name: "cluster_name", + Value: "postgresHaCluster", + }, + { + Name: "patroni_cluster_name", + Value: "patroni", + }, + } + + // Get specific implementation of RequestAppender + requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_POSTGRES, true) + + // Call function being tested + resultRequest, err := requestAppender.appendProvisioningRequest(baseRequest, mockDatabase, reqData) + // Assert expected results + if resultRequest.SSHPublicKey != reqData[common.NDB_PARAM_SSH_PUBLIC_KEY] { + t.Errorf("Unexpected SSHPublicKey value. Expected: %s, Got: %s", reqData[common.NDB_PARAM_SSH_PUBLIC_KEY], resultRequest.SSHPublicKey) + } + + // Checks if expected and retrieved action arguments are equal + sortWantAndGotActionArgsByName(expectedActionArgs, resultRequest.ActionArguments) + + // Checks if no error was returned + if err != nil { + t.Errorf("Unexpected error. Expected: %v, Got: %v", nil, err) + } + + // Checks requestAppender.appendProvisioningRequest return type has no error and resultRequest.ActionArguments correctly configured + if !reflect.DeepEqual(expectedActionArgs, resultRequest.ActionArguments) { + t.Errorf("Unexpected ActionArguments. 
Expected: %v, Got: %v", expectedActionArgs, resultRequest.ActionArguments) + } + + // Verify that the mock method was called with the expected arguments + mockDatabase.AssertCalled(t, "GetInstanceDatabaseNames") +} + +// Test PostgresHAProvisionRequestAppender(), with additional arguments, positive workflow +func TestPostgresHAProvisionRequestAppender_withAdditionalArguments_positiveWorkflow(t *testing.T) { + + baseRequest := &DatabaseProvisionRequest{} + // Create a mock implementation of DatabaseInterface + mockDatabase := &MockDatabaseInterface{} + + reqData := map[string]interface{}{ + common.NDB_PARAM_SSH_PUBLIC_KEY: TEST_SSHKEY, + common.NDB_PARAM_PASSWORD: TEST_PASSWORD, + } + emptyNodes := make([]*v1alpha1.Node, 0) + // Mock required Mock Database Interface methods + mockDatabase.On("GetInstanceDatabaseNames").Return(TEST_DB_NAMES) + mockDatabase.On("GetName").Return("TestPostgresHADB") + mockDatabase.On("GetInstanceType").Return(common.DATABASE_TYPE_POSTGRES) + mockDatabase.On("GetInstanceNodes").Return(emptyNodes) + mockDatabase.On("GetAdditionalArguments").Return(map[string]string{ + "listener_port": "0000", + }) + mockDatabase.On("GetClusterId").Return(TEST_CLUSTER_ID) + mockDatabase.On("IsClone").Return(false) + mockDatabase.On("GetInstanceIsHighAvailability").Return(true) + + expectedActionArgs := []ActionArgument{ + { + Name: "listener_port", + Value: "0000", + }, + { + Name: "proxy_read_port", + Value: "5001", + }, + { + Name: "proxy_write_port", + Value: "5000", + }, + { + Name: "enable_synchronous_mode", + Value: "true", + }, + { + Name: "auto_tune_staging_drive", + Value: "true", + }, + { + Name: "backup_policy", + Value: "primary_only", + }, + { + Name: "db_password", + Value: TEST_PASSWORD, + }, + { + Name: "database_names", + Value: TEST_DB_NAMES, + }, + { + Name: "provision_virtual_ip", + Value: "true", + }, + { + Name: "deploy_haproxy", + Value: "true", + }, + { + Name: "failover_mode", + Value: "Automatic", + }, + { + Name: "node_type", + Value: "database", + }, + { + Name: "allocate_pg_hugepage", + Value: "false", + }, + { + Name: "cluster_database", + Value: "false", + }, + { + Name: "archive_wal_expire_days", + Value: "-1", + }, + { + Name: "enable_peer_auth", + Value: "false", + }, + { + Name: "cluster_name", + Value: "postgresHaCluster", + }, + { + Name: "patroni_cluster_name", + Value: "patroni", + }, + } + + // Get specific implementation of RequestAppender + requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_POSTGRES, true) + + // Call function being tested + resultRequest, err := requestAppender.appendProvisioningRequest(baseRequest, mockDatabase, reqData) + + // Assert expected results + if resultRequest.SSHPublicKey != reqData[common.NDB_PARAM_SSH_PUBLIC_KEY] { + t.Errorf("Unexpected SSHPublicKey value. Expected: %s, Got: %s", reqData[common.NDB_PARAM_SSH_PUBLIC_KEY], resultRequest.SSHPublicKey) + } + + // Sort expected and retrieved action arguments + sortWantAndGotActionArgsByName(expectedActionArgs, resultRequest.ActionArguments) + + // Checks if no error was returned + if err != nil { + t.Errorf("Unexpected error. Expected: %v, Got: %v", nil, err) + } + // Check if the lengths of expected and retrieved action arguments are equal + if !reflect.DeepEqual(expectedActionArgs, resultRequest.ActionArguments) { + t.Errorf("Unexpected ActionArguments. 
Expected: %v, Got: %v", expectedActionArgs, resultRequest.ActionArguments) + } + + // Verify that the mock method was called with the expected arguments + mockDatabase.AssertCalled(t, "GetInstanceDatabaseNames") +} + +// Test PostgresHAProvisionRequestAppender(), with additional arguments, negative workflow +func TestPostgresHAProvisionRequestAppender_withoutAdditionalArguments_negativeWorkflow(t *testing.T) { + + baseRequest := &DatabaseProvisionRequest{} + // Create a mock implementation of DatabaseInterface + mockDatabase := &MockDatabaseInterface{} + + reqData := map[string]interface{}{ + common.NDB_PARAM_SSH_PUBLIC_KEY: TEST_SSHKEY, + common.NDB_PARAM_PASSWORD: TEST_PASSWORD, + } + emptyNodes := make([]*v1alpha1.Node, 0) + // Mock required Mock Database Interface methods + mockDatabase.On("GetInstanceDatabaseNames").Return(TEST_DB_NAMES) + mockDatabase.On("GetName").Return("TestPostgresHADB") + mockDatabase.On("GetInstanceType").Return(common.DATABASE_TYPE_POSTGRES) + mockDatabase.On("GetInstanceNodes").Return(emptyNodes) + mockDatabase.On("GetAdditionalArguments").Return(map[string]string{ + "invalid-key": "invalid-value", + }) + mockDatabase.On("GetClusterId").Return(TEST_CLUSTER_ID) + mockDatabase.On("IsClone").Return(false) + mockDatabase.On("GetInstanceIsHighAvailability").Return(true) // Get specific implementation of RequestAppender - requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_POSTGRES) + requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_POSTGRES, true) // Call function being tested resultRequest, err := requestAppender.appendProvisioningRequest(baseRequest, mockDatabase, reqData) @@ -344,6 +650,7 @@ func TestMSSQLProvisionRequestAppender_withoutAdditionalArguments_positiveWorklo mockDatabase.On("GetInstanceType").Return(common.DATABASE_TYPE_MSSQL) mockDatabase.On("GetAdditionalArguments").Return(map[string]string{}) mockDatabase.On("IsClone").Return(false) + mockDatabase.On("GetInstanceIsHighAvailability").Return(false) expectedActionArgs := []ActionArgument{ { Name: "working_dir", @@ -400,7 +707,7 @@ func TestMSSQLProvisionRequestAppender_withoutAdditionalArguments_positiveWorklo } // Get specific implementation of RequestAppender - requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_MSSQL) + requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_MSSQL, false) // Call function being tested resultRequest, err := requestAppender.appendProvisioningRequest(baseRequest, mockDatabase, reqData) @@ -463,6 +770,7 @@ func TestMSSQLProvisionRequestAppender_withAdditionalArguments_positiveWorkflow( "vm_db_server_user": "", }) mockDatabase.On("IsClone").Return(false) + mockDatabase.On("GetInstanceIsHighAvailability").Return(false) expectedActionArgs := []ActionArgument{ { Name: "sql_user_name", @@ -531,7 +839,7 @@ func TestMSSQLProvisionRequestAppender_withAdditionalArguments_positiveWorkflow( } // Get specific implementation of RequestAppender - requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_MSSQL) + requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_MSSQL, false) // Call function being tested resultRequest, err := requestAppender.appendProvisioningRequest(baseRequest, mockDatabase, reqData) @@ -592,8 +900,9 @@ func TestMSSQLProvisionRequestAppender_withAdditionalArguments_negativeWorkflow( "invalid-key2": "invalid-value", }) mockDatabase.On("IsClone").Return(false) + mockDatabase.On("GetInstanceIsHighAvailability").Return(false) // Get specific implementation of RequestAppender - requestAppender, _ := 
GetRequestAppender(common.DATABASE_TYPE_MSSQL) + requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_MSSQL, false) // Call function being tested resultRequest, err := requestAppender.appendProvisioningRequest(baseRequest, mockDatabase, reqData) @@ -629,6 +938,7 @@ func TestMongoDbProvisionRequestAppender_withoutAdditionalArguments_positiveWork mockDatabase.On("GetInstanceType").Return(common.DATABASE_TYPE_MONGODB) mockDatabase.On("GetAdditionalArguments").Return(map[string]string{}) mockDatabase.On("IsClone").Return(false) + mockDatabase.On("GetInstanceIsHighAvailability").Return(false) expectedActionArgs := []ActionArgument{ { Name: "listener_port", @@ -669,7 +979,7 @@ func TestMongoDbProvisionRequestAppender_withoutAdditionalArguments_positiveWork } // Get specific implementation of RequestAppender - requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_MONGODB) + requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_MONGODB, false) // Call function being tested resultRequest, err := requestAppender.appendProvisioningRequest(baseRequest, mockDatabase, reqData) @@ -716,6 +1026,7 @@ func TestMongoDbProvisionRequestAppender_withAdditionalArguments_positiveWorkflo "journal_size": "1", }) mockDatabase.On("IsClone").Return(false) + mockDatabase.On("GetInstanceIsHighAvailability").Return(false) expectedActionArgs := []ActionArgument{ { Name: "listener_port", @@ -756,7 +1067,7 @@ func TestMongoDbProvisionRequestAppender_withAdditionalArguments_positiveWorkflo } // Get specific implementation of RequestAppender - requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_MONGODB) + requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_MONGODB, false) // Call function being tested resultRequest, err := requestAppender.appendProvisioningRequest(baseRequest, mockDatabase, reqData) @@ -801,8 +1112,9 @@ func TestMongoDbProvisionRequestAppender_withAdditionalArguments_negativeWorkflo "invalid-key": "invalid-value", }) mockDatabase.On("IsClone").Return(false) + mockDatabase.On("GetInstanceIsHighAvailability").Return(false) // Get specific implementation of RequestAppender - requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_MONGODB) + requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_MONGODB, false) // Call function being tested resultRequest, err := requestAppender.appendProvisioningRequest(baseRequest, mockDatabase, reqData) @@ -837,6 +1149,7 @@ func TestMySqlProvisionRequestAppender_withoutAdditionalArguments_positiveWorkfl mockDatabase.On("GetInstanceType").Return(common.DATABASE_TYPE_MYSQL) mockDatabase.On("GetAdditionalArguments").Return(map[string]string{}) mockDatabase.On("IsClone").Return(false) + mockDatabase.On("GetInstanceIsHighAvailability").Return(false) expectedActionArgs := []ActionArgument{ { Name: "listener_port", @@ -857,7 +1170,7 @@ func TestMySqlProvisionRequestAppender_withoutAdditionalArguments_positiveWorkfl } // Get specific implementation of RequestAppender - requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_MYSQL) + requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_MYSQL, false) // Call function being tested resultRequest, err := requestAppender.appendProvisioningRequest(baseRequest, mockDatabase, reqData) @@ -902,6 +1215,7 @@ func TestMySqlProvisionRequestAppender_withAdditionalArguments_positiveWorkflow( "listener_port": "1111", }) mockDatabase.On("IsClone").Return(false) + mockDatabase.On("GetInstanceIsHighAvailability").Return(false) expectedActionArgs := []ActionArgument{ { Name: 
"listener_port", @@ -922,7 +1236,7 @@ func TestMySqlProvisionRequestAppender_withAdditionalArguments_positiveWorkflow( } // Get specific implementation of RequestAppender - requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_MYSQL) + requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_MYSQL, false) // Call function being tested resultRequest, err := requestAppender.appendProvisioningRequest(baseRequest, mockDatabase, reqData) @@ -967,8 +1281,9 @@ func TestMySqlProvisionRequestAppender_withAdditionalArguments_negativeWorkflow( "invalid-key": "invalid-value", }) mockDatabase.On("IsClone").Return(false) + mockDatabase.On("GetInstanceIsHighAvailability").Return(false) // Get specific implementation of RequestAppender - requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_MYSQL) + requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_MYSQL, false) // Call function being tested resultRequest, err := requestAppender.appendProvisioningRequest(baseRequest, mockDatabase, reqData) @@ -1288,6 +1603,7 @@ func TestGenerateProvisioningRequest_AgainstDifferentReqData(t *testing.T) { mockDatabase.On("GetInstanceSize").Return(TEST_INSTANCE_SIZE) mockDatabase.On("GetInstanceDatabaseNames").Return(TEST_DB_NAMES) mockDatabase.On("GetAdditionalArguments").Return(map[string]string{}) + mockDatabase.On("GetInstanceIsHighAvailability").Return(false) mockDatabase.On("IsClone").Return(false) // Test diff --git a/ndb_api/interface_mock_test.go b/ndb_api/interface_mock_test.go index 691db059..2d9be8ce 100644 --- a/ndb_api/interface_mock_test.go +++ b/ndb_api/interface_mock_test.go @@ -4,6 +4,7 @@ import ( "context" "net/http" + "github.com/nutanix-cloud-native/ndb-operator/api/v1alpha1" "github.com/stretchr/testify/mock" ) @@ -158,3 +159,14 @@ func (m *MockNDBClientHTTPInterface) Do(req *http.Request) (*http.Response, erro } return args.Get(0).(*http.Response), args.Error(1) } + +// GetInstanceIsHighAvailability is a mock implementation of the GetInstanceIsHighAvailability method in the Database interface +func (m *MockDatabaseInterface) GetInstanceIsHighAvailability() bool { + args := m.Called() + return args.Bool(0) +} + +func (m *MockDatabaseInterface) GetInstanceNodes() []*v1alpha1.Node { + args := m.Called() + return args.Get(0).([]*v1alpha1.Node) +} diff --git a/ndb_api/interfaces.go b/ndb_api/interfaces.go index f0c3dedd..69383cf2 100644 --- a/ndb_api/interfaces.go +++ b/ndb_api/interfaces.go @@ -18,6 +18,7 @@ package ndb_api import ( "context" + "github.com/nutanix-cloud-native/ndb-operator/api/v1alpha1" ) // External Interfaces @@ -49,6 +50,8 @@ type DatabaseInterface interface { GetCloneSourceDBId() string GetCloneSnapshotId() string GetAdditionalArguments() map[string]string + GetInstanceIsHighAvailability() bool + GetInstanceNodes() []*v1alpha1.Node } // Internal Interfaces @@ -72,3 +75,6 @@ type PostgresRequestAppender struct{} // Implements RequestAppender type MySqlRequestAppender struct{} + +// Implements RequestAppender +type PostgresHARequestAppender struct{}