From a182070deef22788ca42cf4fe1f4ee18aaa6177e Mon Sep 17 00:00:00 2001 From: Cody Irion Date: Sat, 23 Mar 2024 15:25:25 -0400 Subject: [PATCH 01/60] Initial commit with work from previous semester --- README.md | 28 +- api/v1alpha1/database_types.go | 2 + api/v1alpha1/webhook_suite_test.go | 3 + .../crd/bases/ndb.nutanix.com_databases.yaml | 4 + controller_adapters/database.go | 4 + controller_adapters/database_test.go | 24 ++ ndb_api/clone_helpers.go | 99 +++++- ndb_api/common_helpers.go | 8 +- ndb_api/common_types.go | 12 +- ndb_api/db_helpers.go | 103 +++++- ndb_api/db_helpers_test.go | 336 ++++++++++++++++-- ndb_api/interface_mock_test.go | 6 + ndb_api/interfaces.go | 4 + 13 files changed, 589 insertions(+), 44 deletions(-) diff --git a/README.md b/README.md index 958e4e2c..21cdbb1e 100644 --- a/README.md +++ b/README.md @@ -20,7 +20,7 @@ The NDB operator brings automated and simplified database administration, provis 4. A clone of the source code ([this](https://github.com/nutanix-cloud-native/ndb-operator) repository). 5. Cert-manager (only when running in non OpenShift clusters). Follow the instructions [here](https://cert-manager.io/docs/installation/). -With the pre-requisites completed, the NDB Operator can be deployed in one of the following ways: +With the pre-requisites completed, the NDB Operator can be deployed in one of the following ways: ### Outside Kubernetes Runs the controller outside the Kubernetes cluster as a process, but installs the CRDs, services and RBAC entities within the Kubernetes cluster. Generally used while development (without running webhooks): @@ -28,7 +28,7 @@ Runs the controller outside the Kubernetes cluster as a process, but installs th make install run ``` -### Within Kubernetes +### Within Kubernetes Runs the controller pod, installs the CRDs, services and RBAC entities within the Kubernetes cluster. Used to run the operator from the container image defined in the Makefile. 
Make sure that the cert-manager is installed if not using OpenShift. ```sh @@ -110,12 +110,12 @@ metadata: app.kubernetes.io/created-by: ndb-operator name: ndb spec: - # Name of the secret that holds the credentials for NDB: username, password and ca_certificate created earlier - credentialSecret: ndb-secret-name - # NDB Server's API URL - server: https://[NDB IP]:8443/era/v0.9 - # Set to true to skip SSL certificate validation, should be false if ca_certificate is provided in the credential secret. - skipCertificateVerification: true + # Name of the secret that holds the credentials for NDB: username, password and ca_certificate created earlier + credentialSecret: ndb-secret-name + # NDB Server's API URL + server: https://[NDB IP]:8443/era/v0.9 + # Set to true to skip SSL certificate validation, should be false if ca_certificate is provided in the credential secret. + skipCertificateVerification: true ``` Create the NDBServer resource using: @@ -156,6 +156,8 @@ spec: size: 10 timezone: "UTC" type: postgres + # isHighAvailability is an optional parameter. In case nothing is specified, it is set to false + isHighAvailability: false # You can specify any (or none) of these types of profiles: compute, software, network, dbParam # If not specified, the corresponding Out-of-Box (OOB) profile will be used wherever applicable @@ -214,6 +216,8 @@ spec: # Cluster id of the cluster where the Database has to be provisioned # Can be fetched from the GET /clusters endpoint clusterId: "Nutanix Cluster Id" + # isHighAvailability is an optional parameter. In case nothing is specified, it is set to false + isHighAvailability: false # You can specify any (or none) of these types of profiles: compute, software, network, dbParam # If not specified, the corresponding Out-of-Box (OOB) profile will be used wherever applicable # Name is case-sensitive. ID is the UUID of the profile. 
Profile should be in the "READY" state @@ -257,7 +261,7 @@ kubectl apply -f ### Additional Arguments for Databases Below are the various optional addtionalArguments you can specify along with examples of their corresponding values. Arguments that have defaults will be indicated. -Provisioning Additional Arguments: +Provisioning Additional Arguments: ```yaml # PostGres additionalArguments: @@ -287,7 +291,7 @@ additionalArguments: vm_win_license_key: # NO Default. ``` -Cloning Additional Arguments: +Cloning Additional Arguments: ```yaml MSSQL: windows_domain_profile_id @@ -360,7 +364,7 @@ Run your controller locally (this will run in the foreground, so switch to a new make run ``` -**NOTES:** +**NOTES:** 1. You can also run this in one step by running: `make install run` 2. Run `make --help` for more information on all potential `make` targets @@ -439,4 +443,4 @@ This code is developed in the open with input from the community through issues ## License Copyright 2022-2023 Nutanix, Inc. -The project is released under version 2.0 of the [Apache license](http://www.apache.org/licenses/LICENSE-2.0). +The project is released under version 2.0 of the [Apache license](http://www.apache.org/licenses/LICENSE-2.0). 
\ No newline at end of file diff --git a/api/v1alpha1/database_types.go b/api/v1alpha1/database_types.go index 06247563..f8edc5d1 100644 --- a/api/v1alpha1/database_types.go +++ b/api/v1alpha1/database_types.go @@ -107,6 +107,7 @@ type Instance struct { // +optional // Additional database engine specific arguments AdditionalArguments map[string]string `json:"additionalArguments"` + IsHighAvailability bool `json:"isHighAvailability"` } type Clone struct { @@ -133,6 +134,7 @@ type Clone struct { // +optional // Additional database engine specific arguments AdditionalArguments map[string]string `json:"additionalArguments"` + IsHighAvailability bool `json:"isHighAvailability"` } // Time Machine details diff --git a/api/v1alpha1/webhook_suite_test.go b/api/v1alpha1/webhook_suite_test.go index 2173c2db..89219420 100644 --- a/api/v1alpha1/webhook_suite_test.go +++ b/api/v1alpha1/webhook_suite_test.go @@ -60,6 +60,7 @@ const ( CREDENTIAL_SECRET = "database-secret" TIMEZONE = "UTC" SIZE = 10 + HA = false ) func TestAPIs(t *testing.T) { @@ -615,6 +616,7 @@ func createDefaultDatabase(metadataName string) *Database { Type: common.DATABASE_TYPE_POSTGRES, Profiles: &(Profiles{}), AdditionalArguments: map[string]string{}, + IsHighAvailability: HA, }, }, } @@ -639,6 +641,7 @@ func createDefaultClone(metadataName string) *Database { SnapshotId: DEFAULT_UUID, Profiles: &(Profiles{}), AdditionalArguments: map[string]string{}, + IsHighAvailability: HA, }, }, } diff --git a/config/crd/bases/ndb.nutanix.com_databases.yaml b/config/crd/bases/ndb.nutanix.com_databases.yaml index 67749d0e..ef748fa9 100644 --- a/config/crd/bases/ndb.nutanix.com_databases.yaml +++ b/config/crd/bases/ndb.nutanix.com_databases.yaml @@ -69,6 +69,8 @@ spec: description: description: Description of the clone instance type: string + isHighAvailability: + type: boolean name: description: Name of the clone instance type: string @@ -155,6 +157,8 @@ spec: description: description: Description of the database instance 
type: string + isHighAvailability: + type: boolean name: description: Name of the database instance type: string diff --git a/controller_adapters/database.go b/controller_adapters/database.go index c2bc4506..3757ae23 100644 --- a/controller_adapters/database.go +++ b/controller_adapters/database.go @@ -145,6 +145,10 @@ func (d *Database) GetInstanceSize() int { return d.Spec.Instance.Size } +func (d *Database) GetInstanceIsHighAvailability() bool { + return d.Spec.Instance.IsHighAvailability +} + // Returns basic details about the Time Machine if provided in the // underlying database, else returns defaults like: // TM Name: _TM diff --git a/controller_adapters/database_test.go b/controller_adapters/database_test.go index 60a91bde..a0de35a0 100644 --- a/controller_adapters/database_test.go +++ b/controller_adapters/database_test.go @@ -231,6 +231,30 @@ func TestDatabase_GetInstanceSize(t *testing.T) { }) } +// Tests the GetInstanceIsHighAvailability() function retrieves Size correctly: +func TestDatabase_GetInstanceIsHighAvailability(t *testing.T) { + + name := "Contains IsHighAvailability" + database := Database{ + Database: v1alpha1.Database{ + Spec: v1alpha1.DatabaseSpec{ + Instance: &v1alpha1.Instance{ + IsHighAvailability: true, + }, + }, + }, + } + wantIsHighAvailability := true + + t.Run(name, func(t *testing.T) { + + gotIsHighAvailability := database.GetInstanceIsHighAvailability() + if gotIsHighAvailability != wantIsHighAvailability { + t.Errorf("Database.GetInstanceIsHighAvailability() gotIsHighAvailability= %v, want %v", gotIsHighAvailability, wantIsHighAvailability) + } + }) +} + // Tests the GetClusterId() function retrieves ClusterId correctly: func TestDatabase_GetClusterId(t *testing.T) { diff --git a/ndb_api/clone_helpers.go b/ndb_api/clone_helpers.go index 85848ad3..cba2901d 100644 --- a/ndb_api/clone_helpers.go +++ b/ndb_api/clone_helpers.go @@ -17,6 +17,7 @@ import ( "context" "errors" "fmt" + "strconv" 
"github.com/nutanix-cloud-native/ndb-operator/common" "github.com/nutanix-cloud-native/ndb-operator/ndb_client" @@ -85,7 +86,7 @@ func GenerateCloningRequest(ctx context.Context, ndb_client ndb_client.NDBClient NetworkProfileId: profilesMap[common.PROFILE_TYPE_NETWORK].Id, NewDbServerTimeZone: "", NxClusterId: database.GetClusterId(), - Properties: make([]string, 0), + Properties: make([]map[string]string, 0), }, }, // Added by request appenders as per the engine @@ -96,8 +97,11 @@ func GenerateCloningRequest(ctx context.Context, ndb_client ndb_client.NDBClient NetworkProfileId: profilesMap[common.PROFILE_TYPE_NETWORK].Id, DatabaseParameterProfileId: profilesMap[common.PROFILE_TYPE_DATABASE_PARAMETER].Id, } + // boolean for high availability + isHighAvailability := false + // Appending request body based on database type - appender, err := GetRequestAppender(databaseType) + appender, err := GetRequestAppender(databaseType, isHighAvailability) if err != nil { log.Error(err, "Error while getting a request appender") return @@ -210,6 +214,97 @@ func (a *PostgresRequestAppender) appendCloningRequest(req *DatabaseCloneRequest return req, nil } +func setCloneNodesParameters(req *DatabaseCloneRequest, database DatabaseInterface) { + // Extract values of ComputeProfileId and NetworkProfileId + computeProfileId := req.Nodes[0].ComputeProfileId + networkProfileId := req.Nodes[0].NetworkProfileId + serverTimeZone := req.Nodes[0].NewDbServerTimeZone + + // Clear the original req.Nodes array + req.Nodes = []Node{} + + // Create node object for HA Proxy + for i := 0; i < 2; i++ { + // Hard coding the HA Proxy properties + props := make([]map[string]string, 1) + props[0] = map[string]string{ + "name": "node_type", + "value": "haproxy", + } + req.Nodes = append(req.Nodes, Node{ + Properties: props, + VmName: database.GetName() + "_haproxy" + strconv.Itoa(i), + NxClusterId: database.GetClusterId(), + }) + } + + // Create node object for Database Instances + for i := 0; i < 3; i++ { 
+ // Hard coding the DB properties + props := make([]map[string]string, 4) + props[0] = map[string]string{ + "name": "role", + "value": "Secondary", + } + // 1st node will be the primary node + if i == 0 { + props[0]["value"] = "Primary" + } + props[1] = map[string]string{ + "name": "failover_mode", + "value": "Automatic", + } + props[2] = map[string]string{ + "name": "node_type", + "value": "database", + } + props[3] = map[string]string{ + "name": "remote_archive_destination", + "value": "", + } + req.Nodes = append(req.Nodes, Node{ + ComputeProfileId: computeProfileId, + NetworkProfileId: networkProfileId, + NewDbServerTimeZone: serverTimeZone, + Properties: props, + VmName: database.GetName() + "-" + strconv.Itoa(i), + NxClusterId: database.GetClusterId(), + }) + } +} + +func (a *PostgresHARequestAppender) appendCloningRequest(req *DatabaseCloneRequest, database DatabaseInterface, reqData map[string]interface{}) (*DatabaseCloneRequest, error) { + req.SSHPublicKey = reqData[common.NDB_PARAM_SSH_PUBLIC_KEY].(string) + dbPassword := reqData[common.NDB_PARAM_PASSWORD].(string) + + // Set the number of nodes to 5, 3 Postgres nodes + 2 HA Proxy nodes + req.NodeCount = 5 + setCloneNodesParameters(req, database) + + // Default action arguments + actionArguments := map[string]string{ + /* Non-Configurable from additionalArguments*/ + "vm_name": database.GetName(), + "dbserver_description": "DB Server VM for " + database.GetName(), + "db_password": dbPassword, + } + + // Appending/overwriting database actionArguments to actionArguments + if err := setConfiguredActionArguments(database, actionArguments); err != nil { + return nil, err + } + + // Converting action arguments map to list and appending to req.ActionArguments + req.ActionArguments = append(req.ActionArguments, convertMapToActionArguments(actionArguments)...) 
+ + // Appending LCMConfig Details if specified + if err := appendLCMConfigDetailsToRequest(req, database.GetAdditionalArguments()); err != nil { + return nil, err + } + + return req, nil +} + func (a *MySqlRequestAppender) appendCloningRequest(req *DatabaseCloneRequest, database DatabaseInterface, reqData map[string]interface{}) (*DatabaseCloneRequest, error) { req.SSHPublicKey = reqData[common.NDB_PARAM_SSH_PUBLIC_KEY].(string) dbPassword := reqData[common.NDB_PARAM_PASSWORD].(string) diff --git a/ndb_api/common_helpers.go b/ndb_api/common_helpers.go index 9eb1d8b7..3d2cbd48 100644 --- a/ndb_api/common_helpers.go +++ b/ndb_api/common_helpers.go @@ -134,12 +134,16 @@ func GetDatabasePortByType(dbType string) int32 { } // Get specific implementation of the DBProvisionRequestAppender interface based on the provided databaseType -func GetRequestAppender(databaseType string) (requestAppender RequestAppender, err error) { +func GetRequestAppender(databaseType string, isHighAvailability bool) (requestAppender RequestAppender, err error) { switch databaseType { case common.DATABASE_TYPE_MYSQL: requestAppender = &MySqlRequestAppender{} case common.DATABASE_TYPE_POSTGRES: - requestAppender = &PostgresRequestAppender{} + if isHighAvailability { + requestAppender = &PostgresHARequestAppender{} + } else { + requestAppender = &PostgresRequestAppender{} + } case common.DATABASE_TYPE_MONGODB: requestAppender = &MongoDbRequestAppender{} case common.DATABASE_TYPE_MSSQL: diff --git a/ndb_api/common_types.go b/ndb_api/common_types.go index 26c57a91..1dae7ed0 100644 --- a/ndb_api/common_types.go +++ b/ndb_api/common_types.go @@ -88,12 +88,12 @@ type ActionArgument struct { } type Node struct { - VmName string `json:"vmName"` - ComputeProfileId string `json:"computeProfileId,omitempty"` - NetworkProfileId string `json:"networkProfileId,omitempty"` - NewDbServerTimeZone string `json:"newDbServerTimeZone,omitempty"` - NxClusterId string `json:"nxClusterId,omitempty"` - Properties 
[]string `json:"properties"` + VmName string `json:"vmName"` + ComputeProfileId string `json:"computeProfileId,omitempty"` + NetworkProfileId string `json:"networkProfileId,omitempty"` + NewDbServerTimeZone string `json:"newDbServerTimeZone,omitempty"` + NxClusterId string `json:"nxClusterId,omitempty"` + Properties []map[string]string `json:"properties"` } type Property struct { diff --git a/ndb_api/db_helpers.go b/ndb_api/db_helpers.go index 0af5412a..74775787 100644 --- a/ndb_api/db_helpers.go +++ b/ndb_api/db_helpers.go @@ -92,7 +92,7 @@ func GenerateProvisioningRequest(ctx context.Context, ndb_client *ndb_client.NDB }, Nodes: []Node{ { - Properties: make([]string, 0), + Properties: make([]map[string]string, 0), VmName: database.GetName() + "_VM", }, }, @@ -109,7 +109,7 @@ func GenerateProvisioningRequest(ctx context.Context, ndb_client *ndb_client.NDB } // Appending request body based on database type - appender, err := GetRequestAppender(database.GetInstanceType()) + appender, err := GetRequestAppender(database.GetInstanceType(), database.GetInstanceIsHighAvailability()) if err != nil { log.Error(err, "Error while appending provisioning request") return @@ -304,6 +304,105 @@ func (a *PostgresRequestAppender) appendProvisioningRequest(req *DatabaseProvisi return req, nil } +func setNodesParameters(req *DatabaseProvisionRequest, database DatabaseInterface) { + // Clear the original req.Nodes array + req.Nodes = []Node{} + + // Create node object for HA Proxy + for i := 0; i < 2; i++ { + // Hard coding the HA Proxy properties + props := make([]map[string]string, 1) + props[0] = map[string]string{ + "name": "node_type", + "value": "haproxy", + } + req.Nodes = append(req.Nodes, Node{ + Properties: props, + VmName: database.GetName() + "_haproxy" + strconv.Itoa(i+1), + NxClusterId: database.GetClusterId(), + }) + } + + // Create node object for Database Instances + for i := 0; i < 3; i++ { + // Hard coding the DB properties + props := make([]map[string]string, 4) + 
props[0] = map[string]string{ + "name": "role", + "value": "Secondary", + } + // 1st node will be the primary node + if i == 0 { + props[0]["value"] = "Primary" + } + props[1] = map[string]string{ + "name": "failover_mode", + "value": "Automatic", + } + props[2] = map[string]string{ + "name": "node_type", + "value": "database", + } + props[3] = map[string]string{ + "name": "remote_archive_destination", + "value": "", + } + req.Nodes = append(req.Nodes, Node{ + Properties: props, + VmName: database.GetName() + "-" + strconv.Itoa(i+1), + NetworkProfileId: req.NetworkProfileId, + ComputeProfileId: req.ComputeProfileId, + NxClusterId: database.GetClusterId(), + }) + } +} + +func (a *PostgresHARequestAppender) appendProvisioningRequest(req *DatabaseProvisionRequest, database DatabaseInterface, reqData map[string]interface{}) (*DatabaseProvisionRequest, error) { + dbPassword := reqData[common.NDB_PARAM_PASSWORD].(string) + databaseNames := database.GetInstanceDatabaseNames() + req.SSHPublicKey = reqData[common.NDB_PARAM_SSH_PUBLIC_KEY].(string) + + // Set the number of nodes to 5, 3 Postgres nodes + 2 HA Proxy nodes + req.NodeCount = 5 + setNodesParameters(req, database) + + // Set clustered to true + req.Clustered = true + + // Default action arguments + actionArguments := map[string]string{ + /* Non-Configurable from additionalArguments*/ + "proxy_read_port": "5001", + "listener_port": "5432", + "proxy_write_port": "5000", + "enable_synchronous_mode": "true", + "auto_tune_staging_drive": "true", + "backup_policy": "primary_only", + "db_password": dbPassword, + "database_names": databaseNames, + "provision_virtual_ip": "true", + "deploy_haproxy": "true", + "failover_mode": "Automatic", + "node_type": "database", + "allocate_pg_hugepage": "false", + "cluster_database": "false", + "archive_wal_expire_days": "-1", + "enable_peer_auth": "false", + "cluster_name": "psqlcluster", + "patroni_cluster_name": "patroni", + } + + // Appending/overwriting database actionArguments to 
actionArguments + if err := setConfiguredActionArguments(database, actionArguments); err != nil { + return nil, err + } + + // Converting action arguments map to list and appending to req.ActionArguments + req.ActionArguments = append(req.ActionArguments, convertMapToActionArguments(actionArguments)...) + + return req, nil +} + func (a *MySqlRequestAppender) appendProvisioningRequest(req *DatabaseProvisionRequest, database DatabaseInterface, reqData map[string]interface{}) (*DatabaseProvisionRequest, error) { dbPassword := reqData[common.NDB_PARAM_PASSWORD].(string) databaseNames := database.GetInstanceDatabaseNames() diff --git a/ndb_api/db_helpers_test.go b/ndb_api/db_helpers_test.go index f39af280..a36522a0 100644 --- a/ndb_api/db_helpers_test.go +++ b/ndb_api/db_helpers_test.go @@ -85,28 +85,38 @@ func TestGetRequestAppenderByType(t *testing.T) { // test data map tests := []struct { - databaseType string - expected interface{} + databaseType string + isHighAvailability bool + expected interface{} }{ {databaseType: common.DATABASE_TYPE_POSTGRES, - expected: &PostgresRequestAppender{}, + isHighAvailability: false, + expected: &PostgresRequestAppender{}, + }, + {databaseType: common.DATABASE_TYPE_POSTGRES, + isHighAvailability: true, + expected: &PostgresHARequestAppender{}, }, {databaseType: common.DATABASE_TYPE_MYSQL, - expected: &MySqlRequestAppender{}, + isHighAvailability: false, + expected: &MySqlRequestAppender{}, }, {databaseType: common.DATABASE_TYPE_MSSQL, - expected: &MSSQLRequestAppender{}, + isHighAvailability: false, + expected: &MSSQLRequestAppender{}, }, {databaseType: common.DATABASE_TYPE_MONGODB, - expected: &MongoDbRequestAppender{}, + isHighAvailability: false, + expected: &MongoDbRequestAppender{}, }, {databaseType: "test", - expected: nil, + isHighAvailability: false, + expected: nil, }, } for _, tc := range tests { - got, _ := GetRequestAppender(tc.databaseType) + got, _ := GetRequestAppender(tc.databaseType, tc.isHighAvailability) if 
!reflect.DeepEqual(tc.expected, got) { t.Fatalf("expected: %v, got: %v", tc.expected, got) } @@ -166,7 +176,7 @@ func TestPostgresProvisionRequestAppender_withoutAdditionalArguments_positiveWor } // Get specific implementation of RequestAppender - requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_POSTGRES) + requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_POSTGRES, false) // Call function being tested resultRequest, err := requestAppender.appendProvisioningRequest(baseRequest, mockDatabase, reqData) @@ -248,7 +258,7 @@ func TestPostgresProvisionRequestAppender_withAdditionalArguments_positiveWorkfl } // Get specific implementation of RequestAppender - requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_POSTGRES) + requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_POSTGRES, false) // Call function being tested resultRequest, err := requestAppender.appendProvisioningRequest(baseRequest, mockDatabase, reqData) @@ -294,7 +304,292 @@ func TestPostgresProvisionRequestAppender_withAdditionalArguments_negativeWorkfl }) mockDatabase.On("IsClone").Return(false) // Get specific implementation of RequestAppender - requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_POSTGRES) + requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_POSTGRES, false) + + // Call function being tested + resultRequest, err := requestAppender.appendProvisioningRequest(baseRequest, mockDatabase, reqData) + + // Checks if error was returned + if err == nil { + t.Errorf("Should have errored. Expected: Setting configured action arguments failed! invalid-key is not an allowed additional argument, Got: %v", err) + } + // Checks if resultRequestIsNil + if resultRequest != nil { + t.Errorf("Should have errored. 
Expected: resultRequest to be nil, Got: %v", resultRequest) + } + + // Verify that the mock method was called with the expected arguments + mockDatabase.AssertCalled(t, "GetInstanceDatabaseNames") +} + +// Tests PostgresHAProvisionRequestAppender(), without additional arguments, positive workflow +func TestPostgresHAProvisionRequestAppender_withoutAdditionalArguments_positiveWorkflow(t *testing.T) { + + baseRequest := &DatabaseProvisionRequest{} + // Create a mock implementation of DatabaseInterface + mockDatabase := &MockDatabaseInterface{} + + reqData := map[string]interface{}{ + common.NDB_PARAM_SSH_PUBLIC_KEY: TEST_SSHKEY, + common.NDB_PARAM_PASSWORD: TEST_PASSWORD, + } + + // Mock required Mock Database Interface methods + mockDatabase.On("GetInstanceDatabaseNames").Return(TEST_DB_NAMES) + mockDatabase.On("GetName").Return("TestPostgresHADB") + mockDatabase.On("GetInstanceType").Return(common.DATABASE_TYPE_POSTGRES) + mockDatabase.On("GetAdditionalArguments").Return(map[string]string{}) + mockDatabase.On("GetClusterId").Return(TEST_CLUSTER_ID) + mockDatabase.On("IsClone").Return(false) + expectedActionArgs := []ActionArgument{ + { + Name: "proxy_read_port", + Value: "5001", + }, + { + Name: "listener_port", + Value: "5432", + }, + { + Name: "proxy_write_port", + Value: "5000", + }, + { + Name: "enable_synchronous_mode", + Value: "true", + }, + { + Name: "auto_tune_staging_drive", + Value: "true", + }, + { + Name: "backup_policy", + Value: "primary_only", + }, + { + Name: "db_password", + Value: TEST_PASSWORD, + }, + { + Name: "database_names", + Value: TEST_DB_NAMES, + }, + { + Name: "provision_virtual_ip", + Value: "true", + }, + { + Name: "deploy_haproxy", + Value: "true", + }, + { + Name: "failover_mode", + Value: "Automatic", + }, + { + Name: "node_type", + Value: "database", + }, + { + Name: "allocate_pg_hugepage", + Value: "false", + }, + { + Name: "cluster_database", + Value: "false", + }, + { + Name: "archive_wal_expire_days", + Value: "-1", + }, + { + 
Name: "enable_peer_auth", + Value: "false", + }, + { + Name: "cluster_name", + Value: "psqlcluster", + }, + { + Name: "patroni_cluster_name", + Value: "patroni", + }, + } + + // Get specific implementation of RequestAppender + requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_POSTGRES, true) + + // Call function being tested + resultRequest, err := requestAppender.appendProvisioningRequest(baseRequest, mockDatabase, reqData) + // Assert expected results + if resultRequest.SSHPublicKey != reqData[common.NDB_PARAM_SSH_PUBLIC_KEY] { + t.Errorf("Unexpected SSHPublicKey value. Expected: %s, Got: %s", reqData[common.NDB_PARAM_SSH_PUBLIC_KEY], resultRequest.SSHPublicKey) + } + + // Checks if expected and retrieved action arguments are equal + sortWantAndGotActionArgsByName(expectedActionArgs, resultRequest.ActionArguments) + + // Checks if no error was returned + if err != nil { + t.Errorf("Unexpected error. Expected: %v, Got: %v", nil, err) + } + + // Checks requestAppender.appendProvisioningRequest return type has no error and resultRequest.ActionArguments correctly configured + if !reflect.DeepEqual(expectedActionArgs, resultRequest.ActionArguments) { + t.Errorf("Unexpected ActionArguments. 
Expected: %v, Got: %v", expectedActionArgs, resultRequest.ActionArguments) + } + + // Verify that the mock method was called with the expected arguments + mockDatabase.AssertCalled(t, "GetInstanceDatabaseNames") +} + +// Test PostgresHAProvisionRequestAppender(), with additional arguments, positive workflow +func TestPostgresHAProvisionRequestAppender_withAdditionalArguments_positiveWorkflow(t *testing.T) { + + baseRequest := &DatabaseProvisionRequest{} + // Create a mock implementation of DatabaseInterface + mockDatabase := &MockDatabaseInterface{} + + reqData := map[string]interface{}{ + common.NDB_PARAM_SSH_PUBLIC_KEY: TEST_SSHKEY, + common.NDB_PARAM_PASSWORD: TEST_PASSWORD, + } + + // Mock required Mock Database Interface methods + mockDatabase.On("GetInstanceDatabaseNames").Return(TEST_DB_NAMES) + mockDatabase.On("GetName").Return("TestPostgresHADB") + mockDatabase.On("GetInstanceType").Return(common.DATABASE_TYPE_POSTGRES) + mockDatabase.On("GetAdditionalArguments").Return(map[string]string{ + "listener_port": "0000", + }) + mockDatabase.On("GetClusterId").Return(TEST_CLUSTER_ID) + mockDatabase.On("IsClone").Return(false) + + expectedActionArgs := []ActionArgument{ + { + Name: "listener_port", + Value: "0000", + }, + { + Name: "proxy_read_port", + Value: "5001", + }, + { + Name: "proxy_write_port", + Value: "5000", + }, + { + Name: "enable_synchronous_mode", + Value: "true", + }, + { + Name: "auto_tune_staging_drive", + Value: "true", + }, + { + Name: "backup_policy", + Value: "primary_only", + }, + { + Name: "db_password", + Value: TEST_PASSWORD, + }, + { + Name: "database_names", + Value: TEST_DB_NAMES, + }, + { + Name: "provision_virtual_ip", + Value: "true", + }, + { + Name: "deploy_haproxy", + Value: "true", + }, + { + Name: "failover_mode", + Value: "Automatic", + }, + { + Name: "node_type", + Value: "database", + }, + { + Name: "allocate_pg_hugepage", + Value: "false", + }, + { + Name: "cluster_database", + Value: "false", + }, + { + Name: 
"archive_wal_expire_days", + Value: "-1", + }, + { + Name: "enable_peer_auth", + Value: "false", + }, + { + Name: "cluster_name", + Value: "psqlcluster", + }, + { + Name: "patroni_cluster_name", + Value: "patroni", + }, + } + + // Get specific implementation of RequestAppender + requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_POSTGRES, true) + + // Call function being tested + resultRequest, err := requestAppender.appendProvisioningRequest(baseRequest, mockDatabase, reqData) + + // Assert expected results + if resultRequest.SSHPublicKey != reqData[common.NDB_PARAM_SSH_PUBLIC_KEY] { + t.Errorf("Unexpected SSHPublicKey value. Expected: %s, Got: %s", reqData[common.NDB_PARAM_SSH_PUBLIC_KEY], resultRequest.SSHPublicKey) + } + + // Sort expected and retrieved action arguments + sortWantAndGotActionArgsByName(expectedActionArgs, resultRequest.ActionArguments) + + // Checks if no error was returned + if err != nil { + t.Errorf("Unexpected error. Expected: %v, Got: %v", nil, err) + } + // Check if the lengths of expected and retrieved action arguments are equal + if !reflect.DeepEqual(expectedActionArgs, resultRequest.ActionArguments) { + t.Errorf("Unexpected ActionArguments. 
Expected: %v, Got: %v", expectedActionArgs, resultRequest.ActionArguments) + } + + // Verify that the mock method was called with the expected arguments + mockDatabase.AssertCalled(t, "GetInstanceDatabaseNames") +} + +// Test PostgresHAProvisionRequestAppender(), with additional arguments, negative workflow +func TestPostgresHAProvisionRequestAppender_withoutAdditionalArguments_negativeWorkflow(t *testing.T) { + + baseRequest := &DatabaseProvisionRequest{} + // Create a mock implementation of DatabaseInterface + mockDatabase := &MockDatabaseInterface{} + + reqData := map[string]interface{}{ + common.NDB_PARAM_SSH_PUBLIC_KEY: TEST_SSHKEY, + common.NDB_PARAM_PASSWORD: TEST_PASSWORD, + } + + // Mock required Mock Database Interface methods + mockDatabase.On("GetInstanceDatabaseNames").Return(TEST_DB_NAMES) + mockDatabase.On("GetName").Return("TestPostgresHADB") + mockDatabase.On("GetInstanceType").Return(common.DATABASE_TYPE_POSTGRES) + mockDatabase.On("GetAdditionalArguments").Return(map[string]string{ + "invalid-key": "invalid-value", + }) + mockDatabase.On("GetClusterId").Return(TEST_CLUSTER_ID) + mockDatabase.On("IsClone").Return(false) + // Get specific implementation of RequestAppender + requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_POSTGRES, true) // Call function being tested resultRequest, err := requestAppender.appendProvisioningRequest(baseRequest, mockDatabase, reqData) @@ -400,7 +695,7 @@ func TestMSSQLProvisionRequestAppender_withoutAdditionalArguments_positiveWorklo } // Get specific implementation of RequestAppender - requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_MSSQL) + requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_MSSQL, false) // Call function being tested resultRequest, err := requestAppender.appendProvisioningRequest(baseRequest, mockDatabase, reqData) @@ -531,7 +826,7 @@ func TestMSSQLProvisionRequestAppender_withAdditionalArguments_positiveWorkflow( } // Get specific implementation of 
RequestAppender - requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_MSSQL) + requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_MSSQL, false) // Call function being tested resultRequest, err := requestAppender.appendProvisioningRequest(baseRequest, mockDatabase, reqData) @@ -593,7 +888,7 @@ func TestMSSQLProvisionRequestAppender_withAdditionalArguments_negativeWorkflow( }) mockDatabase.On("IsClone").Return(false) // Get specific implementation of RequestAppender - requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_MSSQL) + requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_MSSQL, false) // Call function being tested resultRequest, err := requestAppender.appendProvisioningRequest(baseRequest, mockDatabase, reqData) @@ -669,7 +964,7 @@ func TestMongoDbProvisionRequestAppender_withoutAdditionalArguments_positiveWork } // Get specific implementation of RequestAppender - requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_MONGODB) + requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_MONGODB, false) // Call function being tested resultRequest, err := requestAppender.appendProvisioningRequest(baseRequest, mockDatabase, reqData) @@ -756,7 +1051,7 @@ func TestMongoDbProvisionRequestAppender_withAdditionalArguments_positiveWorkflo } // Get specific implementation of RequestAppender - requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_MONGODB) + requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_MONGODB, false) // Call function being tested resultRequest, err := requestAppender.appendProvisioningRequest(baseRequest, mockDatabase, reqData) @@ -802,7 +1097,7 @@ func TestMongoDbProvisionRequestAppender_withAdditionalArguments_negativeWorkflo }) mockDatabase.On("IsClone").Return(false) // Get specific implementation of RequestAppender - requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_MONGODB) + requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_MONGODB, false) // Call 
function being tested resultRequest, err := requestAppender.appendProvisioningRequest(baseRequest, mockDatabase, reqData) @@ -857,7 +1152,7 @@ func TestMySqlProvisionRequestAppender_withoutAdditionalArguments_positiveWorkfl } // Get specific implementation of RequestAppender - requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_MYSQL) + requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_MYSQL, false) // Call function being tested resultRequest, err := requestAppender.appendProvisioningRequest(baseRequest, mockDatabase, reqData) @@ -922,7 +1217,7 @@ func TestMySqlProvisionRequestAppender_withAdditionalArguments_positiveWorkflow( } // Get specific implementation of RequestAppender - requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_MYSQL) + requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_MYSQL, false) // Call function being tested resultRequest, err := requestAppender.appendProvisioningRequest(baseRequest, mockDatabase, reqData) @@ -968,7 +1263,7 @@ func TestMySqlProvisionRequestAppender_withAdditionalArguments_negativeWorkflow( }) mockDatabase.On("IsClone").Return(false) // Get specific implementation of RequestAppender - requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_MYSQL) + requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_MYSQL, false) // Call function being tested resultRequest, err := requestAppender.appendProvisioningRequest(baseRequest, mockDatabase, reqData) @@ -1288,6 +1583,7 @@ func TestGenerateProvisioningRequest_AgainstDifferentReqData(t *testing.T) { mockDatabase.On("GetInstanceSize").Return(TEST_INSTANCE_SIZE) mockDatabase.On("GetInstanceDatabaseNames").Return(TEST_DB_NAMES) mockDatabase.On("GetAdditionalArguments").Return(map[string]string{}) + mockDatabase.On("GetInstanceIsHighAvailability").Return(false) mockDatabase.On("IsClone").Return(false) // Test diff --git a/ndb_api/interface_mock_test.go b/ndb_api/interface_mock_test.go index 691db059..429225e1 100644 --- 
a/ndb_api/interface_mock_test.go +++ b/ndb_api/interface_mock_test.go @@ -158,3 +158,9 @@ func (m *MockNDBClientHTTPInterface) Do(req *http.Request) (*http.Response, erro } return args.Get(0).(*http.Response), args.Error(1) } + +// GetInstanceIsHighAvailability is a mock implementation of the GetInstanceIsHighAvailability method in the Database interface +func (m *MockDatabaseInterface) GetInstanceIsHighAvailability() bool { + args := m.Called() + return args.Bool(0) +} diff --git a/ndb_api/interfaces.go b/ndb_api/interfaces.go index f0c3dedd..60a0aabd 100644 --- a/ndb_api/interfaces.go +++ b/ndb_api/interfaces.go @@ -49,6 +49,7 @@ type DatabaseInterface interface { GetCloneSourceDBId() string GetCloneSnapshotId() string GetAdditionalArguments() map[string]string + GetInstanceIsHighAvailability() bool } // Internal Interfaces @@ -72,3 +73,6 @@ type PostgresRequestAppender struct{} // Implements RequestAppender type MySqlRequestAppender struct{} + +// Implements RequestAppender +type PostgresHARequestAppender struct{} From b18fc0e0b565585ac29109c2ff105d2cae0b4624 Mon Sep 17 00:00:00 2001 From: Cody Irion Date: Sun, 24 Mar 2024 11:32:50 -0400 Subject: [PATCH 02/60] Fixed ReadMe formatting --- README.md | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 21cdbb1e..b83629ce 100644 --- a/README.md +++ b/README.md @@ -110,12 +110,12 @@ metadata: app.kubernetes.io/created-by: ndb-operator name: ndb spec: - # Name of the secret that holds the credentials for NDB: username, password and ca_certificate created earlier - credentialSecret: ndb-secret-name - # NDB Server's API URL - server: https://[NDB IP]:8443/era/v0.9 - # Set to true to skip SSL certificate validation, should be false if ca_certificate is provided in the credential secret. 
- skipCertificateVerification: true + # Name of the secret that holds the credentials for NDB: username, password and ca_certificate created earlier + credentialSecret: ndb-secret-name + # NDB Server's API URL + server: https://[NDB IP]:8443/era/v0.9 + # Set to true to skip SSL certificate validation, should be false if ca_certificate is provided in the credential secret. + skipCertificateVerification: true ``` Create the NDBServer resource using: @@ -158,7 +158,7 @@ spec: type: postgres # isHighAvailability is an optional parameter. In case nothing is specified, it is set to false isHighAvailability: false - + # You can specify any (or none) of these types of profiles: compute, software, network, dbParam # If not specified, the corresponding Out-of-Box (OOB) profile will be used wherever applicable # Name is case-sensitive. ID is the UUID of the profile. Profile should be in the "READY" state @@ -218,6 +218,7 @@ spec: clusterId: "Nutanix Cluster Id" # isHighAvailability is an optional parameter. In case nothing is specified, it is set to false isHighAvailability: false + # You can specify any (or none) of these types of profiles: compute, software, network, dbParam # If not specified, the corresponding Out-of-Box (OOB) profile will be used wherever applicable # Name is case-sensitive. ID is the UUID of the profile. Profile should be in the "READY" state @@ -443,4 +444,4 @@ This code is developed in the open with input from the community through issues ## License Copyright 2022-2023 Nutanix, Inc. -The project is released under version 2.0 of the [Apache license](http://www.apache.org/licenses/LICENSE-2.0). \ No newline at end of file +The project is released under version 2.0 of the [Apache license](http://www.apache.org/licenses/LICENSE-2.0). 
From 04b6e176e496ff11b35af8b00313f4e270deca1d Mon Sep 17 00:00:00 2001 From: Cody Irion Date: Sun, 24 Mar 2024 20:21:35 -0400 Subject: [PATCH 03/60] Added back optional tags on IsHighAvailability field --- api/v1alpha1/database_types.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/api/v1alpha1/database_types.go b/api/v1alpha1/database_types.go index f8edc5d1..81e03a4a 100644 --- a/api/v1alpha1/database_types.go +++ b/api/v1alpha1/database_types.go @@ -107,7 +107,8 @@ type Instance struct { // +optional // Additional database engine specific arguments AdditionalArguments map[string]string `json:"additionalArguments"` - IsHighAvailability bool `json:"isHighAvailability"` + // +optional + IsHighAvailability bool `json:"isHighAvailability"` } type Clone struct { @@ -134,7 +135,8 @@ type Clone struct { // +optional // Additional database engine specific arguments AdditionalArguments map[string]string `json:"additionalArguments"` - IsHighAvailability bool `json:"isHighAvailability"` + // +optional + IsHighAvailability bool `json:"isHighAvailability"` } // Time Machine details From 917228aee0fdbe2a2c2582b430fce6791bea0a75 Mon Sep 17 00:00:00 2001 From: Cody Irion Date: Tue, 2 Apr 2024 17:25:13 -0400 Subject: [PATCH 04/60] Added node struct to allow setting of properties for individual nodes --- api/v1alpha1/database_types.go | 16 ++++++++++++++ common/util/additionalArguments.go | 20 ++++++++++++++++- .../crd/bases/ndb.nutanix.com_databases.yaml | 22 +++++++++++++++++++ 3 files changed, 57 insertions(+), 1 deletion(-) diff --git a/api/v1alpha1/database_types.go b/api/v1alpha1/database_types.go index 81e03a4a..ebf46582 100644 --- a/api/v1alpha1/database_types.go +++ b/api/v1alpha1/database_types.go @@ -109,6 +109,22 @@ type Instance struct { AdditionalArguments map[string]string `json:"additionalArguments"` // +optional IsHighAvailability bool `json:"isHighAvailability"` + // +optional + Nodes []*Node `json:"nodes"` +} +type Node struct { + 
VmName string `json:"vmName"` + // +optional + NxClusterName string `json:"nxClusterName"` // not sure if we need this + // +optional + NxClusterId string `json:"nxClusterId"` // not sure if we need this + NodeType string `json:"nodeType"` + // +optional + Role string `json:"role"` + // +optional + FailoverMode string `json:"failoverMode"` + // +optional + RemoteArchiveDestination string `json:"remoteArchiveDestination"` } type Clone struct { diff --git a/common/util/additionalArguments.go b/common/util/additionalArguments.go index 74ffa5fb..6fb83f94 100644 --- a/common/util/additionalArguments.go +++ b/common/util/additionalArguments.go @@ -1,6 +1,7 @@ package util import ( + "errors" "fmt" "github.com/nutanix-cloud-native/ndb-operator/common" @@ -106,7 +107,24 @@ func GetAllowedAdditionalArgumentsForDatabase(dbType string) (map[string]bool, e case common.DATABASE_TYPE_POSTGRES: return map[string]bool{ /* Has a default */ - "listener_port": true, + "listener_port": true, + "proxy_read_port": true, + "proxy_write_port": true, + "enable_synchronous_mode": true, + "auto_tune_staging_drive": true, + "backup_policy": true, + "db_password": true, + "database_names": true, + "provision_virtual_ip": true, + "deploy_haproxy": true, + "failover_mode": true, + "node_type": true, + "allocate_pg_hugepage": true, + "cluster_database": true, + "archive_wal_expire_days": true, + "enable_peer_auth": true, + "cluster_name": true, + "patroni_cluster_name": true, }, nil case common.DATABASE_TYPE_MYSQL: return map[string]bool{ diff --git a/config/crd/bases/ndb.nutanix.com_databases.yaml b/config/crd/bases/ndb.nutanix.com_databases.yaml index ef748fa9..7c28ce69 100644 --- a/config/crd/bases/ndb.nutanix.com_databases.yaml +++ b/config/crd/bases/ndb.nutanix.com_databases.yaml @@ -162,6 +162,28 @@ spec: name: description: Name of the database instance type: string + nodes: + items: + properties: + failoverMode: + type: string + nodeType: + type: string + nxClusterId: + type: string + 
nxClusterName: + type: string + remoteArchiveDestination: + type: string + role: + type: string + vmName: + type: string + required: + - nodeType + - vmName + type: object + type: array profiles: properties: compute: From b93735584ca97c2f335a96f66a311d0e5668addd Mon Sep 17 00:00:00 2001 From: Cody Irion Date: Thu, 4 Apr 2024 12:03:27 -0400 Subject: [PATCH 05/60] Added implementation for the getInstanceNodes method --- api/v1alpha1/database_types.go | 2 ++ .../crd/bases/ndb.nutanix.com_databases.yaml | 22 +++++++++++++++++++ controller_adapters/database.go | 4 ++++ ndb_api/interface_mock_test.go | 6 +++++ ndb_api/interfaces.go | 2 ++ 5 files changed, 36 insertions(+) diff --git a/api/v1alpha1/database_types.go b/api/v1alpha1/database_types.go index ebf46582..5c936800 100644 --- a/api/v1alpha1/database_types.go +++ b/api/v1alpha1/database_types.go @@ -153,6 +153,8 @@ type Clone struct { AdditionalArguments map[string]string `json:"additionalArguments"` // +optional IsHighAvailability bool `json:"isHighAvailability"` + // +optional + Nodes []*Node `json:"nodes"` } // Time Machine details diff --git a/config/crd/bases/ndb.nutanix.com_databases.yaml b/config/crd/bases/ndb.nutanix.com_databases.yaml index 7c28ce69..e38496ab 100644 --- a/config/crd/bases/ndb.nutanix.com_databases.yaml +++ b/config/crd/bases/ndb.nutanix.com_databases.yaml @@ -74,6 +74,28 @@ spec: name: description: Name of the clone instance type: string + nodes: + items: + properties: + failoverMode: + type: string + nodeType: + type: string + nxClusterId: + type: string + nxClusterName: + type: string + remoteArchiveDestination: + type: string + role: + type: string + vmName: + type: string + required: + - nodeType + - vmName + type: object + type: array profiles: properties: compute: diff --git a/controller_adapters/database.go b/controller_adapters/database.go index 3757ae23..b2dea24a 100644 --- a/controller_adapters/database.go +++ b/controller_adapters/database.go @@ -149,6 +149,10 @@ func (d 
*Database) GetInstanceIsHighAvailability() bool { return d.Spec.Instance.IsHighAvailability } +func (d *Database) GetInstanceNodes() []*v1alpha1.Node { + return d.Spec.Instance.Nodes +} + // Returns basic details about the Time Machine if provided in the // underlying database, else returns defaults like: // TM Name: _TM diff --git a/ndb_api/interface_mock_test.go b/ndb_api/interface_mock_test.go index 429225e1..2d9be8ce 100644 --- a/ndb_api/interface_mock_test.go +++ b/ndb_api/interface_mock_test.go @@ -4,6 +4,7 @@ import ( "context" "net/http" + "github.com/nutanix-cloud-native/ndb-operator/api/v1alpha1" "github.com/stretchr/testify/mock" ) @@ -164,3 +165,8 @@ func (m *MockDatabaseInterface) GetInstanceIsHighAvailability() bool { args := m.Called() return args.Bool(0) } + +func (m *MockDatabaseInterface) GetInstanceNodes() []*v1alpha1.Node { + args := m.Called() + return args.Get(0).([]*v1alpha1.Node) +} diff --git a/ndb_api/interfaces.go b/ndb_api/interfaces.go index 60a0aabd..69383cf2 100644 --- a/ndb_api/interfaces.go +++ b/ndb_api/interfaces.go @@ -18,6 +18,7 @@ package ndb_api import ( "context" + "github.com/nutanix-cloud-native/ndb-operator/api/v1alpha1" ) // External Interfaces @@ -50,6 +51,7 @@ type DatabaseInterface interface { GetCloneSnapshotId() string GetAdditionalArguments() map[string]string GetInstanceIsHighAvailability() bool + GetInstanceNodes() []*v1alpha1.Node } // Internal Interfaces From 5ba62b87b62ad4bbf06a9fe08222a8940a3f6dfb Mon Sep 17 00:00:00 2001 From: Cody Irion Date: Mon, 8 Apr 2024 16:40:27 -0400 Subject: [PATCH 06/60] Implemented dynamically setting of nodes and basic validation of node requests --- api/v1alpha1/database_types.go | 13 ++-- ndb_api/db_helpers.go | 130 +++++++++++++++++++++------------ 2 files changed, 88 insertions(+), 55 deletions(-) diff --git a/api/v1alpha1/database_types.go b/api/v1alpha1/database_types.go index 5c936800..7006a548 100644 --- a/api/v1alpha1/database_types.go +++ b/api/v1alpha1/database_types.go 
@@ -115,16 +115,13 @@ type Instance struct { type Node struct { VmName string `json:"vmName"` // +optional - NxClusterName string `json:"nxClusterName"` // not sure if we need this + ComputeProfileId string `json:"computeProfileId,omitempty"` // +optional - NxClusterId string `json:"nxClusterId"` // not sure if we need this - NodeType string `json:"nodeType"` + NetworkProfileId string `json:"networkProfileId,omitempty"` // +optional - Role string `json:"role"` - // +optional - FailoverMode string `json:"failoverMode"` - // +optional - RemoteArchiveDestination string `json:"remoteArchiveDestination"` + NewDbServerTimeZone string `json:"newDbServerTimeZone,omitempty"` + NxClusterId string `json:"nxClusterId,omitempty"` + Properties map[string]string `json:"properties"` } type Clone struct { diff --git a/ndb_api/db_helpers.go b/ndb_api/db_helpers.go index 74775787..d669ccb8 100644 --- a/ndb_api/db_helpers.go +++ b/ndb_api/db_helpers.go @@ -20,6 +20,7 @@ import ( "context" "errors" "fmt" + "github.com/nutanix-cloud-native/ndb-operator/api/v1alpha1" "strconv" "github.com/nutanix-cloud-native/ndb-operator/common" @@ -114,8 +115,8 @@ func GenerateProvisioningRequest(ctx context.Context, ndb_client *ndb_client.NDB log.Error(err, "Error while appending provisioning request") return } - requestBody, err = appender.appendProvisioningRequest(requestBody, database, reqData) + util.ToString(requestBody) if err != nil { log.Error(err, "Error while appending provisioning request") } @@ -304,75 +305,111 @@ func (a *PostgresRequestAppender) appendProvisioningRequest(req *DatabaseProvisi return req, nil } -func setNodesParameters(req *DatabaseProvisionRequest, database DatabaseInterface) { +func setNodesParameters(req *DatabaseProvisionRequest, database DatabaseInterface) (nodeErrors error) { // Clear the original req.Nodes array req.Nodes = []Node{} // Create node object for HA Proxy - for i := 0; i < 2; i++ { - // Hard coding the HA Proxy properties - props := 
make([]map[string]string, 1) - props[0] = map[string]string{ - "name": "node_type", - "value": "haproxy", - } - req.Nodes = append(req.Nodes, Node{ - Properties: props, - VmName: database.GetName() + "_haproxy" + strconv.Itoa(i+1), - NxClusterId: database.GetClusterId(), - }) + nodeCount := len(database.GetInstanceNodes()) + req.NodeCount = nodeCount + const MinReqNodes = 2 + if nodeCount < MinReqNodes { + return fmt.Errorf("invalid node count: HA instance needs at least %d nodes, given: %d", MinReqNodes, nodeCount) } - - // Create node object for Database Instances - for i := 0; i < 3; i++ { - // Hard coding the DB properties - props := make([]map[string]string, 4) - props[0] = map[string]string{ - "name": "role", - "value": "Secondary", - } - // 1st node will be the primary node - if i == 0 { - props[0]["value"] = "Primary" - } - props[1] = map[string]string{ - "name": "failover_mode", - "value": "Automatic", + for i := 0; i < nodeCount; i++ { + currentNode := database.GetInstanceNodes()[i] + nodeErrors = validateNodeRequest(currentNode) + if nodeErrors != nil { + return nodeErrors } - props[2] = map[string]string{ - "name": "node_type", - "value": "database", - } - props[3] = map[string]string{ - "name": "remote_archive_destination", - "value": "", + reqProps := database.GetInstanceNodes()[i].Properties + props := make([]map[string]string, 0) + for key, value := range reqProps { + props = append(props, map[string]string{ + "name": key, + "value": value, + }) } req.Nodes = append(req.Nodes, Node{ Properties: props, - VmName: database.GetName() + "-" + strconv.Itoa(i+1), + VmName: currentNode.VmName, + NxClusterId: database.GetClusterId(), // change to use from currentNode on NetworkProfileId: req.NetworkProfileId, ComputeProfileId: req.ComputeProfileId, - NxClusterId: database.GetClusterId(), }) } + return nil + //// Create node object for Database Instances + //for i := 0; i < 3; i++ { + // // Hard coding the DB properties + // props := make([]map[string]string, 
4) + // props[0] = map[string]string{ + // "name": "role", + // "value": "Secondary", + // } + // // 1st node will be the primary node + // if i == 0 { + // props[0]["value"] = "Primary" + // } + // props[1] = map[string]string{ + // "name": "failover_mode", + // "value": "Automatic", + // } + // props[2] = map[string]string{ + // "name": "node_type", + // "value": "database", + // } + // props[3] = map[string]string{ + // "name": "remote_archive_destination", + // "value": "", + // } + // req.Nodes = append(req.Nodes, Node{ + // Properties: props, + // VmName: database.GetName() + "-" + strconv.Itoa(i+1), + // NetworkProfileId: req.NetworkProfileId, + // ComputeProfileId: req.ComputeProfileId, + // NxClusterId: database.GetClusterId(), + // }) + //} +} + +func validateNodeRequest(node *v1alpha1.Node) (nodeErrors error) { + if len(node.VmName) == 0 { + return fmt.Errorf("node VM name cannot be emtpy") + } + if len(node.NxClusterId) != 36 { + return fmt.Errorf("node NxClusterId must be a valid UUID") + } + properties := node.Properties + if properties == nil || len(properties) == 0 { + return fmt.Errorf("missing/empty properties for node: %s", node.VmName) + } + return nil } func (a *PostgresHARequestAppender) appendProvisioningRequest(req *DatabaseProvisionRequest, database DatabaseInterface, reqData map[string]interface{}) (*DatabaseProvisionRequest, error) { dbPassword := reqData[common.NDB_PARAM_PASSWORD].(string) databaseNames := database.GetInstanceDatabaseNames() req.SSHPublicKey = reqData[common.NDB_PARAM_SSH_PUBLIC_KEY].(string) - // Set the number of nodes to 5, 3 Postgres nodes + 2 HA Proxy nodes - req.NodeCount = 5 - setNodesParameters(req, database) + err := setNodesParameters(req, database) + if err != nil { + return nil, err + } // Set clustered to true req.Clustered = true - - // Default action arguments + failoverMode := database.GetAdditionalArguments()["failover_mode"] + if failoverMode == "" { + failoverMode = "Automatic" + } + proxyReadPort := 
database.GetAdditionalArguments()["proxy_read_port"] + if proxyReadPort == "" { + proxyReadPort = "5001" + } actionArguments := map[string]string{ /* Non-Configurable from additionalArguments*/ - "proxy_read_port": "5001", + "proxy_read_port": proxyReadPort, "listener_port": "5432", "proxy_write_port": "5000", "enable_synchronous_mode": "true", @@ -382,7 +419,7 @@ func (a *PostgresHARequestAppender) appendProvisioningRequest(req *DatabaseProvi "database_names": databaseNames, "provision_virtual_ip": "true", "deploy_haproxy": "true", - "failover_mode": "Automatic", + "failover_mode": failoverMode, "node_type": "database", "allocate_pg_hugepage": "false", "cluster_database": "false", @@ -396,7 +433,6 @@ func (a *PostgresHARequestAppender) appendProvisioningRequest(req *DatabaseProvi if err := setConfiguredActionArguments(database, actionArguments); err != nil { return nil, err } - // Converting action arguments map to list and appending to req.ActionArguments req.ActionArguments = append(req.ActionArguments, convertMapToActionArguments(actionArguments)...) 
From 0d43f84a955758668a4ac38ae3427232cfbd9e85 Mon Sep 17 00:00:00 2001 From: bobdoe945 Date: Mon, 8 Apr 2024 22:02:41 -0400 Subject: [PATCH 07/60] Dynamically setting all arguments --- ndb_api/db_helpers.go | 100 +++++++++++++++++++++++++++++++++--------- 1 file changed, 79 insertions(+), 21 deletions(-) diff --git a/ndb_api/db_helpers.go b/ndb_api/db_helpers.go index d669ccb8..1155d9a2 100644 --- a/ndb_api/db_helpers.go +++ b/ndb_api/db_helpers.go @@ -407,27 +407,85 @@ func (a *PostgresHARequestAppender) appendProvisioningRequest(req *DatabaseProvi if proxyReadPort == "" { proxyReadPort = "5001" } - actionArguments := map[string]string{ - /* Non-Configurable from additionalArguments*/ - "proxy_read_port": proxyReadPort, - "listener_port": "5432", - "proxy_write_port": "5000", - "enable_synchronous_mode": "true", - "auto_tune_staging_drive": "true", - "backup_policy": "primary_only", - "db_password": dbPassword, - "database_names": databaseNames, - "provision_virtual_ip": "true", - "deploy_haproxy": "true", - "failover_mode": failoverMode, - "node_type": "database", - "allocate_pg_hugepage": "false", - "cluster_database": "false", - "archive_wal_expire_days": "-1", - "enable_peer_auth": "false", - "cluster_name": "psqlcluster", - "patroni_cluster_name": "patroni", - } + listenerPort := database.GetAdditionalArguments()["listener_port"] + if listenerPort == "" { + listenerPort = "5432" + } + proxyWritePort := database.GetAdditionalArguments()["proxy_write_port"] + if proxyWritePort == "" { + proxyWritePort = "5000" + } + enableSynchronousMode := database.GetAdditionalArguments()["enable_synchronous_mode"] + if enableSynchronousMode == "" { + enableSynchronousMode = "true" + } + autoTuneStagingDrive := database.GetAdditionalArguments()["auto_tune_staging_drive"] + if autoTuneStagingDrive == "" { + autoTuneStagingDrive = "true" + } + backupPolicy := database.GetAdditionalArguments()["backup_policy"] + if backupPolicy == "" { + backupPolicy = "primary_only" + } + 
provisionVirtualIP := database.GetAdditionalArguments()["provision_virtual_ip"] + if provisionVirtualIP == "" { + provisionVirtualIP = "true" + } + deployHAProxy := database.GetAdditionalArguments()["deploy_haproxy"] + if deployHAProxy == "" { + deployHAProxy = "true" + } + nodeType := database.GetAdditionalArguments()["node_type"] + if nodeType == "" { + nodeType = "database" + } + allocatePGHugePage := database.GetAdditionalArguments()["allocate_pg_hugepage"] + if allocatePGHugePage == "" { + allocatePGHugePage = "false" + } + clusterDatabase := database.GetAdditionalArguments()["cluster_database"] + if clusterDatabase == "" { + clusterDatabase = "false" + } + archiveWALExpireDays := database.GetAdditionalArguments()["archive_wal_expire_days"] + if archiveWALExpireDays == "" { + archiveWALExpireDays = "-1" + } + enablePeerAuth := database.GetAdditionalArguments()["enable_peer_auth"] + if enablePeerAuth == "" { + enablePeerAuth = "false" + } + clusterName := database.GetAdditionalArguments()["cluster_name"] + if clusterName == "" { + clusterName = "psqlcluster" + } + patroniClusterName := database.GetAdditionalArguments()["patroni_cluster_name"] + if patroniClusterName == "" { + patroniClusterName = "patroni" + } + + actionArguments := map[string]string{ + /* Non-Configurable from additionalArguments*/ + "proxy_read_port": proxyReadPort, + "listener_port": listenerPort, + "proxy_write_port": proxyWritePort, + "enable_synchronous_mode": enableSynchronousMode, + "auto_tune_staging_drive": autoTuneStagingDrive, + "backup_policy": backupPolicy, + "db_password": dbPassword, + "database_names": databaseNames, + "provision_virtual_ip": provisionVirtualIP, + "deploy_haproxy": deployHAProxy, + "failover_mode": failoverMode, + "node_type": nodeType, + "allocate_pg_hugepage": allocatePGHugePage, + "cluster_database": clusterDatabase, + "archive_wal_expire_days": archiveWALExpireDays, + "enable_peer_auth": enablePeerAuth, + "cluster_name": clusterName, + 
"patroni_cluster_name": patroniClusterName, + } + // Appending/overwriting database actionArguments to actionArguments if err := setConfiguredActionArguments(database, actionArguments); err != nil { From 7ed55c904da0234cf6333e2f7dc3474d5566b639 Mon Sep 17 00:00:00 2001 From: Cody Irion Date: Thu, 11 Apr 2024 16:42:15 -0400 Subject: [PATCH 08/60] Changed Node struct to have new NodeProperties struct --- api/v1alpha1/database_types.go | 14 +- ndb_api/db_helpers.go | 273 +++++++++++++++++---------------- 2 files changed, 145 insertions(+), 142 deletions(-) diff --git a/api/v1alpha1/database_types.go b/api/v1alpha1/database_types.go index 7006a548..64287eab 100644 --- a/api/v1alpha1/database_types.go +++ b/api/v1alpha1/database_types.go @@ -113,15 +113,17 @@ type Instance struct { Nodes []*Node `json:"nodes"` } type Node struct { - VmName string `json:"vmName"` // +optional - ComputeProfileId string `json:"computeProfileId,omitempty"` + VmName string `json:"vmName"` + Properties NodeProperties `json:"properties"` +} + +type NodeProperties struct { + NodeType string `json:"properties"` // +optional - NetworkProfileId string `json:"networkProfileId,omitempty"` + Role string `json:"role"` // +optional - NewDbServerTimeZone string `json:"newDbServerTimeZone,omitempty"` - NxClusterId string `json:"nxClusterId,omitempty"` - Properties map[string]string `json:"properties"` + FailoverMode string `json:"failoverMode"` } type Clone struct { diff --git a/ndb_api/db_helpers.go b/ndb_api/db_helpers.go index 1155d9a2..9bd9f9e9 100644 --- a/ndb_api/db_helpers.go +++ b/ndb_api/db_helpers.go @@ -20,7 +20,6 @@ import ( "context" "errors" "fmt" - "github.com/nutanix-cloud-native/ndb-operator/api/v1alpha1" "strconv" "github.com/nutanix-cloud-native/ndb-operator/common" @@ -309,82 +308,85 @@ func setNodesParameters(req *DatabaseProvisionRequest, database DatabaseInterfac // Clear the original req.Nodes array req.Nodes = []Node{} - // Create node object for HA Proxy - nodeCount := 
len(database.GetInstanceNodes()) + // Validate node counts + nodeCount := len(req.Nodes) + databaseNodeCount := 0 + proxyNodeCount := 0 req.NodeCount = nodeCount - const MinReqNodes = 2 - if nodeCount < MinReqNodes { - return fmt.Errorf("invalid node count: HA instance needs at least %d nodes, given: %d", MinReqNodes, nodeCount) + primaryNodeCount := getPrimaryNodeCount(database) + if primaryNodeCount > 1 { + return fmt.Errorf("invalid nodes: HA instance can only have one primary node") } + for i := 0; i < nodeCount; i++ { currentNode := database.GetInstanceNodes()[i] - nodeErrors = validateNodeRequest(currentNode) - if nodeErrors != nil { - return nodeErrors + + if currentNode.Properties.NodeType != "database" && currentNode.Properties.NodeType != "haproxy" { + return fmt.Errorf("invalid node type: %s", currentNode.Properties.NodeType) + } + if currentNode.Properties.NodeType == "database" { + if databaseNodeCount == 0 && primaryNodeCount == 0 { + currentNode.Properties.Role = "Primary" + } + databaseNodeCount++ + if currentNode.VmName == "" { + defaultDatabaseName := database.GetAdditionalArguments()["cluster_name"] + "-" + strconv.Itoa(databaseNodeCount+1) + currentNode.VmName = defaultDatabaseName + } } - reqProps := database.GetInstanceNodes()[i].Properties + if currentNode.Properties.NodeType == "haproxy" { + proxyNodeCount++ + if currentNode.VmName == "" { + defaultDatabaseName := database.GetAdditionalArguments()["cluster_name"] + "_haproxy" + strconv.Itoa(proxyNodeCount+1) + currentNode.VmName = defaultDatabaseName + } + } + isPrimaryNode := currentNode.Properties.NodeType == "database" && currentNode.Properties.Role == "Primary" + if isPrimaryNode { + primaryNodeCount += 1 + } + + //if nodeErrors != nil { + // return nodeErrors + //} + props := make([]map[string]string, 0) - for key, value := range reqProps { - props = append(props, map[string]string{ - "name": key, - "value": value, - }) + props[0] = map[string]string{ + "role": 
currentNode.Properties.Role, + } + props[1] = map[string]string{ + "failover_mode": currentNode.Properties.FailoverMode, + } + props[2] = map[string]string{ + "role": currentNode.Properties.NodeType, + } + props[3] = map[string]string{ + "remove_archive_destination": database.GetAdditionalArguments()["remove_archive_destination"], } req.Nodes = append(req.Nodes, Node{ Properties: props, VmName: currentNode.VmName, - NxClusterId: database.GetClusterId(), // change to use from currentNode on + NxClusterId: database.GetClusterId(), NetworkProfileId: req.NetworkProfileId, ComputeProfileId: req.ComputeProfileId, }) } + const MinReqDatabaseNodes = 3 + if nodeCount < MinReqDatabaseNodes { + return fmt.Errorf("invalid node count: HA instance needs at least %d nodes, given: %d", MinReqDatabaseNodes, nodeCount) + } + return nil - //// Create node object for Database Instances - //for i := 0; i < 3; i++ { - // // Hard coding the DB properties - // props := make([]map[string]string, 4) - // props[0] = map[string]string{ - // "name": "role", - // "value": "Secondary", - // } - // // 1st node will be the primary node - // if i == 0 { - // props[0]["value"] = "Primary" - // } - // props[1] = map[string]string{ - // "name": "failover_mode", - // "value": "Automatic", - // } - // props[2] = map[string]string{ - // "name": "node_type", - // "value": "database", - // } - // props[3] = map[string]string{ - // "name": "remote_archive_destination", - // "value": "", - // } - // req.Nodes = append(req.Nodes, Node{ - // Properties: props, - // VmName: database.GetName() + "-" + strconv.Itoa(i+1), - // NetworkProfileId: req.NetworkProfileId, - // ComputeProfileId: req.ComputeProfileId, - // NxClusterId: database.GetClusterId(), - // }) - //} } -func validateNodeRequest(node *v1alpha1.Node) (nodeErrors error) { - if len(node.VmName) == 0 { - return fmt.Errorf("node VM name cannot be emtpy") - } - if len(node.NxClusterId) != 36 { - return fmt.Errorf("node NxClusterId must be a valid UUID") - 
} - properties := node.Properties - if properties == nil || len(properties) == 0 { - return fmt.Errorf("missing/empty properties for node: %s", node.VmName) +func getPrimaryNodeCount(database DatabaseInterface) int { + count := 0 + for _, node := range database.GetInstanceNodes() { + if node.Properties.Role == "Primary" { + count++ + } } - return nil + return count } func (a *PostgresHARequestAppender) appendProvisioningRequest(req *DatabaseProvisionRequest, database DatabaseInterface, reqData map[string]interface{}) (*DatabaseProvisionRequest, error) { @@ -408,84 +410,83 @@ func (a *PostgresHARequestAppender) appendProvisioningRequest(req *DatabaseProvi proxyReadPort = "5001" } listenerPort := database.GetAdditionalArguments()["listener_port"] - if listenerPort == "" { - listenerPort = "5432" - } - proxyWritePort := database.GetAdditionalArguments()["proxy_write_port"] - if proxyWritePort == "" { - proxyWritePort = "5000" - } - enableSynchronousMode := database.GetAdditionalArguments()["enable_synchronous_mode"] - if enableSynchronousMode == "" { - enableSynchronousMode = "true" - } - autoTuneStagingDrive := database.GetAdditionalArguments()["auto_tune_staging_drive"] - if autoTuneStagingDrive == "" { - autoTuneStagingDrive = "true" - } - backupPolicy := database.GetAdditionalArguments()["backup_policy"] - if backupPolicy == "" { - backupPolicy = "primary_only" - } - provisionVirtualIP := database.GetAdditionalArguments()["provision_virtual_ip"] - if provisionVirtualIP == "" { - provisionVirtualIP = "true" - } - deployHAProxy := database.GetAdditionalArguments()["deploy_haproxy"] - if deployHAProxy == "" { - deployHAProxy = "true" - } - nodeType := database.GetAdditionalArguments()["node_type"] - if nodeType == "" { - nodeType = "database" - } - allocatePGHugePage := database.GetAdditionalArguments()["allocate_pg_hugepage"] - if allocatePGHugePage == "" { - allocatePGHugePage = "false" - } - clusterDatabase := database.GetAdditionalArguments()["cluster_database"] 
- if clusterDatabase == "" { - clusterDatabase = "false" - } - archiveWALExpireDays := database.GetAdditionalArguments()["archive_wal_expire_days"] - if archiveWALExpireDays == "" { - archiveWALExpireDays = "-1" - } - enablePeerAuth := database.GetAdditionalArguments()["enable_peer_auth"] - if enablePeerAuth == "" { - enablePeerAuth = "false" - } - clusterName := database.GetAdditionalArguments()["cluster_name"] - if clusterName == "" { - clusterName = "psqlcluster" - } - patroniClusterName := database.GetAdditionalArguments()["patroni_cluster_name"] - if patroniClusterName == "" { - patroniClusterName = "patroni" - } - - actionArguments := map[string]string{ - /* Non-Configurable from additionalArguments*/ - "proxy_read_port": proxyReadPort, - "listener_port": listenerPort, - "proxy_write_port": proxyWritePort, - "enable_synchronous_mode": enableSynchronousMode, - "auto_tune_staging_drive": autoTuneStagingDrive, - "backup_policy": backupPolicy, - "db_password": dbPassword, - "database_names": databaseNames, - "provision_virtual_ip": provisionVirtualIP, - "deploy_haproxy": deployHAProxy, - "failover_mode": failoverMode, - "node_type": nodeType, - "allocate_pg_hugepage": allocatePGHugePage, - "cluster_database": clusterDatabase, - "archive_wal_expire_days": archiveWALExpireDays, - "enable_peer_auth": enablePeerAuth, - "cluster_name": clusterName, - "patroni_cluster_name": patroniClusterName, - } + if listenerPort == "" { + listenerPort = "5432" + } + proxyWritePort := database.GetAdditionalArguments()["proxy_write_port"] + if proxyWritePort == "" { + proxyWritePort = "5000" + } + enableSynchronousMode := database.GetAdditionalArguments()["enable_synchronous_mode"] + if enableSynchronousMode == "" { + enableSynchronousMode = "true" + } + autoTuneStagingDrive := database.GetAdditionalArguments()["auto_tune_staging_drive"] + if autoTuneStagingDrive == "" { + autoTuneStagingDrive = "true" + } + backupPolicy := database.GetAdditionalArguments()["backup_policy"] + if 
backupPolicy == "" { + backupPolicy = "primary_only" + } + provisionVirtualIP := database.GetAdditionalArguments()["provision_virtual_ip"] + if provisionVirtualIP == "" { + provisionVirtualIP = "true" + } + deployHAProxy := database.GetAdditionalArguments()["deploy_haproxy"] + if deployHAProxy == "" { + deployHAProxy = "true" + } + nodeType := database.GetAdditionalArguments()["node_type"] + if nodeType == "" { + nodeType = "database" + } + allocatePGHugePage := database.GetAdditionalArguments()["allocate_pg_hugepage"] + if allocatePGHugePage == "" { + allocatePGHugePage = "false" + } + clusterDatabase := database.GetAdditionalArguments()["cluster_database"] + if clusterDatabase == "" { + clusterDatabase = "false" + } + archiveWALExpireDays := database.GetAdditionalArguments()["archive_wal_expire_days"] + if archiveWALExpireDays == "" { + archiveWALExpireDays = "-1" + } + enablePeerAuth := database.GetAdditionalArguments()["enable_peer_auth"] + if enablePeerAuth == "" { + enablePeerAuth = "false" + } + clusterName := database.GetAdditionalArguments()["cluster_name"] + if clusterName == "" { + clusterName = "psqlcluster" + } + patroniClusterName := database.GetAdditionalArguments()["patroni_cluster_name"] + if patroniClusterName == "" { + patroniClusterName = "patroni" + } + actionArguments := map[string]string{ + /* Non-Configurable from additionalArguments*/ + "proxy_read_port": proxyReadPort, + "listener_port": listenerPort, + "proxy_write_port": proxyWritePort, + "enable_synchronous_mode": enableSynchronousMode, + "auto_tune_staging_drive": autoTuneStagingDrive, + "backup_policy": backupPolicy, + "db_password": dbPassword, + "database_names": databaseNames, + "provision_virtual_ip": provisionVirtualIP, + "deploy_haproxy": deployHAProxy, + "failover_mode": failoverMode, + "node_type": nodeType, + "allocate_pg_hugepage": allocatePGHugePage, + "cluster_database": clusterDatabase, + "archive_wal_expire_days": archiveWALExpireDays, + "enable_peer_auth": 
enablePeerAuth, + "cluster_name": clusterName, + "patroni_cluster_name": patroniClusterName, + } // Appending/overwriting database actionArguments to actionArguments if err := setConfiguredActionArguments(database, actionArguments); err != nil { From 77d9f6778ffa682ff4a2948f5f9696077fc7b110 Mon Sep 17 00:00:00 2001 From: Cody Irion Date: Thu, 11 Apr 2024 20:40:44 -0400 Subject: [PATCH 09/60] Fixed unit tests and added default nodes settings --- ndb_api/db_helpers.go | 58 ++++++++++++++++++++++++++++++++++---- ndb_api/db_helpers_test.go | 9 ++++-- 2 files changed, 59 insertions(+), 8 deletions(-) diff --git a/ndb_api/db_helpers.go b/ndb_api/db_helpers.go index 9bd9f9e9..36861690 100644 --- a/ndb_api/db_helpers.go +++ b/ndb_api/db_helpers.go @@ -309,17 +309,22 @@ func setNodesParameters(req *DatabaseProvisionRequest, database DatabaseInterfac req.Nodes = []Node{} // Validate node counts - nodeCount := len(req.Nodes) + nodesRequested := database.GetInstanceNodes() + nodeCount := len(nodesRequested) + if nodeCount == 0 { + nodeCount = 5 + nodesRequested = createDefaultNodes(database) + } databaseNodeCount := 0 proxyNodeCount := 0 req.NodeCount = nodeCount - primaryNodeCount := getPrimaryNodeCount(database) + primaryNodeCount := getPrimaryNodeCount(nodesRequested) if primaryNodeCount > 1 { return fmt.Errorf("invalid nodes: HA instance can only have one primary node") } for i := 0; i < nodeCount; i++ { - currentNode := database.GetInstanceNodes()[i] + currentNode := nodesRequested[i] if currentNode.Properties.NodeType != "database" && currentNode.Properties.NodeType != "haproxy" { return fmt.Errorf("invalid node type: %s", currentNode.Properties.NodeType) @@ -350,7 +355,7 @@ func setNodesParameters(req *DatabaseProvisionRequest, database DatabaseInterfac // return nodeErrors //} - props := make([]map[string]string, 0) + props := make([]map[string]string, 4) props[0] = map[string]string{ "role": currentNode.Properties.Role, } @@ -379,9 +384,50 @@ func 
setNodesParameters(req *DatabaseProvisionRequest, database DatabaseInterfac return nil } -func getPrimaryNodeCount(database DatabaseInterface) int { +func createDefaultNodes(database DatabaseInterface) []*v1alpha1.Node { + nodes := make([]*v1alpha1.Node, 0) + nodes = append(nodes, &v1alpha1.Node{ + VmName: database.GetAdditionalArguments()["cluster_name"] + "_haproxy1", + Properties: v1alpha1.NodeProperties{ + NodeType: "haproxy", + }, + }) + nodes = append(nodes, &v1alpha1.Node{ + VmName: database.GetAdditionalArguments()["cluster_name"] + "_haproxy2", + Properties: v1alpha1.NodeProperties{ + NodeType: "haproxy", + }, + }) + nodes = append(nodes, &v1alpha1.Node{ + VmName: database.GetAdditionalArguments()["cluster_name"] + "-1", + Properties: v1alpha1.NodeProperties{ + NodeType: "database", + Role: "Primary", + FailoverMode: "Automatic", + }, + }) + nodes = append(nodes, &v1alpha1.Node{ + VmName: database.GetAdditionalArguments()["cluster_name"] + "-2", + Properties: v1alpha1.NodeProperties{ + NodeType: "database", + Role: "Secondary", + FailoverMode: "Automatic", + }, + }) + nodes = append(nodes, &v1alpha1.Node{ + VmName: database.GetAdditionalArguments()["cluster_name"] + "-3", + Properties: v1alpha1.NodeProperties{ + NodeType: "database", + Role: "Secondary", + FailoverMode: "Automatic", + }, + }) + return nodes +} + +func getPrimaryNodeCount(nodesRequested []*v1alpha1.Node) int { count := 0 - for _, node := range database.GetInstanceNodes() { + for _, node := range nodesRequested { if node.Properties.Role == "Primary" { count++ } diff --git a/ndb_api/db_helpers_test.go b/ndb_api/db_helpers_test.go index a36522a0..fb888bf0 100644 --- a/ndb_api/db_helpers_test.go +++ b/ndb_api/db_helpers_test.go @@ -3,6 +3,7 @@ package ndb_api import ( "context" "errors" + "github.com/nutanix-cloud-native/ndb-operator/api/v1alpha1" "reflect" "sort" "testing" @@ -333,12 +334,14 @@ func TestPostgresHAProvisionRequestAppender_withoutAdditionalArguments_positiveW 
common.NDB_PARAM_SSH_PUBLIC_KEY: TEST_SSHKEY, common.NDB_PARAM_PASSWORD: TEST_PASSWORD, } + emptyNodes := make([]*v1alpha1.Node, 0) // Mock required Mock Database Interface methods mockDatabase.On("GetInstanceDatabaseNames").Return(TEST_DB_NAMES) mockDatabase.On("GetName").Return("TestPostgresHADB") mockDatabase.On("GetInstanceType").Return(common.DATABASE_TYPE_POSTGRES) mockDatabase.On("GetAdditionalArguments").Return(map[string]string{}) + mockDatabase.On("GetInstanceNodes").Return(emptyNodes) mockDatabase.On("GetClusterId").Return(TEST_CLUSTER_ID) mockDatabase.On("IsClone").Return(false) expectedActionArgs := []ActionArgument{ @@ -454,11 +457,12 @@ func TestPostgresHAProvisionRequestAppender_withAdditionalArguments_positiveWork common.NDB_PARAM_SSH_PUBLIC_KEY: TEST_SSHKEY, common.NDB_PARAM_PASSWORD: TEST_PASSWORD, } - + emptyNodes := make([]*v1alpha1.Node, 0) // Mock required Mock Database Interface methods mockDatabase.On("GetInstanceDatabaseNames").Return(TEST_DB_NAMES) mockDatabase.On("GetName").Return("TestPostgresHADB") mockDatabase.On("GetInstanceType").Return(common.DATABASE_TYPE_POSTGRES) + mockDatabase.On("GetInstanceNodes").Return(emptyNodes) mockDatabase.On("GetAdditionalArguments").Return(map[string]string{ "listener_port": "0000", }) @@ -578,11 +582,12 @@ func TestPostgresHAProvisionRequestAppender_withoutAdditionalArguments_negativeW common.NDB_PARAM_SSH_PUBLIC_KEY: TEST_SSHKEY, common.NDB_PARAM_PASSWORD: TEST_PASSWORD, } - + emptyNodes := make([]*v1alpha1.Node, 0) // Mock required Mock Database Interface methods mockDatabase.On("GetInstanceDatabaseNames").Return(TEST_DB_NAMES) mockDatabase.On("GetName").Return("TestPostgresHADB") mockDatabase.On("GetInstanceType").Return(common.DATABASE_TYPE_POSTGRES) + mockDatabase.On("GetInstanceNodes").Return(emptyNodes) mockDatabase.On("GetAdditionalArguments").Return(map[string]string{ "invalid-key": "invalid-value", }) From 6cc94b7c51014d930d856d6a95c0bb62e2808ee2 Mon Sep 17 00:00:00 2001 From: Cody Irion 
Date: Tue, 16 Apr 2024 16:45:46 -0400 Subject: [PATCH 10/60] cleaning up --- common/util/additionalArguments.go | 1 - ndb_api/db_helpers.go | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/common/util/additionalArguments.go b/common/util/additionalArguments.go index 6fb83f94..7d6b72d0 100644 --- a/common/util/additionalArguments.go +++ b/common/util/additionalArguments.go @@ -1,7 +1,6 @@ package util import ( - "errors" "fmt" "github.com/nutanix-cloud-native/ndb-operator/common" diff --git a/ndb_api/db_helpers.go b/ndb_api/db_helpers.go index 36861690..b2c8b574 100644 --- a/ndb_api/db_helpers.go +++ b/ndb_api/db_helpers.go @@ -20,6 +20,7 @@ import ( "context" "errors" "fmt" + "github.com/nutanix-cloud-native/ndb-operator/api/v1alpha1" "strconv" "github.com/nutanix-cloud-native/ndb-operator/common" From d2d16a0a29c4443656077b461f9da8387dc2e97a Mon Sep 17 00:00:00 2001 From: Cody Irion Date: Tue, 16 Apr 2024 16:53:00 -0400 Subject: [PATCH 11/60] cleaning up --- README.md | 10 +++++----- ndb_api/db_helpers.go | 11 +++++------ 2 files changed, 10 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index b83629ce..4cd600ae 100644 --- a/README.md +++ b/README.md @@ -20,7 +20,7 @@ The NDB operator brings automated and simplified database administration, provis 4. A clone of the source code ([this](https://github.com/nutanix-cloud-native/ndb-operator) repository). 5. Cert-manager (only when running in non OpenShift clusters). Follow the instructions [here](https://cert-manager.io/docs/installation/). -With the pre-requisites completed, the NDB Operator can be deployed in one of the following ways: +With the pre-requisites completed, the NDB Operator can be deployed in one of the following ways: ### Outside Kubernetes Runs the controller outside the Kubernetes cluster as a process, but installs the CRDs, services and RBAC entities within the Kubernetes cluster. 
Generally used while development (without running webhooks): @@ -28,7 +28,7 @@ Runs the controller outside the Kubernetes cluster as a process, but installs th make install run ``` -### Within Kubernetes +### Within Kubernetes Runs the controller pod, installs the CRDs, services and RBAC entities within the Kubernetes cluster. Used to run the operator from the container image defined in the Makefile. Make sure that the cert-manager is installed if not using OpenShift. ```sh @@ -262,7 +262,7 @@ kubectl apply -f ### Additional Arguments for Databases Below are the various optional addtionalArguments you can specify along with examples of their corresponding values. Arguments that have defaults will be indicated. -Provisioning Additional Arguments: +Provisioning Additional Arguments: ```yaml # PostGres additionalArguments: @@ -292,7 +292,7 @@ additionalArguments: vm_win_license_key: # NO Default. ``` -Cloning Additional Arguments: +Cloning Additional Arguments: ```yaml MSSQL: windows_domain_profile_id @@ -365,7 +365,7 @@ Run your controller locally (this will run in the foreground, so switch to a new make run ``` -**NOTES:** +**NOTES:** 1. You can also run this in one step by running: `make install run` 2. 
Run `make --help` for more information on all potential `make` targets diff --git a/ndb_api/db_helpers.go b/ndb_api/db_helpers.go index b2c8b574..fbe7d6ab 100644 --- a/ndb_api/db_helpers.go +++ b/ndb_api/db_helpers.go @@ -115,8 +115,8 @@ func GenerateProvisioningRequest(ctx context.Context, ndb_client *ndb_client.NDB log.Error(err, "Error while appending provisioning request") return } + requestBody, err = appender.appendProvisioningRequest(requestBody, database, reqData) - util.ToString(requestBody) if err != nil { log.Error(err, "Error while appending provisioning request") } @@ -331,7 +331,7 @@ func setNodesParameters(req *DatabaseProvisionRequest, database DatabaseInterfac return fmt.Errorf("invalid node type: %s", currentNode.Properties.NodeType) } if currentNode.Properties.NodeType == "database" { - if databaseNodeCount == 0 && primaryNodeCount == 0 { + if databaseNodeCount == 0 && primaryNodeCount == 0 && currentNode.Properties.Role == "" { currentNode.Properties.Role = "Primary" } databaseNodeCount++ @@ -339,6 +339,9 @@ func setNodesParameters(req *DatabaseProvisionRequest, database DatabaseInterfac defaultDatabaseName := database.GetAdditionalArguments()["cluster_name"] + "-" + strconv.Itoa(databaseNodeCount+1) currentNode.VmName = defaultDatabaseName } + if currentNode.Properties.Role == "" { + currentNode.Properties.Role = "Secondary" + } } if currentNode.Properties.NodeType == "haproxy" { proxyNodeCount++ @@ -352,10 +355,6 @@ func setNodesParameters(req *DatabaseProvisionRequest, database DatabaseInterfac primaryNodeCount += 1 } - //if nodeErrors != nil { - // return nodeErrors - //} - props := make([]map[string]string, 4) props[0] = map[string]string{ "role": currentNode.Properties.Role, From af1bce277696f6f4b4380bbcfe90e84d819642ba Mon Sep 17 00:00:00 2001 From: Cody Irion Date: Tue, 16 Apr 2024 18:26:27 -0400 Subject: [PATCH 12/60] Updated dynamic nodes format --- api/v1alpha1/database_types.go | 4 ++-- ndb_api/common_helpers_test.go | 2 +- 
ndb_api/db_helpers.go | 12 ++++++++---- 3 files changed, 11 insertions(+), 7 deletions(-) diff --git a/api/v1alpha1/database_types.go b/api/v1alpha1/database_types.go index 64287eab..92ddfce9 100644 --- a/api/v1alpha1/database_types.go +++ b/api/v1alpha1/database_types.go @@ -119,11 +119,11 @@ type Node struct { } type NodeProperties struct { - NodeType string `json:"properties"` + NodeType string `json:"node_type"` // +optional Role string `json:"role"` // +optional - FailoverMode string `json:"failoverMode"` + FailoverMode string `json:"failover_mode"` } type Clone struct { diff --git a/ndb_api/common_helpers_test.go b/ndb_api/common_helpers_test.go index bc95c376..e3cc4964 100644 --- a/ndb_api/common_helpers_test.go +++ b/ndb_api/common_helpers_test.go @@ -242,7 +242,7 @@ func TestGetRequestAppender(t *testing.T) { } for _, tc := range testCases { - result, err := GetRequestAppender(tc.databaseType) + result, err := GetRequestAppender(tc.databaseType, false) if tc.expectedResult { assert.NotNil(t, result) assert.NoError(t, err) diff --git a/ndb_api/db_helpers.go b/ndb_api/db_helpers.go index fbe7d6ab..9369a855 100644 --- a/ndb_api/db_helpers.go +++ b/ndb_api/db_helpers.go @@ -357,16 +357,20 @@ func setNodesParameters(req *DatabaseProvisionRequest, database DatabaseInterfac props := make([]map[string]string, 4) props[0] = map[string]string{ - "role": currentNode.Properties.Role, + "name": "role", + "value": currentNode.Properties.Role, } props[1] = map[string]string{ - "failover_mode": currentNode.Properties.FailoverMode, + "name": "failover_mode", + "value": currentNode.Properties.FailoverMode, } props[2] = map[string]string{ - "role": currentNode.Properties.NodeType, + "name": "node_type", + "value": currentNode.Properties.NodeType, } props[3] = map[string]string{ - "remove_archive_destination": database.GetAdditionalArguments()["remove_archive_destination"], + "name": "remove_archive_destination", + "value": 
database.GetAdditionalArguments()["remove_archive_destination"], } req.Nodes = append(req.Nodes, Node{ Properties: props, From c326980f1c1eeb786ed606c3a97266a8450995e7 Mon Sep 17 00:00:00 2001 From: Cody Irion Date: Wed, 17 Apr 2024 09:16:04 -0400 Subject: [PATCH 13/60] Added omitempty to nodes to fix existing unit tests --- api/v1alpha1/database_types.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/api/v1alpha1/database_types.go b/api/v1alpha1/database_types.go index 92ddfce9..d9e05b12 100644 --- a/api/v1alpha1/database_types.go +++ b/api/v1alpha1/database_types.go @@ -110,7 +110,7 @@ type Instance struct { // +optional IsHighAvailability bool `json:"isHighAvailability"` // +optional - Nodes []*Node `json:"nodes"` + Nodes []*Node `json:"nodes,omitempty"` } type Node struct { // +optional @@ -153,7 +153,7 @@ type Clone struct { // +optional IsHighAvailability bool `json:"isHighAvailability"` // +optional - Nodes []*Node `json:"nodes"` + Nodes []*Node `json:"nodes,omitempty"` } // Time Machine details From ec0daa80107c9f40845f898e1bd65b3fa48e208e Mon Sep 17 00:00:00 2001 From: Justin Orringer Date: Thu, 18 Apr 2024 00:55:23 -0400 Subject: [PATCH 14/60] Moved default logic to map and new function --- ndb_api/db_helpers.go | 125 +++++++++++++----------------------------- 1 file changed, 39 insertions(+), 86 deletions(-) diff --git a/ndb_api/db_helpers.go b/ndb_api/db_helpers.go index 9369a855..4de9a93c 100644 --- a/ndb_api/db_helpers.go +++ b/ndb_api/db_helpers.go @@ -449,94 +449,10 @@ func (a *PostgresHARequestAppender) appendProvisioningRequest(req *DatabaseProvi return nil, err } - // Set clustered to true req.Clustered = true - failoverMode := database.GetAdditionalArguments()["failover_mode"] - if failoverMode == "" { - failoverMode = "Automatic" - } - proxyReadPort := database.GetAdditionalArguments()["proxy_read_port"] - if proxyReadPort == "" { - proxyReadPort = "5001" - } - listenerPort := 
database.GetAdditionalArguments()["listener_port"] - if listenerPort == "" { - listenerPort = "5432" - } - proxyWritePort := database.GetAdditionalArguments()["proxy_write_port"] - if proxyWritePort == "" { - proxyWritePort = "5000" - } - enableSynchronousMode := database.GetAdditionalArguments()["enable_synchronous_mode"] - if enableSynchronousMode == "" { - enableSynchronousMode = "true" - } - autoTuneStagingDrive := database.GetAdditionalArguments()["auto_tune_staging_drive"] - if autoTuneStagingDrive == "" { - autoTuneStagingDrive = "true" - } - backupPolicy := database.GetAdditionalArguments()["backup_policy"] - if backupPolicy == "" { - backupPolicy = "primary_only" - } - provisionVirtualIP := database.GetAdditionalArguments()["provision_virtual_ip"] - if provisionVirtualIP == "" { - provisionVirtualIP = "true" - } - deployHAProxy := database.GetAdditionalArguments()["deploy_haproxy"] - if deployHAProxy == "" { - deployHAProxy = "true" - } - nodeType := database.GetAdditionalArguments()["node_type"] - if nodeType == "" { - nodeType = "database" - } - allocatePGHugePage := database.GetAdditionalArguments()["allocate_pg_hugepage"] - if allocatePGHugePage == "" { - allocatePGHugePage = "false" - } - clusterDatabase := database.GetAdditionalArguments()["cluster_database"] - if clusterDatabase == "" { - clusterDatabase = "false" - } - archiveWALExpireDays := database.GetAdditionalArguments()["archive_wal_expire_days"] - if archiveWALExpireDays == "" { - archiveWALExpireDays = "-1" - } - enablePeerAuth := database.GetAdditionalArguments()["enable_peer_auth"] - if enablePeerAuth == "" { - enablePeerAuth = "false" - } - clusterName := database.GetAdditionalArguments()["cluster_name"] - if clusterName == "" { - clusterName = "psqlcluster" - } - patroniClusterName := database.GetAdditionalArguments()["patroni_cluster_name"] - if patroniClusterName == "" { - patroniClusterName = "patroni" - } - actionArguments := map[string]string{ - /* Non-Configurable from 
additionalArguments*/ - "proxy_read_port": proxyReadPort, - "listener_port": listenerPort, - "proxy_write_port": proxyWritePort, - "enable_synchronous_mode": enableSynchronousMode, - "auto_tune_staging_drive": autoTuneStagingDrive, - "backup_policy": backupPolicy, - "db_password": dbPassword, - "database_names": databaseNames, - "provision_virtual_ip": provisionVirtualIP, - "deploy_haproxy": deployHAProxy, - "failover_mode": failoverMode, - "node_type": nodeType, - "allocate_pg_hugepage": allocatePGHugePage, - "cluster_database": clusterDatabase, - "archive_wal_expire_days": archiveWALExpireDays, - "enable_peer_auth": enablePeerAuth, - "cluster_name": clusterName, - "patroni_cluster_name": patroniClusterName, - } + // Default action arguments + actionArguments := defaultActionArgumentsforHAProvisioning(database, dbPassword, databaseNames) // Appending/overwriting database actionArguments to actionArguments if err := setConfiguredActionArguments(database, actionArguments); err != nil { @@ -548,6 +464,43 @@ func (a *PostgresHARequestAppender) appendProvisioningRequest(req *DatabaseProvi return req, nil } +func defaultActionArgumentsforHAProvisioning(database DatabaseInterface, dbPassword string, databaseNames string) map[string]string { + defaults := map[string]string{ + "failover_mode": "Automatic", + "proxy_read_port": "5001", + "listener_port": "5432", + "proxy_write_port": "5000", + "enable_synchronous_mode": "true", + "auto_tune_staging_drive": "true", + "backup_policy": "primary_only", + "provision_virtual_ip": "true", + "deploy_haproxy": "true", + "node_type": "database", + "allocate_pg_hugepage": "false", + "cluster_database": "false", + "archive_wal_expire_days": "-1", + "enable_peer_auth": "false", + "cluster_name": "psqlcluster", + "patroni_cluster_name": "patroni", + } + + additionalArguments := map[string]string{ + "db_password": dbPassword, + "database_names": databaseNames, + } + originalAdditionalArguments := database.GetAdditionalArguments() + for 
key, defaultValue := range defaults { + value := originalAdditionalArguments[key] + if value == "" { + additionalArguments[key] = defaultValue + } else { + additionalArguments[key] = value + } + } + + return additionalArguments +} + func (a *MySqlRequestAppender) appendProvisioningRequest(req *DatabaseProvisionRequest, database DatabaseInterface, reqData map[string]interface{}) (*DatabaseProvisionRequest, error) { dbPassword := reqData[common.NDB_PARAM_PASSWORD].(string) databaseNames := database.GetInstanceDatabaseNames() From 83a0bce739ff6bfbe574b97a201bd7b1044bf2f1 Mon Sep 17 00:00:00 2001 From: Justin Orringer Date: Thu, 18 Apr 2024 01:36:46 -0400 Subject: [PATCH 15/60] Changed clone to dynamically build Nodes --- ndb_api/clone_helpers.go | 100 ++++++++++++++++++++------------------- 1 file changed, 51 insertions(+), 49 deletions(-) diff --git a/ndb_api/clone_helpers.go b/ndb_api/clone_helpers.go index cba2901d..876c25f6 100644 --- a/ndb_api/clone_helpers.go +++ b/ndb_api/clone_helpers.go @@ -17,8 +17,8 @@ import ( "context" "errors" "fmt" - "strconv" + "github.com/nutanix-cloud-native/ndb-operator/api/v1alpha1" "github.com/nutanix-cloud-native/ndb-operator/common" "github.com/nutanix-cloud-native/ndb-operator/ndb_client" ctrllog "sigs.k8s.io/controller-runtime/pkg/log" @@ -220,56 +220,58 @@ func setCloneNodesParameters(req *DatabaseCloneRequest, database DatabaseInterfa networkProfileId := req.Nodes[0].NetworkProfileId serverTimeZone := req.Nodes[0].NewDbServerTimeZone - // Clear the original req.Nodes array + // Convert database.Instance.Nodes to the common type Nodes req.Nodes = []Node{} - - // Create node object for HA Proxy - for i := 0; i < 2; i++ { - // Hard coding the HA Proxy properties - props := make([]map[string]string, 1) - props[0] = map[string]string{ - "name": "node_type", - "value": "haproxy", - } - req.Nodes = append(req.Nodes, Node{ - Properties: props, - VmName: database.GetName() + "_haproxy" + strconv.Itoa(i), - NxClusterId: 
database.GetClusterId(), - }) - } - - // Create node object for Database Instances - for i := 0; i < 3; i++ { - // Hard coding the DB properties - props := make([]map[string]string, 4) - props[0] = map[string]string{ - "name": "role", - "value": "Secondary", - } - // 1st node will be the primary node - if i == 0 { - props[0]["value"] = "Primary" - } - props[1] = map[string]string{ - "name": "failover_mode", - "value": "Automatic", - } - props[2] = map[string]string{ - "name": "node_type", - "value": "database", + for _, node := range database.GetInstanceNodes() { + built := Node{} + if node.Properties.NodeType == "haproxy" { + built = buildHAProxyNode(req, node, database.GetClusterId()) + } else { + built = buildDatabaseNode(req, node, computeProfileId, networkProfileId, serverTimeZone, database.GetClusterId()) } - props[3] = map[string]string{ - "name": "remote_archive_destination", - "value": "", - } - req.Nodes = append(req.Nodes, Node{ - ComputeProfileId: computeProfileId, - NetworkProfileId: networkProfileId, - NewDbServerTimeZone: serverTimeZone, - Properties: props, - VmName: database.GetName() + "-" + strconv.Itoa(i), - NxClusterId: database.GetClusterId(), - }) + + req.Nodes = append(req.Nodes, built) + } +} + +func buildHAProxyNode(req *DatabaseCloneRequest, node *v1alpha1.Node, clusterId string) Node { + props := make([]map[string]string, 1) + props[0] = map[string]string{ + "name": "node_type", + "value": node.Properties.NodeType, + } + return Node{ + Properties: props, + VmName: node.VmName, + NxClusterId: clusterId, + } +} + +func buildDatabaseNode(req *DatabaseCloneRequest, node *v1alpha1.Node, computeProfileId, networkProfileId, serverTimeZone, clusterId string) Node { + props := make([]map[string]string, 4) + props[0] = map[string]string{ + "name": "role", + "value": node.Properties.Role, + } + props[1] = map[string]string{ + "name": "failover_mode", + "value": node.Properties.FailoverMode, + } + props[2] = map[string]string{ + "name": "node_type", 
+ "value": node.Properties.NodeType, + } + props[3] = map[string]string{ + "name": "remote_archive_destination", + "value": "", + } + return Node{ + ComputeProfileId: computeProfileId, + NetworkProfileId: networkProfileId, + NewDbServerTimeZone: serverTimeZone, + Properties: props, + VmName: node.VmName, + NxClusterId: clusterId, } } From f3d3d62e60521c5098d045183ddebd08625593c8 Mon Sep 17 00:00:00 2001 From: Justin Orringer Date: Thu, 18 Apr 2024 01:40:54 -0400 Subject: [PATCH 16/60] Set NodeCount based on actual nodes --- ndb_api/clone_helpers.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ndb_api/clone_helpers.go b/ndb_api/clone_helpers.go index 876c25f6..37239e99 100644 --- a/ndb_api/clone_helpers.go +++ b/ndb_api/clone_helpers.go @@ -279,8 +279,7 @@ func (a *PostgresHARequestAppender) appendCloningRequest(req *DatabaseCloneReque req.SSHPublicKey = reqData[common.NDB_PARAM_SSH_PUBLIC_KEY].(string) dbPassword := reqData[common.NDB_PARAM_PASSWORD].(string) - // Set the number of nodes to 5, 3 Postgres nodes + 2 HA Proxy nodes - req.NodeCount = 5 + req.NodeCount = len(database.GetInstanceNodes()) setCloneNodesParameters(req, database) // Default action arguments From 16caf1a278dff14c6e2eb9166a105e9f7f19f1f8 Mon Sep 17 00:00:00 2001 From: Zhang Zhi Date: Thu, 18 Apr 2024 18:40:46 -0400 Subject: [PATCH 17/60] Add changes for end-to-end high availability tests --- api/v1alpha1/zz_generated.deepcopy.go | 53 +++++ .../pg-ha_test/config/database.yaml | 49 ++++ .../pg-ha_test/config/db-secret.yaml | 8 + .../pg-ha_test/config/ndb-secret.yaml | 10 + .../provisioning/pg-ha_test/config/ndb.yaml | 8 + .../provisioning/pg-ha_test/config/pod.yaml | 30 +++ .../provisioning/pg-ha_test/pg-ha_test.go | 212 ++++++++++++++++++ automation/util/setup.go | 3 +- .../crd/bases/ndb.nutanix.com_databases.yaml | 52 ++--- 9 files changed, 395 insertions(+), 30 deletions(-) create mode 100644 automation/tests/provisioning/pg-ha_test/config/database.yaml create mode 
100644 automation/tests/provisioning/pg-ha_test/config/db-secret.yaml create mode 100644 automation/tests/provisioning/pg-ha_test/config/ndb-secret.yaml create mode 100644 automation/tests/provisioning/pg-ha_test/config/ndb.yaml create mode 100644 automation/tests/provisioning/pg-ha_test/config/pod.yaml create mode 100644 automation/tests/provisioning/pg-ha_test/pg-ha_test.go diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 2a061e22..ff63d5f3 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -39,6 +39,17 @@ func (in *Clone) DeepCopyInto(out *Clone) { (*out)[key] = val } } + if in.Nodes != nil { + in, out := &in.Nodes, &out.Nodes + *out = make([]*Node, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Node) + **out = **in + } + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Clone. @@ -190,6 +201,17 @@ func (in *Instance) DeepCopyInto(out *Instance) { (*out)[key] = val } } + if in.Nodes != nil { + in, out := &in.Nodes, &out.Nodes + *out = make([]*Node, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Node) + **out = **in + } + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Instance. @@ -314,6 +336,37 @@ func (in *NDBServerStatus) DeepCopy() *NDBServerStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Node) DeepCopyInto(out *Node) { + *out = *in + out.Properties = in.Properties +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Node. 
+func (in *Node) DeepCopy() *Node { + if in == nil { + return nil + } + out := new(Node) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeProperties) DeepCopyInto(out *NodeProperties) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeProperties. +func (in *NodeProperties) DeepCopy() *NodeProperties { + if in == nil { + return nil + } + out := new(NodeProperties) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Profile) DeepCopyInto(out *Profile) { *out = *in diff --git a/automation/tests/provisioning/pg-ha_test/config/database.yaml b/automation/tests/provisioning/pg-ha_test/config/database.yaml new file mode 100644 index 00000000..594823f1 --- /dev/null +++ b/automation/tests/provisioning/pg-ha_test/config/database.yaml @@ -0,0 +1,49 @@ +apiVersion: ndb.nutanix.com/v1alpha1 +kind: Database +metadata: + name: db-pg-ha +spec: + ndbRef: ndb-pg + databaseInstance: + Name: db-pg-ha + databaseNames: + - database_one + - database_two + - database_three + clusterId: + credentialSecret: db-secret-pg-ha + size: 10 + timezone: "UTC" + type: postgres + isHighAvailability: true + profiles: {} + timeMachine: + name: db-pg-ha_TM + description: "TM provisioned by operator" + sla : "DEFAULT_OOB_GOLD_SLA" + dailySnapshotTime: "12:34:56" + snapshotsPerDay: 4 + logCatchUpFrequency: 90 + weeklySnapshotDay: "WEDNESDAY" + monthlySnapshotDay: 24 + quarterlySnapshotMonth: "Jan" + additionalArguments: # Optional block, can specify additional arguments that are unique to database engines. 
+ listener_port: "5432" + failover_mode: "Automatic" + deploy_haproxy: "false" + nodes: + - vmName: "test1" + properties: + node_type: database + role: Primary + failover_mode: Automatic + - vmName: "test2" + properties: + node_type: database + role: Secondary + failover_mode: Automatic + - vmName: "test3" + properties: + node_type: database + role: Secondary + failover_mode: Automatic \ No newline at end of file diff --git a/automation/tests/provisioning/pg-ha_test/config/db-secret.yaml b/automation/tests/provisioning/pg-ha_test/config/db-secret.yaml new file mode 100644 index 00000000..84c54aa7 --- /dev/null +++ b/automation/tests/provisioning/pg-ha_test/config/db-secret.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Secret +metadata: + name: db-secret-pg-ha +type: Opaque +stringData: + password: + ssh_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCwyAhpllp2WwrUB1aO/0/DN5nIWNXJWQ3ybhuEG4U+kHl8xFFKnPOTDQtTK8UwByoSf6wqIfTr10ESAoHySOpxHk2gyVHVmUmRZ1WFiNR5tW3Q4qbq1qKpIVy1jH9ZRoTJwzg0J33W9W8SZzhM8Nj0nwuDqp6FS8ui7q9H3tgM+9bYYxETTg52NEw7jTVQx6KaZgG+p/8armoYPKh9DGhBYGY3oCmGiOYlm/phSlj3R63qghZIsBXKxeJDEs4cLolQ+9QYoRqqusdEGVCp7Ba/GtUPdBPYdTy+xuXGiALEpsCrqyUstxypHZVJEQfmqS8uy9UB8KFg2YepwhPgX1oN noname diff --git a/automation/tests/provisioning/pg-ha_test/config/ndb-secret.yaml b/automation/tests/provisioning/pg-ha_test/config/ndb-secret.yaml new file mode 100644 index 00000000..f3ac03b0 --- /dev/null +++ b/automation/tests/provisioning/pg-ha_test/config/ndb-secret.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Secret +metadata: + name: ndb-secret-pg-ha +type: Opaque +stringData: +# username and password for the test database + username: user1 + password: user1/pwd + ca_certificate: "" diff --git a/automation/tests/provisioning/pg-ha_test/config/ndb.yaml b/automation/tests/provisioning/pg-ha_test/config/ndb.yaml new file mode 100644 index 00000000..067609a3 --- /dev/null +++ b/automation/tests/provisioning/pg-ha_test/config/ndb.yaml @@ -0,0 +1,8 @@ +apiVersion: 
ndb.nutanix.com/v1alpha1 +kind: NDBServer +metadata: + name: ndb-pg +spec: + credentialSecret: ndb-secret-pg-ha + server: https://ndbis33dbma11-era.nutanixtestdrive.com:8443/era/v0.9 + skipCertificateVerification: true diff --git a/automation/tests/provisioning/pg-ha_test/config/pod.yaml b/automation/tests/provisioning/pg-ha_test/config/pod.yaml new file mode 100644 index 00000000..5d966bb0 --- /dev/null +++ b/automation/tests/provisioning/pg-ha_test/config/pod.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Pod +metadata: + name: app-pg-si + labels: + app: app-pg-si +spec: + containers: + - name: best-app + image: manavrajvanshinx/best-app:latest + resources: + limits: + memory: 512Mi + cpu: "1" + env: + - name: DBHOST + value: db-pg-si-svc + - name: DBPORT + value: '80' + - name: PASSWORD + valueFrom: + secretKeyRef: + name: db-secret-pg-si + key: password + ports: + - containerPort: 3000 + initContainers: + - name: init-db + image: busybox:1.28 + command: ['sh', '-c', "until nslookup db-pg-si-svc.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo waiting for database service; sleep 2; done"] diff --git a/automation/tests/provisioning/pg-ha_test/pg-ha_test.go b/automation/tests/provisioning/pg-ha_test/pg-ha_test.go new file mode 100644 index 00000000..aab80a84 --- /dev/null +++ b/automation/tests/provisioning/pg-ha_test/pg-ha_test.go @@ -0,0 +1,212 @@ +package postgres_provisoning_ha + +// Basic imports +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/nutanix-cloud-native/ndb-operator/automation" + clientsetv1alpha1 "github.com/nutanix-cloud-native/ndb-operator/automation/clientset/v1alpha1" + util "github.com/nutanix-cloud-native/ndb-operator/automation/util" + "github.com/nutanix-cloud-native/ndb-operator/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" +) + +// A test suite is a collection of related test cases 
that are grouped together for testing a specific package or functionality. +// The testify package builds on top of Go's built-in testing package and enhances it with additional features like assertions and test suite management. +// PostgresProvisioningHightAvailabilityInstanceTestSuite is a test suite struct that embeds testify's suite.Suite +type PostgresProvisioningHighAvailabilityTestSuite struct { + suite.Suite + ctx context.Context + setupTypes *util.SetupTypes + v1alpha1ClientSet *clientsetv1alpha1.V1alpha1Client + clientset *kubernetes.Clientset + tsm util.TestSuiteManager +} + +// SetupSuite is called once before running the tests in the suite +func (suite *PostgresProvisioningHighAvailabilityTestSuite) SetupSuite() { + var err error + var config *rest.Config + var ctx context.Context + var v1alpha1ClientSet *clientsetv1alpha1.V1alpha1Client + var clientset *kubernetes.Clientset + var tsm util.TestSuiteManager + + // Setup logger and context + logger, err := util.SetupLogger(fmt.Sprintf("%s/pg-provisioning-ha_test.log", automation.PROVISIONING_LOG_PATH), "pg-provisioning-ha: ") + if err != nil { + suite.T().FailNow() + } + ctx = util.SetupContext(context.Background(), logger) + + logger.Println("SetupSuite() starting...") + errBaseMsg := "Error: SetupSuite() ended" + + // Setup env + if err = util.CheckRequiredEnv(ctx); err != nil { + logger.Printf("%s! %s\n", errBaseMsg, err) + suite.T().FailNow() + } + + // Setup kubeconfig + config, err = util.SetupKubeconfig(ctx) + if err != nil { + logger.Printf("%s! %s\n", errBaseMsg, err) + suite.T().FailNow() + } + + // Setup scheme and clientsets + if v1alpha1ClientSet, clientset, err = util.SetupSchemeAndClientSet(ctx, config); err != nil { + logger.Printf("%s! %s\n", errBaseMsg, err) + suite.T().FailNow() + } + + // Setup yaml types + setupTypes, err := util.SetupTypeTemplates(ctx) + if err != nil { + logger.Printf("%s! 
%s\n", errBaseMsg, err) + suite.T().FailNow() + } + + // Get test suite manager + tsm = util.GetTestSuiteManager(ctx, *setupTypes) + + // Provision database and wait for database and pod to be ready + if err := tsm.Setup(ctx, setupTypes, clientset, v1alpha1ClientSet, suite.T()); err != nil { + logger.Printf("%s! %s\n", errBaseMsg, err) + suite.T().FailNow() + } + + // Set variables for the entire suite + suite.ctx = ctx + suite.setupTypes = setupTypes + suite.v1alpha1ClientSet = v1alpha1ClientSet + suite.clientset = clientset + suite.tsm = tsm + + logger.Println("SetupSuite() ended!") +} + +// TearDownSuite is called once after running the tests in the suite +func (suite *PostgresProvisioningHighAvailabilityTestSuite) TearDownSuite() { + var err error + + logger := util.GetLogger(suite.ctx) + logger.Println("TearDownSuite() starting...") + errBaseMsg := "Error: SetupSuite() ended" + + // Setup yaml types + setupTypes, err := util.SetupTypeTemplates(suite.ctx) + if err != nil { + logger.Printf("%s! %s\n", errBaseMsg, err) + suite.T().FailNow() + } + + // Delete resources and de-provision database + if err = suite.tsm.TearDown(suite.ctx, setupTypes, suite.clientset, suite.v1alpha1ClientSet, suite.T()); err != nil { + logger.Printf("%s! 
%s\n", errBaseMsg, err) + suite.T().FailNow() + } + + logger.Println("HA TearDownSuite() completed!") +} + +// This will run right before the test starts and receives the suite and test names as input +func (suite *PostgresProvisioningHighAvailabilityTestSuite) BeforeTest(suiteName, testName string) { + util.GetLogger(suite.ctx).Printf("******************** RUNNING HA TEST %s %s ********************\n", suiteName, testName) +} + +// This will run after test finishes and receives the suite and test names as input +func (suite *PostgresProvisioningHighAvailabilityTestSuite) AfterTest(suiteName, testName string) { + util.GetLogger(suite.ctx).Printf("******************** END HA TEST %s %s ********************\n", suiteName, testName) +} + +// Tests if provisioning is succesful by checking if database status is 'READY' +func (suite *PostgresProvisioningHighAvailabilityTestSuite) TestProvisioningSuccess() { + logger := util.GetLogger(suite.ctx) + + databaseResponse, err := suite.tsm.GetDatabaseOrCloneResponse(suite.ctx, suite.setupTypes, suite.clientset, suite.v1alpha1ClientSet) + if err != nil { + logger.Printf("Error: TestProvisioningSuccess() failed! %v", err) + } else { + logger.Println("Database response retrieved.") + } + + assert := assert.New(suite.T()) + assert.Equal(common.DATABASE_CR_STATUS_READY, databaseResponse.Status, "The database status should be ready.") +} + +// Tests if app is able to connect to database via GET request +func (suite *PostgresProvisioningHighAvailabilityTestSuite) TestAppConnectivity() { + logger := util.GetLogger(suite.ctx) + + resp, err := suite.tsm.GetAppResponse(suite.ctx, suite.setupTypes, suite.clientset, automation.POSTGRES_SI_PROVISONING_LOCAL_PORT) + if err != nil { + logger.Printf("Error: TestAppConnectivity failed! 
%v", err) + } else { + logger.Println("App response retrieved.") + } + + assert := assert.New(suite.T()) + assert.Equal(200, resp.StatusCode, "The response status should be 200.") +} + +// Tests if creation of time machine is succesful +func (suite *PostgresProvisioningHighAvailabilityTestSuite) TestTimeMachineSuccess() { + logger := util.GetLogger(suite.ctx) + assert := assert.New(suite.T()) + + if suite.setupTypes.Database.Spec.Instance.TMInfo.SLAName == "" || suite.setupTypes.Database.Spec.Instance.TMInfo.SLAName == "NONE" { + logger.Println("No time machine specified, test automatically passing.") + return + } + + tm, err := suite.tsm.GetTimemachineResponseByDatabaseId(suite.ctx, suite.setupTypes, suite.clientset, suite.v1alpha1ClientSet) + if err != nil { + logger.Printf("Error: TestTimeMachineSuccess() failed! %v", err) + assert.FailNow("Error: TestTimeMachineSuccess() failed! %v", err) + } else { + logger.Println("Timemachine response retrieved.") + } + + err = util.CheckTmInfo(suite.ctx, suite.setupTypes.Database, &tm) + if err != nil { + logger.Printf("Error: TestTimeMachineSuccess() failed! %v", err) + assert.FailNow("Error: TestTimeMachineSuccess() failed! %v", err) + } else { + logger.Println("CheckTmInfo succesful") + } + + assert.Equal(common.DATABASE_CR_STATUS_READY, tm.Status, "The tm status should be ready.") +} + +// In order for 'go test' to run this suite, we need to create +// a normal test function and pass our suite to suite.Run +func TestPostgresProvisioningHighAvailabilityTestSuite(t *testing.T) { + suite.Run(t, new(PostgresProvisioningHighAvailabilityTestSuite)) +} + +// BeforeTestLogTime will run right before the test starts and logs the start time of the test. 
+func (suite *PostgresProvisioningHighAvailabilityTestSuite) BeforeTestLogTime(suiteName, testName string) { + logger := util.GetLogger(suite.ctx) + startTime := time.Now() + // Store the start time in the context for use in AfterTestLogTime + ctx := context.WithValue(suite.ctx, "startTime", startTime) + suite.ctx = ctx + logger.Printf("******************** STARTING HA TEST %s %s at %v ********************\n", suiteName, testName, startTime) +} + +// AfterTestLogTime will run after the test finishes and calculates the duration of the test. +func (suite *PostgresProvisioningHighAvailabilityTestSuite) AfterTestLogTime(suiteName, testName string) { + logger := util.GetLogger(suite.ctx) + startTime := suite.ctx.Value("startTime").(time.Time) + endTime := time.Now() + duration := endTime.Sub(startTime) + logger.Printf("******************** ENDING HA TEST %s %s at %v (Duration: %v) ********************\n", suiteName, testName, endTime, duration) +} diff --git a/automation/util/setup.go b/automation/util/setup.go index 102cf670..428b6835 100644 --- a/automation/util/setup.go +++ b/automation/util/setup.go @@ -47,13 +47,12 @@ func SetupLogger(path string, rootName string) (*log.Logger, error) { if err := os.MkdirAll(dir, 0777); err != nil { return nil, err } - + // Creates the file file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) if err != nil { return nil, err } - // Links the logger to the file and returns the logger return log.New(file, rootName, log.Ldate|log.Ltime|log.Lshortfile), nil diff --git a/config/crd/bases/ndb.nutanix.com_databases.yaml b/config/crd/bases/ndb.nutanix.com_databases.yaml index e38496ab..12c424ac 100644 --- a/config/crd/bases/ndb.nutanix.com_databases.yaml +++ b/config/crd/bases/ndb.nutanix.com_databases.yaml @@ -77,23 +77,21 @@ spec: nodes: items: properties: - failoverMode: - type: string - nodeType: - type: string - nxClusterId: - type: string - nxClusterName: - type: string - remoteArchiveDestination: - type: string - 
role: - type: string + properties: + properties: + failover_mode: + type: string + node_type: + type: string + role: + type: string + required: + - node_type + type: object vmName: type: string required: - - nodeType - - vmName + - properties type: object type: array profiles: @@ -187,23 +185,21 @@ spec: nodes: items: properties: - failoverMode: - type: string - nodeType: - type: string - nxClusterId: - type: string - nxClusterName: - type: string - remoteArchiveDestination: - type: string - role: - type: string + properties: + properties: + failover_mode: + type: string + node_type: + type: string + role: + type: string + required: + - node_type + type: object vmName: type: string required: - - nodeType - - vmName + - properties type: object type: array profiles: From ddd0d6e1f49ff6bb018380429bbf54dce3f48569 Mon Sep 17 00:00:00 2001 From: Zhang Zhi Date: Thu, 18 Apr 2024 18:54:01 -0400 Subject: [PATCH 18/60] edit folder name --- .../{pg-ha_test => pg-ha_test_2}/config/database.yaml | 0 .../{pg-ha_test => pg-ha_test_2}/config/db-secret.yaml | 0 .../{pg-ha_test => pg-ha_test_2}/config/ndb-secret.yaml | 0 .../provisioning/{pg-ha_test => pg-ha_test_2}/config/ndb.yaml | 0 .../provisioning/{pg-ha_test => pg-ha_test_2}/config/pod.yaml | 0 .../tests/provisioning/{pg-ha_test => pg-ha_test_2}/pg-ha_test.go | 0 6 files changed, 0 insertions(+), 0 deletions(-) rename automation/tests/provisioning/{pg-ha_test => pg-ha_test_2}/config/database.yaml (100%) rename automation/tests/provisioning/{pg-ha_test => pg-ha_test_2}/config/db-secret.yaml (100%) rename automation/tests/provisioning/{pg-ha_test => pg-ha_test_2}/config/ndb-secret.yaml (100%) rename automation/tests/provisioning/{pg-ha_test => pg-ha_test_2}/config/ndb.yaml (100%) rename automation/tests/provisioning/{pg-ha_test => pg-ha_test_2}/config/pod.yaml (100%) rename automation/tests/provisioning/{pg-ha_test => pg-ha_test_2}/pg-ha_test.go (100%) diff --git a/automation/tests/provisioning/pg-ha_test/config/database.yaml 
b/automation/tests/provisioning/pg-ha_test_2/config/database.yaml similarity index 100% rename from automation/tests/provisioning/pg-ha_test/config/database.yaml rename to automation/tests/provisioning/pg-ha_test_2/config/database.yaml diff --git a/automation/tests/provisioning/pg-ha_test/config/db-secret.yaml b/automation/tests/provisioning/pg-ha_test_2/config/db-secret.yaml similarity index 100% rename from automation/tests/provisioning/pg-ha_test/config/db-secret.yaml rename to automation/tests/provisioning/pg-ha_test_2/config/db-secret.yaml diff --git a/automation/tests/provisioning/pg-ha_test/config/ndb-secret.yaml b/automation/tests/provisioning/pg-ha_test_2/config/ndb-secret.yaml similarity index 100% rename from automation/tests/provisioning/pg-ha_test/config/ndb-secret.yaml rename to automation/tests/provisioning/pg-ha_test_2/config/ndb-secret.yaml diff --git a/automation/tests/provisioning/pg-ha_test/config/ndb.yaml b/automation/tests/provisioning/pg-ha_test_2/config/ndb.yaml similarity index 100% rename from automation/tests/provisioning/pg-ha_test/config/ndb.yaml rename to automation/tests/provisioning/pg-ha_test_2/config/ndb.yaml diff --git a/automation/tests/provisioning/pg-ha_test/config/pod.yaml b/automation/tests/provisioning/pg-ha_test_2/config/pod.yaml similarity index 100% rename from automation/tests/provisioning/pg-ha_test/config/pod.yaml rename to automation/tests/provisioning/pg-ha_test_2/config/pod.yaml diff --git a/automation/tests/provisioning/pg-ha_test/pg-ha_test.go b/automation/tests/provisioning/pg-ha_test_2/pg-ha_test.go similarity index 100% rename from automation/tests/provisioning/pg-ha_test/pg-ha_test.go rename to automation/tests/provisioning/pg-ha_test_2/pg-ha_test.go From ff1f6d9b80508530ea06144bf931ea0e2e750272 Mon Sep 17 00:00:00 2001 From: Justin Orringer Date: Thu, 18 Apr 2024 19:19:55 -0400 Subject: [PATCH 19/60] Update automation/tests/provisioning/pg-ha_test_2/pg-ha_test.go --- 
automation/tests/provisioning/pg-ha_test_2/pg-ha_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/automation/tests/provisioning/pg-ha_test_2/pg-ha_test.go b/automation/tests/provisioning/pg-ha_test_2/pg-ha_test.go index aab80a84..3c8c75e4 100644 --- a/automation/tests/provisioning/pg-ha_test_2/pg-ha_test.go +++ b/automation/tests/provisioning/pg-ha_test_2/pg-ha_test.go @@ -175,7 +175,7 @@ func (suite *PostgresProvisioningHighAvailabilityTestSuite) TestTimeMachineSucce logger.Println("Timemachine response retrieved.") } - err = util.CheckTmInfo(suite.ctx, suite.setupTypes.Database, &tm) + err = util.CheckTmInfo(suite.ctx, suite.setupTypes.Database, tm) if err != nil { logger.Printf("Error: TestTimeMachineSuccess() failed! %v", err) assert.FailNow("Error: TestTimeMachineSuccess() failed! %v", err) From 383ad7beae82cd1f7ed4b9898de9d392fb94279a Mon Sep 17 00:00:00 2001 From: Cody Irion Date: Thu, 18 Apr 2024 19:07:05 -0400 Subject: [PATCH 20/60] Added default cluster_name when none given --- ndb_api/db_helpers.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ndb_api/db_helpers.go b/ndb_api/db_helpers.go index 4de9a93c..aaf1bf7d 100644 --- a/ndb_api/db_helpers.go +++ b/ndb_api/db_helpers.go @@ -308,7 +308,9 @@ func (a *PostgresRequestAppender) appendProvisioningRequest(req *DatabaseProvisi func setNodesParameters(req *DatabaseProvisionRequest, database DatabaseInterface) (nodeErrors error) { // Clear the original req.Nodes array req.Nodes = []Node{} - + if database.GetAdditionalArguments()["cluster_name"] == "" { + database.GetAdditionalArguments()["cluster_name"] = "postgresHaCluster" + } // Validate node counts nodesRequested := database.GetInstanceNodes() nodeCount := len(nodesRequested) From d4057bfb3fb2b07e8cf617703e6890f9cb661560 Mon Sep 17 00:00:00 2001 From: Cody Irion Date: Thu, 18 Apr 2024 19:20:25 -0400 Subject: [PATCH 21/60] fixed unit test --- ndb_api/db_helpers_test.go | 4 ++++ 1 file changed, 4 insertions(+) 
diff --git a/ndb_api/db_helpers_test.go b/ndb_api/db_helpers_test.go index fb888bf0..38087fc7 100644 --- a/ndb_api/db_helpers_test.go +++ b/ndb_api/db_helpers_test.go @@ -349,6 +349,10 @@ func TestPostgresHAProvisionRequestAppender_withoutAdditionalArguments_positiveW Name: "proxy_read_port", Value: "5001", }, + { + Name: "cluster_name", + Value: "postgresHaCluster", + }, { Name: "listener_port", Value: "5432", From 108572dd4706f66350edff87877d249605bad61b Mon Sep 17 00:00:00 2001 From: Cody Irion Date: Thu, 18 Apr 2024 19:28:49 -0400 Subject: [PATCH 22/60] fixed unit test --- ndb_api/db_helpers_test.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/ndb_api/db_helpers_test.go b/ndb_api/db_helpers_test.go index 38087fc7..706fe875 100644 --- a/ndb_api/db_helpers_test.go +++ b/ndb_api/db_helpers_test.go @@ -349,10 +349,6 @@ func TestPostgresHAProvisionRequestAppender_withoutAdditionalArguments_positiveW Name: "proxy_read_port", Value: "5001", }, - { - Name: "cluster_name", - Value: "postgresHaCluster", - }, { Name: "listener_port", Value: "5432", @@ -415,7 +411,7 @@ func TestPostgresHAProvisionRequestAppender_withoutAdditionalArguments_positiveW }, { Name: "cluster_name", - Value: "psqlcluster", + Value: "postgresHaCluster", }, { Name: "patroni_cluster_name", From 933a4058e395cf279c8a7fd8a5442c2de7579313 Mon Sep 17 00:00:00 2001 From: Cody Irion Date: Sun, 21 Apr 2024 15:13:41 -0400 Subject: [PATCH 23/60] Separated postgres HA additional arguments and fixed isHighAvailability and getInstanceNodes getter methods --- api/v1alpha1/database_webhook.go | 4 +-- api/v1alpha1/webhook_helpers.go | 4 +-- common/util/additionalArguments.go | 56 +++++++++++++++++------------- controller_adapters/database.go | 6 ++++ ndb_api/db_helpers.go | 2 +- ndb_api/db_helpers_test.go | 17 ++++++++- 6 files changed, 59 insertions(+), 30 deletions(-) diff --git a/api/v1alpha1/database_webhook.go b/api/v1alpha1/database_webhook.go index 6553d4f1..44208df3 100644 --- 
a/api/v1alpha1/database_webhook.go +++ b/api/v1alpha1/database_webhook.go @@ -97,13 +97,13 @@ func (r *Database) ValidateDelete() (admission.Warnings, error) { } /* Checks if configured additional arguments are valid or not and returns the corresponding additional arguments. If error is nil valid, else invalid */ -func additionalArgumentsValidationCheck(isClone bool, dbType string, specifiedAdditionalArguments map[string]string) error { +func additionalArgumentsValidationCheck(isClone bool, dbType string, isHA bool, specifiedAdditionalArguments map[string]string) error { // Empty additionalArguments is always valid if specifiedAdditionalArguments == nil { return nil } - allowedAdditionalArguments, err := util.GetAllowedAdditionalArguments(isClone, dbType) + allowedAdditionalArguments, err := util.GetAllowedAdditionalArguments(isClone, dbType, isHA) // Invalid type returns error if err != nil { diff --git a/api/v1alpha1/webhook_helpers.go b/api/v1alpha1/webhook_helpers.go index 45559d8f..b9b3e746 100644 --- a/api/v1alpha1/webhook_helpers.go +++ b/api/v1alpha1/webhook_helpers.go @@ -91,7 +91,7 @@ func (v *CloningWebhookHandler) validateCreate(spec *DatabaseSpec, errors *field } } - if err := additionalArgumentsValidationCheck(spec.IsClone, clone.Type, clone.AdditionalArguments); err != nil { + if err := additionalArgumentsValidationCheck(spec.IsClone, clone.Type, clone.IsHighAvailability, clone.AdditionalArguments); err != nil { *errors = append(*errors, field.Invalid(clonePath.Child("additionalArguments"), clone.AdditionalArguments, err.Error())) } databaselog.Info("Exiting validateCreate for clone") @@ -230,7 +230,7 @@ func (v *ProvisioningWebhookHandler) validateCreate(spec *DatabaseSpec, errors * )) } - if err := additionalArgumentsValidationCheck(spec.IsClone, instance.Type, instance.AdditionalArguments); err != nil { + if err := additionalArgumentsValidationCheck(spec.IsClone, instance.Type, instance.IsHighAvailability, instance.AdditionalArguments); err != nil 
{ *errors = append(*errors, field.Invalid(instancePath.Child("additionalArguments"), instance.AdditionalArguments, err.Error())) } diff --git a/common/util/additionalArguments.go b/common/util/additionalArguments.go index 7d6b72d0..6be911ac 100644 --- a/common/util/additionalArguments.go +++ b/common/util/additionalArguments.go @@ -10,11 +10,12 @@ import ( // 1. A map where the keys are the allowed additional arguments for the database type, and the corresponding values indicates whether the key is an action argument (where true=yes and false=no). // Currently, all additional arguments are action arguments but this might not always be the case, thus this distinction is made so actual action arguments are appended to the appropriate provisioning body property. // 2. An error if there is no allowed additional arguments for the corresponding type, in other words, if the dbType is not MSSQL, MongoDB, PostGres, or MYSQL. Else nil. -func GetAllowedAdditionalArguments(isClone bool, dbType string) (map[string]bool, error) { + +func GetAllowedAdditionalArguments(isClone bool, dbType string, isHa bool) (map[string]bool, error) { if isClone { return GetAllowedAdditionalArgumentsForClone(dbType) } else { - return GetAllowedAdditionalArgumentsForDatabase(dbType) + return GetAllowedAdditionalArgumentsForDatabase(dbType, isHa) } } @@ -79,7 +80,7 @@ func GetAllowedAdditionalArgumentsForClone(dbType string) (map[string]bool, erro } } -func GetAllowedAdditionalArgumentsForDatabase(dbType string) (map[string]bool, error) { +func GetAllowedAdditionalArgumentsForDatabase(dbType string, isHA bool) (map[string]bool, error) { switch dbType { case common.DATABASE_TYPE_MSSQL: return map[string]bool{ @@ -104,27 +105,34 @@ func GetAllowedAdditionalArgumentsForDatabase(dbType string) (map[string]bool, e "journal_size": true, }, nil case common.DATABASE_TYPE_POSTGRES: - return map[string]bool{ - /* Has a default */ - "listener_port": true, - "proxy_read_port": true, - "proxy_write_port": true, 
- "enable_synchronous_mode": true, - "auto_tune_staging_drive": true, - "backup_policy": true, - "db_password": true, - "database_names": true, - "provision_virtual_ip": true, - "deploy_haproxy": true, - "failover_mode": true, - "node_type": true, - "allocate_pg_hugepage": true, - "cluster_database": true, - "archive_wal_expire_days": true, - "enable_peer_auth": true, - "cluster_name": true, - "patroni_cluster_name": true, - }, nil + if isHA { + return map[string]bool{ + /* Has a default */ + "listener_port": true, + "proxy_read_port": true, + "proxy_write_port": true, + "enable_synchronous_mode": true, + "auto_tune_staging_drive": true, + "backup_policy": true, + "db_password": true, + "database_names": true, + "provision_virtual_ip": true, + "deploy_haproxy": true, + "failover_mode": true, + "node_type": true, + "allocate_pg_hugepage": true, + "cluster_database": true, + "archive_wal_expire_days": true, + "enable_peer_auth": true, + "cluster_name": true, + "patroni_cluster_name": true, + }, nil + } else { + return map[string]bool{ + /* Has a default */ + "listener_port": true, + }, nil + } case common.DATABASE_TYPE_MYSQL: return map[string]bool{ "listener_port": true, diff --git a/controller_adapters/database.go b/controller_adapters/database.go index b2dea24a..e6f052db 100644 --- a/controller_adapters/database.go +++ b/controller_adapters/database.go @@ -146,10 +146,16 @@ func (d *Database) GetInstanceSize() int { } func (d *Database) GetInstanceIsHighAvailability() bool { + if d.IsClone() { + return d.Spec.Clone.IsHighAvailability + } return d.Spec.Instance.IsHighAvailability } func (d *Database) GetInstanceNodes() []*v1alpha1.Node { + if d.IsClone() { + return d.Spec.Instance.Nodes + } return d.Spec.Instance.Nodes } diff --git a/ndb_api/db_helpers.go b/ndb_api/db_helpers.go index aaf1bf7d..5d61b046 100644 --- a/ndb_api/db_helpers.go +++ b/ndb_api/db_helpers.go @@ -190,7 +190,7 @@ func setConfiguredActionArguments(database DatabaseInterface, actionArguments ma 
return fmt.Errorf("%s! Action arguments cannot be nil", errMsgRoot) } - allowedAdditionalArguments, err := util.GetAllowedAdditionalArguments(database.IsClone(), database.GetInstanceType()) + allowedAdditionalArguments, err := util.GetAllowedAdditionalArguments(database.IsClone(), database.GetInstanceType(), database.GetInstanceIsHighAvailability()) if err != nil { return fmt.Errorf("%s! %s", errMsgRoot, err.Error()) } diff --git a/ndb_api/db_helpers_test.go b/ndb_api/db_helpers_test.go index 706fe875..c13fdbd5 100644 --- a/ndb_api/db_helpers_test.go +++ b/ndb_api/db_helpers_test.go @@ -141,6 +141,7 @@ func TestPostgresProvisionRequestAppender_withoutAdditionalArguments_positiveWor mockDatabase.On("GetInstanceType").Return(common.DATABASE_TYPE_POSTGRES) mockDatabase.On("GetAdditionalArguments").Return(map[string]string{}) mockDatabase.On("IsClone").Return(false) + mockDatabase.On("GetInstanceIsHighAvailability").Return(false) expectedActionArgs := []ActionArgument{ { Name: "proxy_read_port", @@ -222,6 +223,7 @@ func TestPostgresProvisionRequestAppender_withAdditionalArguments_positiveWorkfl "listener_port": "0000", }) mockDatabase.On("IsClone").Return(false) + mockDatabase.On("GetInstanceIsHighAvailability").Return(false) expectedActionArgs := []ActionArgument{ { @@ -304,6 +306,7 @@ func TestPostgresProvisionRequestAppender_withAdditionalArguments_negativeWorkfl "invalid-key": "invalid-value", }) mockDatabase.On("IsClone").Return(false) + mockDatabase.On("GetInstanceIsHighAvailability").Return(false) // Get specific implementation of RequestAppender requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_POSTGRES, false) @@ -344,6 +347,7 @@ func TestPostgresHAProvisionRequestAppender_withoutAdditionalArguments_positiveW mockDatabase.On("GetInstanceNodes").Return(emptyNodes) mockDatabase.On("GetClusterId").Return(TEST_CLUSTER_ID) mockDatabase.On("IsClone").Return(false) + mockDatabase.On("GetInstanceIsHighAvailability").Return(true) expectedActionArgs := 
[]ActionArgument{ { Name: "proxy_read_port", @@ -468,6 +472,7 @@ func TestPostgresHAProvisionRequestAppender_withAdditionalArguments_positiveWork }) mockDatabase.On("GetClusterId").Return(TEST_CLUSTER_ID) mockDatabase.On("IsClone").Return(false) + mockDatabase.On("GetInstanceIsHighAvailability").Return(true) expectedActionArgs := []ActionArgument{ { @@ -536,7 +541,7 @@ func TestPostgresHAProvisionRequestAppender_withAdditionalArguments_positiveWork }, { Name: "cluster_name", - Value: "psqlcluster", + Value: "postgresHaCluster", }, { Name: "patroni_cluster_name", @@ -593,6 +598,7 @@ func TestPostgresHAProvisionRequestAppender_withoutAdditionalArguments_negativeW }) mockDatabase.On("GetClusterId").Return(TEST_CLUSTER_ID) mockDatabase.On("IsClone").Return(false) + mockDatabase.On("GetInstanceIsHighAvailability").Return(true) // Get specific implementation of RequestAppender requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_POSTGRES, true) @@ -644,6 +650,7 @@ func TestMSSQLProvisionRequestAppender_withoutAdditionalArguments_positiveWorklo mockDatabase.On("GetInstanceType").Return(common.DATABASE_TYPE_MSSQL) mockDatabase.On("GetAdditionalArguments").Return(map[string]string{}) mockDatabase.On("IsClone").Return(false) + mockDatabase.On("GetInstanceIsHighAvailability").Return(false) expectedActionArgs := []ActionArgument{ { Name: "working_dir", @@ -763,6 +770,7 @@ func TestMSSQLProvisionRequestAppender_withAdditionalArguments_positiveWorkflow( "vm_db_server_user": "", }) mockDatabase.On("IsClone").Return(false) + mockDatabase.On("GetInstanceIsHighAvailability").Return(false) expectedActionArgs := []ActionArgument{ { Name: "sql_user_name", @@ -892,6 +900,7 @@ func TestMSSQLProvisionRequestAppender_withAdditionalArguments_negativeWorkflow( "invalid-key2": "invalid-value", }) mockDatabase.On("IsClone").Return(false) + mockDatabase.On("GetInstanceIsHighAvailability").Return(false) // Get specific implementation of RequestAppender requestAppender, _ := 
GetRequestAppender(common.DATABASE_TYPE_MSSQL, false) @@ -929,6 +938,7 @@ func TestMongoDbProvisionRequestAppender_withoutAdditionalArguments_positiveWork mockDatabase.On("GetInstanceType").Return(common.DATABASE_TYPE_MONGODB) mockDatabase.On("GetAdditionalArguments").Return(map[string]string{}) mockDatabase.On("IsClone").Return(false) + mockDatabase.On("GetInstanceIsHighAvailability").Return(false) expectedActionArgs := []ActionArgument{ { Name: "listener_port", @@ -1016,6 +1026,7 @@ func TestMongoDbProvisionRequestAppender_withAdditionalArguments_positiveWorkflo "journal_size": "1", }) mockDatabase.On("IsClone").Return(false) + mockDatabase.On("GetInstanceIsHighAvailability").Return(false) expectedActionArgs := []ActionArgument{ { Name: "listener_port", @@ -1101,6 +1112,7 @@ func TestMongoDbProvisionRequestAppender_withAdditionalArguments_negativeWorkflo "invalid-key": "invalid-value", }) mockDatabase.On("IsClone").Return(false) + mockDatabase.On("GetInstanceIsHighAvailability").Return(false) // Get specific implementation of RequestAppender requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_MONGODB, false) @@ -1137,6 +1149,7 @@ func TestMySqlProvisionRequestAppender_withoutAdditionalArguments_positiveWorkfl mockDatabase.On("GetInstanceType").Return(common.DATABASE_TYPE_MYSQL) mockDatabase.On("GetAdditionalArguments").Return(map[string]string{}) mockDatabase.On("IsClone").Return(false) + mockDatabase.On("GetInstanceIsHighAvailability").Return(false) expectedActionArgs := []ActionArgument{ { Name: "listener_port", @@ -1202,6 +1215,7 @@ func TestMySqlProvisionRequestAppender_withAdditionalArguments_positiveWorkflow( "listener_port": "1111", }) mockDatabase.On("IsClone").Return(false) + mockDatabase.On("GetInstanceIsHighAvailability").Return(false) expectedActionArgs := []ActionArgument{ { Name: "listener_port", @@ -1267,6 +1281,7 @@ func TestMySqlProvisionRequestAppender_withAdditionalArguments_negativeWorkflow( "invalid-key": "invalid-value", }) 
mockDatabase.On("IsClone").Return(false) + mockDatabase.On("GetInstanceIsHighAvailability").Return(false) // Get specific implementation of RequestAppender requestAppender, _ := GetRequestAppender(common.DATABASE_TYPE_MYSQL, false) From ce9e102c68d8980f44a98324d82071cc4d4ab77d Mon Sep 17 00:00:00 2001 From: Cody Irion Date: Sun, 21 Apr 2024 17:04:45 -0400 Subject: [PATCH 24/60] moved the appendProvisioningRequest for PostgresHA to be near others --- ndb_api/db_helpers.go | 50 +++++++++++++++++++++---------------------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/ndb_api/db_helpers.go b/ndb_api/db_helpers.go index 5d61b046..e28bc19a 100644 --- a/ndb_api/db_helpers.go +++ b/ndb_api/db_helpers.go @@ -305,6 +305,31 @@ func (a *PostgresRequestAppender) appendProvisioningRequest(req *DatabaseProvisi return req, nil } +func (a *PostgresHARequestAppender) appendProvisioningRequest(req *DatabaseProvisionRequest, database DatabaseInterface, reqData map[string]interface{}) (*DatabaseProvisionRequest, error) { + dbPassword := reqData[common.NDB_PARAM_PASSWORD].(string) + databaseNames := database.GetInstanceDatabaseNames() + req.SSHPublicKey = reqData[common.NDB_PARAM_SSH_PUBLIC_KEY].(string) + // Set the number of nodes to 5, 3 Postgres nodes + 2 HA Proxy nodes + err := setNodesParameters(req, database) + if err != nil { + return nil, err + } + + req.Clustered = true + + // Default action arguments + actionArguments := defaultActionArgumentsforHAProvisioning(database, dbPassword, databaseNames) + + // Appending/overwriting database actionArguments to actionArguments + if err := setConfiguredActionArguments(database, actionArguments); err != nil { + return nil, err + } + // Converting action arguments map to list and appending to req.ActionArguments + req.ActionArguments = append(req.ActionArguments, convertMapToActionArguments(actionArguments)...) 
+ + return req, nil +} + func setNodesParameters(req *DatabaseProvisionRequest, database DatabaseInterface) (nodeErrors error) { // Clear the original req.Nodes array req.Nodes = []Node{} @@ -441,31 +466,6 @@ func getPrimaryNodeCount(nodesRequested []*v1alpha1.Node) int { return count } -func (a *PostgresHARequestAppender) appendProvisioningRequest(req *DatabaseProvisionRequest, database DatabaseInterface, reqData map[string]interface{}) (*DatabaseProvisionRequest, error) { - dbPassword := reqData[common.NDB_PARAM_PASSWORD].(string) - databaseNames := database.GetInstanceDatabaseNames() - req.SSHPublicKey = reqData[common.NDB_PARAM_SSH_PUBLIC_KEY].(string) - // Set the number of nodes to 5, 3 Postgres nodes + 2 HA Proxy nodes - err := setNodesParameters(req, database) - if err != nil { - return nil, err - } - - req.Clustered = true - - // Default action arguments - actionArguments := defaultActionArgumentsforHAProvisioning(database, dbPassword, databaseNames) - - // Appending/overwriting database actionArguments to actionArguments - if err := setConfiguredActionArguments(database, actionArguments); err != nil { - return nil, err - } - // Converting action arguments map to list and appending to req.ActionArguments - req.ActionArguments = append(req.ActionArguments, convertMapToActionArguments(actionArguments)...) 
- - return req, nil -} - func defaultActionArgumentsforHAProvisioning(database DatabaseInterface, dbPassword string, databaseNames string) map[string]string { defaults := map[string]string{ "failover_mode": "Automatic", From 731a1deed7a650f5af249a4ab89de581bb5d7886 Mon Sep 17 00:00:00 2001 From: Justin Orringer Date: Sun, 21 Apr 2024 18:22:42 -0400 Subject: [PATCH 25/60] Moving folder, cleanup --- .../{pg-ha_test_2 => pg-ha_test}/config/database.yaml | 2 +- .../{pg-ha_test_2 => pg-ha_test}/config/db-secret.yaml | 0 .../config/ndb-secret.yaml | 0 .../{pg-ha_test_2 => pg-ha_test}/config/ndb.yaml | 2 +- .../{pg-ha_test_2 => pg-ha_test}/config/pod.yaml | 10 +++++----- .../{pg-ha_test_2 => pg-ha_test}/pg-ha_test.go | 0 6 files changed, 7 insertions(+), 7 deletions(-) rename automation/tests/provisioning/{pg-ha_test_2 => pg-ha_test}/config/database.yaml (97%) rename automation/tests/provisioning/{pg-ha_test_2 => pg-ha_test}/config/db-secret.yaml (100%) rename automation/tests/provisioning/{pg-ha_test_2 => pg-ha_test}/config/ndb-secret.yaml (100%) rename automation/tests/provisioning/{pg-ha_test_2 => pg-ha_test}/config/ndb.yaml (68%) rename automation/tests/provisioning/{pg-ha_test_2 => pg-ha_test}/config/pod.yaml (78%) rename automation/tests/provisioning/{pg-ha_test_2 => pg-ha_test}/pg-ha_test.go (100%) diff --git a/automation/tests/provisioning/pg-ha_test_2/config/database.yaml b/automation/tests/provisioning/pg-ha_test/config/database.yaml similarity index 97% rename from automation/tests/provisioning/pg-ha_test_2/config/database.yaml rename to automation/tests/provisioning/pg-ha_test/config/database.yaml index 594823f1..675e1e96 100644 --- a/automation/tests/provisioning/pg-ha_test_2/config/database.yaml +++ b/automation/tests/provisioning/pg-ha_test/config/database.yaml @@ -46,4 +46,4 @@ spec: properties: node_type: database role: Secondary - failover_mode: Automatic \ No newline at end of file + failover_mode: Automatic diff --git 
a/automation/tests/provisioning/pg-ha_test_2/config/db-secret.yaml b/automation/tests/provisioning/pg-ha_test/config/db-secret.yaml similarity index 100% rename from automation/tests/provisioning/pg-ha_test_2/config/db-secret.yaml rename to automation/tests/provisioning/pg-ha_test/config/db-secret.yaml diff --git a/automation/tests/provisioning/pg-ha_test_2/config/ndb-secret.yaml b/automation/tests/provisioning/pg-ha_test/config/ndb-secret.yaml similarity index 100% rename from automation/tests/provisioning/pg-ha_test_2/config/ndb-secret.yaml rename to automation/tests/provisioning/pg-ha_test/config/ndb-secret.yaml diff --git a/automation/tests/provisioning/pg-ha_test_2/config/ndb.yaml b/automation/tests/provisioning/pg-ha_test/config/ndb.yaml similarity index 68% rename from automation/tests/provisioning/pg-ha_test_2/config/ndb.yaml rename to automation/tests/provisioning/pg-ha_test/config/ndb.yaml index 067609a3..c0857802 100644 --- a/automation/tests/provisioning/pg-ha_test_2/config/ndb.yaml +++ b/automation/tests/provisioning/pg-ha_test/config/ndb.yaml @@ -4,5 +4,5 @@ metadata: name: ndb-pg spec: credentialSecret: ndb-secret-pg-ha - server: https://ndbis33dbma11-era.nutanixtestdrive.com:8443/era/v0.9 + server: :8443/era/v0.9> skipCertificateVerification: true diff --git a/automation/tests/provisioning/pg-ha_test_2/config/pod.yaml b/automation/tests/provisioning/pg-ha_test/config/pod.yaml similarity index 78% rename from automation/tests/provisioning/pg-ha_test_2/config/pod.yaml rename to automation/tests/provisioning/pg-ha_test/config/pod.yaml index 5d966bb0..24056a89 100644 --- a/automation/tests/provisioning/pg-ha_test_2/config/pod.yaml +++ b/automation/tests/provisioning/pg-ha_test/config/pod.yaml @@ -1,9 +1,9 @@ apiVersion: v1 kind: Pod metadata: - name: app-pg-si + name: app-pg-ha labels: - app: app-pg-si + app: app-pg-ha spec: containers: - name: best-app @@ -14,17 +14,17 @@ spec: cpu: "1" env: - name: DBHOST - value: db-pg-si-svc + value: db-pg-ha-svc - 
name: DBPORT value: '80' - name: PASSWORD valueFrom: secretKeyRef: - name: db-secret-pg-si + name: db-secret-pg-ha key: password ports: - containerPort: 3000 initContainers: - name: init-db image: busybox:1.28 - command: ['sh', '-c', "until nslookup db-pg-si-svc.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo waiting for database service; sleep 2; done"] + command: ['sh', '-c', "until nslookup db-pg-ha-svc.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo waiting for database service; sleep 2; done"] diff --git a/automation/tests/provisioning/pg-ha_test_2/pg-ha_test.go b/automation/tests/provisioning/pg-ha_test/pg-ha_test.go similarity index 100% rename from automation/tests/provisioning/pg-ha_test_2/pg-ha_test.go rename to automation/tests/provisioning/pg-ha_test/pg-ha_test.go From ee127e50fc8fb7964271e7e0e68a229ba47a2403 Mon Sep 17 00:00:00 2001 From: Justin Orringer Date: Sun, 21 Apr 2024 18:26:04 -0400 Subject: [PATCH 26/60] Uncommit spacing change --- automation/util/setup.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/automation/util/setup.go b/automation/util/setup.go index 428b6835..2410ae3e 100644 --- a/automation/util/setup.go +++ b/automation/util/setup.go @@ -47,12 +47,13 @@ func SetupLogger(path string, rootName string) (*log.Logger, error) { if err := os.MkdirAll(dir, 0777); err != nil { return nil, err } - + // Creates the file file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) if err != nil { return nil, err } + // Links the logger to the file and returns the logger return log.New(file, rootName, log.Ldate|log.Ltime|log.Lshortfile), nil @@ -286,4 +287,4 @@ func waitAndRetryOperation(ctx context.Context, interval time.Duration, retries // Operation failed after all retries, return the last error received return err -} +} \ No newline at end of file From 707cb29d6cacaf9d16f61cd1ce8caeb16c528219 Mon Sep 17 00:00:00 2001 
From: Justin Orringer Date: Sun, 21 Apr 2024 18:26:51 -0400 Subject: [PATCH 27/60] Newline --- automation/util/setup.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/automation/util/setup.go b/automation/util/setup.go index 2410ae3e..102cf670 100644 --- a/automation/util/setup.go +++ b/automation/util/setup.go @@ -287,4 +287,4 @@ func waitAndRetryOperation(ctx context.Context, interval time.Duration, retries // Operation failed after all retries, return the last error received return err -} \ No newline at end of file +} From 835a1acef0f51465f9b14a34bbaaaffc11e41f5c Mon Sep 17 00:00:00 2001 From: bobdoe945 Date: Sun, 21 Apr 2024 20:25:00 -0400 Subject: [PATCH 28/60] webhook test updates --- api/v1alpha1/webhook_suite_test.go | 121 +++++++++++++++++++++++++++++ 1 file changed, 121 insertions(+) diff --git a/api/v1alpha1/webhook_suite_test.go b/api/v1alpha1/webhook_suite_test.go index 89219420..f3b5b1ed 100644 --- a/api/v1alpha1/webhook_suite_test.go +++ b/api/v1alpha1/webhook_suite_test.go @@ -596,6 +596,119 @@ var _ = Describe("Webhook Tests", func() { Expect(errMsg).To(ContainSubstring(fmt.Sprintf("additional arguments validation for type: %s failed!", common.DATABASE_TYPE_MSSQL))) }) }) + + FWhen("Postgres specified with IsHighAvailability", func() { + It("Should have zero nodes and IsHighAvailability set to true", func() { + clone := createDefaultClone("clone19") + clone.Spec.Clone.IsHighAvailability = true + clone.Spec.Clone.Nodes = nil + + err := k8sClient.Create(context.Background(), clone) + Expect(err).To(HaveOccurred()) + + errMsg := err.(*errors.StatusError).ErrStatus.Message + Expect(errMsg).To(ContainSubstring("invalid Node: nil")) + }) + + It("Should have 5 nodes and IsHighAvailability set to true", func() { + clone := createDefaultClone("clone19") + primaryProp := createDefaultNodeProperties("database", "primary") + secondaryProp := createDefaultNodeProperties("database", "secondary") + proxyProp := 
createDefaultNodeProperties("haproxy", "secondary") + clone.Spec.Clone.IsHighAvailability = true + clone.Spec.Clone.Nodes = []Nodes{ + Node { + VMName: "VM1", + Properties: primaryProp + }, + Node { + VMName: "VM2", + Properties: secondaryProp + }, + Node { + VMName: "VM3", + Properties: secondaryProp + }, + Node { + VMName: "VM4", + Properties: proxyProp + }, + Node { + VMName: "VM5", + Properties: proxyProp + } + } + + err := k8sClient.Create(context.Background(), clone) + Expect(err).To(HaveOccurred()) + }) + + // It("Should have 5 nodes and IsHighAvailability set to true", func() { + // clone := createDefaultClone("clone19") + // clone.Spec.Clone.IsHighAvailability = true + // clone.Spec.Clone.Nodes = []Nodes{} + + // err := k8sClient.Create(context.Background(), clone) + // Expect(err).To(HaveOccurred()) + + // totalNodes := len(clone.Spec.Clone.Nodes) + // Expect(totalNodes).To(Equal(5)) + + // haproxyNodes := 0 + // databaseNodes := 0 + + // for _, node := range nodes { + // if node.Type == "haproxy" { + // haproxyNodes++ + // } else if node.Type == "database" { + // databaseNodes++ + // } + // } + + // Expect(haproxyNodes).To(Equal(2)) + // Expect(databaseNodes).To(Equal(3)) + // }) + + // It("Should have 2 haproxy node types and IsHighAvailability set to true", func() { + // clone := createDefaultClone("clone20") + // clone.Spec.Clone.IsHighAvailability = true + + // err := k8sClient.Create(context.Background(), clone) + // Expect(err).To(HaveOccurred()) + + // nodes, err := getNodesFromCluster() + // Expect(err).ToNot(HaveOccurred()) + + // haproxyNodes := 0 + // for _, node := range nodes { + // if node.Type == "haproxy" { + // haproxyNodes++ + // } + // } + + // Expect(haproxyNodes).To(Equal(2)) + // }) + + // It("Should have 3 database node types and IsHighAvailability set to true", func() { + // clone := createDefaultClone("clone21") + // clone.Spec.Clone.IsHighAvailability = true + + // err := k8sClient.Create(context.Background(), clone) + // 
Expect(err).To(HaveOccurred())z + + // nodes, err := getNodesFromCluster() + // Expect(err).ToNot(HaveOccurred()) + + // databaseNodes := 0 + // for _, node := range nodes { + // if node.Type == "database" { + // databaseNodes++ + // } + // } + + // Expect(databaseNodes).To(Equal(3)) + // }) + }) }) }) @@ -646,3 +759,11 @@ func createDefaultClone(metadataName string) *Database { }, } } + +func createDefaultNodeProperties(type, role string) *NodeProperties { + return &NodeProperties { + NodeType: type, + Role: role, + FailoverMode: "Automatic" + } +} From 233de04a0f08e4c50f84bdd279eae97663f393a6 Mon Sep 17 00:00:00 2001 From: bobdoe945 Date: Sun, 21 Apr 2024 20:30:13 -0400 Subject: [PATCH 29/60] syntax errors --- api/v1alpha1/webhook_suite_test.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/api/v1alpha1/webhook_suite_test.go b/api/v1alpha1/webhook_suite_test.go index f3b5b1ed..8a62f388 100644 --- a/api/v1alpha1/webhook_suite_test.go +++ b/api/v1alpha1/webhook_suite_test.go @@ -619,23 +619,23 @@ var _ = Describe("Webhook Tests", func() { clone.Spec.Clone.Nodes = []Nodes{ Node { VMName: "VM1", - Properties: primaryProp + Properties: primaryProp, }, Node { VMName: "VM2", - Properties: secondaryProp + Properties: secondaryProp, }, Node { VMName: "VM3", - Properties: secondaryProp + Properties: secondaryProp, }, Node { VMName: "VM4", - Properties: proxyProp + Properties: proxyProp, }, Node { VMName: "VM5", - Properties: proxyProp + Properties: proxyProp, } } @@ -760,10 +760,10 @@ func createDefaultClone(metadataName string) *Database { } } -func createDefaultNodeProperties(type, role string) *NodeProperties { +func createDefaultNodeProperties(type string, role string) *NodeProperties { return &NodeProperties { NodeType: type, Role: role, - FailoverMode: "Automatic" + FailoverMode: "Automatic", } } From 667a9f2d3ffc78669073eb893a75f43addffa067 Mon Sep 17 00:00:00 2001 From: bobdoe945 Date: Sun, 21 Apr 2024 20:34:01 -0400 Subject: 
[PATCH 30/60] more syntax errors --- api/v1alpha1/webhook_suite_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/api/v1alpha1/webhook_suite_test.go b/api/v1alpha1/webhook_suite_test.go index 8a62f388..00896f07 100644 --- a/api/v1alpha1/webhook_suite_test.go +++ b/api/v1alpha1/webhook_suite_test.go @@ -636,7 +636,7 @@ var _ = Describe("Webhook Tests", func() { Node { VMName: "VM5", Properties: proxyProp, - } + }, } err := k8sClient.Create(context.Background(), clone) @@ -760,9 +760,9 @@ func createDefaultClone(metadataName string) *Database { } } -func createDefaultNodeProperties(type string, role string) *NodeProperties { +func createDefaultNodeProperties(node_type, role string) *NodeProperties { return &NodeProperties { - NodeType: type, + NodeType: node_type, Role: role, FailoverMode: "Automatic", } From ee2a1e14e38a4cb773bedbb2ad5f25a1b9cef441 Mon Sep 17 00:00:00 2001 From: bobdoe945 Date: Sun, 21 Apr 2024 20:36:30 -0400 Subject: [PATCH 31/60] Naming issue --- api/v1alpha1/webhook_suite_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/v1alpha1/webhook_suite_test.go b/api/v1alpha1/webhook_suite_test.go index 00896f07..82a7c8a9 100644 --- a/api/v1alpha1/webhook_suite_test.go +++ b/api/v1alpha1/webhook_suite_test.go @@ -616,7 +616,7 @@ var _ = Describe("Webhook Tests", func() { secondaryProp := createDefaultNodeProperties("database", "secondary") proxyProp := createDefaultNodeProperties("haproxy", "secondary") clone.Spec.Clone.IsHighAvailability = true - clone.Spec.Clone.Nodes = []Nodes{ + clone.Spec.Clone.Nodes = []Node{ Node { VMName: "VM1", Properties: primaryProp, From 708b4cf594d84527a868adff0ea93faf348389ec Mon Sep 17 00:00:00 2001 From: bobdoe945 Date: Sun, 21 Apr 2024 20:39:07 -0400 Subject: [PATCH 32/60] VmName --- api/v1alpha1/webhook_suite_test.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/api/v1alpha1/webhook_suite_test.go b/api/v1alpha1/webhook_suite_test.go 
index 82a7c8a9..89428e8f 100644 --- a/api/v1alpha1/webhook_suite_test.go +++ b/api/v1alpha1/webhook_suite_test.go @@ -618,23 +618,23 @@ var _ = Describe("Webhook Tests", func() { clone.Spec.Clone.IsHighAvailability = true clone.Spec.Clone.Nodes = []Node{ Node { - VMName: "VM1", + VmName: "VM1", Properties: primaryProp, }, Node { - VMName: "VM2", + VmName: "VM2", Properties: secondaryProp, }, Node { - VMName: "VM3", + VmName: "VM3", Properties: secondaryProp, }, Node { - VMName: "VM4", + VmName: "VM4", Properties: proxyProp, }, Node { - VMName: "VM5", + VmName: "VM5", Properties: proxyProp, }, } From 58862937825cf119c13e206ad06eb6a578970114 Mon Sep 17 00:00:00 2001 From: bobdoe945 Date: Sun, 21 Apr 2024 20:55:50 -0400 Subject: [PATCH 33/60] properties name --- api/v1alpha1/webhook_suite_test.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/api/v1alpha1/webhook_suite_test.go b/api/v1alpha1/webhook_suite_test.go index 89428e8f..d2205639 100644 --- a/api/v1alpha1/webhook_suite_test.go +++ b/api/v1alpha1/webhook_suite_test.go @@ -619,23 +619,23 @@ var _ = Describe("Webhook Tests", func() { clone.Spec.Clone.Nodes = []Node{ Node { VmName: "VM1", - Properties: primaryProp, + Properties: &primaryProp, }, Node { VmName: "VM2", - Properties: secondaryProp, + Properties: &secondaryProp, }, Node { VmName: "VM3", - Properties: secondaryProp, + Properties: &secondaryProp, }, Node { VmName: "VM4", - Properties: proxyProp, + Properties: &proxyProp, }, Node { VmName: "VM5", - Properties: proxyProp, + Properties: &proxyProp, }, } From c81b5aa659eaf72c9184ed56e9f88ec09e7ce099 Mon Sep 17 00:00:00 2001 From: bobdoe945 Date: Sun, 21 Apr 2024 21:37:57 -0400 Subject: [PATCH 34/60] properties --- api/v1alpha1/webhook_suite_test.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/api/v1alpha1/webhook_suite_test.go b/api/v1alpha1/webhook_suite_test.go index d2205639..7d2d770d 100644 --- a/api/v1alpha1/webhook_suite_test.go +++ 
b/api/v1alpha1/webhook_suite_test.go @@ -619,23 +619,23 @@ var _ = Describe("Webhook Tests", func() { clone.Spec.Clone.Nodes = []Node{ Node { VmName: "VM1", - Properties: &primaryProp, + Properties: *primaryProp, }, Node { VmName: "VM2", - Properties: &secondaryProp, + Properties: *secondaryProp, }, Node { VmName: "VM3", - Properties: &secondaryProp, + Properties: *secondaryProp, }, Node { VmName: "VM4", - Properties: &proxyProp, + Properties: *proxyProp, }, Node { VmName: "VM5", - Properties: &proxyProp, + Properties: *proxyProp, }, } From 95857e4a1a812af1618803f0fc25b4cc62533c25 Mon Sep 17 00:00:00 2001 From: bobdoe945 Date: Sun, 21 Apr 2024 21:42:34 -0400 Subject: [PATCH 35/60] Node change --- api/v1alpha1/webhook_suite_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/v1alpha1/webhook_suite_test.go b/api/v1alpha1/webhook_suite_test.go index 7d2d770d..13754005 100644 --- a/api/v1alpha1/webhook_suite_test.go +++ b/api/v1alpha1/webhook_suite_test.go @@ -616,7 +616,7 @@ var _ = Describe("Webhook Tests", func() { secondaryProp := createDefaultNodeProperties("database", "secondary") proxyProp := createDefaultNodeProperties("haproxy", "secondary") clone.Spec.Clone.IsHighAvailability = true - clone.Spec.Clone.Nodes = []Node{ + clone.Spec.Clone.Nodes = []&Node{ Node { VmName: "VM1", Properties: *primaryProp, From 9f24ced1f4b65ebdfab2ebbb4c61ce3db694614e Mon Sep 17 00:00:00 2001 From: bobdoe945 Date: Mon, 22 Apr 2024 09:45:38 -0400 Subject: [PATCH 36/60] testing a new change for creating nodes --- api/v1alpha1/webhook_suite_test.go | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/api/v1alpha1/webhook_suite_test.go b/api/v1alpha1/webhook_suite_test.go index 13754005..cfe7ef7d 100644 --- a/api/v1alpha1/webhook_suite_test.go +++ b/api/v1alpha1/webhook_suite_test.go @@ -602,43 +602,43 @@ var _ = Describe("Webhook Tests", func() { clone := createDefaultClone("clone19") 
clone.Spec.Clone.IsHighAvailability = true clone.Spec.Clone.Nodes = nil - + err := k8sClient.Create(context.Background(), clone) Expect(err).To(HaveOccurred()) - + errMsg := err.(*errors.StatusError).ErrStatus.Message Expect(errMsg).To(ContainSubstring("invalid Node: nil")) }) - + It("Should have 5 nodes and IsHighAvailability set to true", func() { clone := createDefaultClone("clone19") primaryProp := createDefaultNodeProperties("database", "primary") secondaryProp := createDefaultNodeProperties("database", "secondary") proxyProp := createDefaultNodeProperties("haproxy", "secondary") clone.Spec.Clone.IsHighAvailability = true - clone.Spec.Clone.Nodes = []&Node{ - Node { - VmName: "VM1", + clone.Spec.Clone.Nodes = []Node{ + { + VmName: "VM1", Properties: *primaryProp, }, - Node { - VmName: "VM2", + { + VmName: "VM2", Properties: *secondaryProp, }, - Node { - VmName: "VM3", + { + VmName: "VM3", Properties: *secondaryProp, }, - Node { - VmName: "VM4", + { + VmName: "VM4", Properties: *proxyProp, }, - Node { - VmName: "VM5", + { + VmName: "VM5", Properties: *proxyProp, }, } - + err := k8sClient.Create(context.Background(), clone) Expect(err).To(HaveOccurred()) }) From de3903b05e292180c1fb2cce7b9330f59ec8aee7 Mon Sep 17 00:00:00 2001 From: Justin Orringer Date: Sun, 21 Apr 2024 20:55:55 -0400 Subject: [PATCH 37/60] Validation for clone and provision --- api/v1alpha1/node_helpers.go | 52 +++++++++++++++++++++++++++++++++ api/v1alpha1/webhook_helpers.go | 11 +++++++ 2 files changed, 63 insertions(+) create mode 100644 api/v1alpha1/node_helpers.go diff --git a/api/v1alpha1/node_helpers.go b/api/v1alpha1/node_helpers.go new file mode 100644 index 00000000..f0b44c26 --- /dev/null +++ b/api/v1alpha1/node_helpers.go @@ -0,0 +1,52 @@ +package v1alpha1 + +// validate the Node and NodeProperties passed are valid +// e.g validate vmNames being unique, properties correctly defined, etc. 
+// one day move to common/util + +import ( + "fmt" + "reflect" + "strings" +) + +TYPE_OPTIONS := map[string]bool{"database": true, "haproxy": true} +ROLE_OPTIONS := map[string]bool{"primary": true, "secondary": true} +FAILOVER_OPTIONS := map[string]bool{"Automatic": true, "Manual": true} + +func ValidateNodes(nodes []Node, isHighAvailability bool) error { + if !isHighAvailability || len(nodes) == 0 { + return nil // no nodes is valid? + } + + vmNames := make(map[string]bool) // for validating that vmnames are unique + for _, node := range nodes { + for _, np := range node.Properties { + if err := ValidateNodeProperties(np); err != nil { + return err + } + } + + if _, ok := vmNames[node.VMName]; ok { + return fmt.Errorf("vmName %s is already specified", np.VMName) + } + vmNames[node.VMName] = true + } + + return nil +} + +func ValidateNodeProperties(np v1alpha1.NodeProperties) error { + if !TYPE_OPTIONS[np.NodeType] { + return fmt.Errorf("invalid NodeType in Node Properties: %s", np.NodeType) + } + + if !ROLE_OPTIONS[np.Role] { + return fmt.Errorf("invalid Role in Node Properties: %s", np.Role) + + if !FAILOVER_OPTIONS[np.FailoverMode] { + return fmt.Errorf("invalid FailoverMode in Node Properties: %s", np.FailoverMode) + } + + return nil +} \ No newline at end of file diff --git a/api/v1alpha1/webhook_helpers.go b/api/v1alpha1/webhook_helpers.go index b9b3e746..f424a632 100644 --- a/api/v1alpha1/webhook_helpers.go +++ b/api/v1alpha1/webhook_helpers.go @@ -94,6 +94,12 @@ func (v *CloningWebhookHandler) validateCreate(spec *DatabaseSpec, errors *field if err := additionalArgumentsValidationCheck(spec.IsClone, clone.Type, clone.IsHighAvailability, clone.AdditionalArguments); err != nil { *errors = append(*errors, field.Invalid(clonePath.Child("additionalArguments"), clone.AdditionalArguments, err.Error())) } + + // Validate nodes for HA + if err := ValidateNodes(clone.Nodes, clone.IsHighAvailability); err != nil { + *errors = append(*errors, 
field.Invalid(clonePath.Child("nodes"), spec.Nodes, err.Error())) + } + databaselog.Info("Exiting validateCreate for clone") } @@ -234,6 +240,11 @@ func (v *ProvisioningWebhookHandler) validateCreate(spec *DatabaseSpec, errors * *errors = append(*errors, field.Invalid(instancePath.Child("additionalArguments"), instance.AdditionalArguments, err.Error())) } + // Validate nodes for HA + if err := ValidateNodes(instance.Nodes, instance.IsHighAvailability); err != nil { + *errors = append(*errors, field.Invalid(instancePath.Child("nodes"), instance.Nodes, err.Error())) + } + databaselog.Info("Exiting validateCreate for provisioning") } From c71d1a09849d75804c4f97607bab09636d10aeac Mon Sep 17 00:00:00 2001 From: Justin Orringer Date: Sun, 21 Apr 2024 21:04:02 -0400 Subject: [PATCH 38/60] Correct variable declaration --- api/v1alpha1/node_helpers.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/api/v1alpha1/node_helpers.go b/api/v1alpha1/node_helpers.go index f0b44c26..5a9a6f66 100644 --- a/api/v1alpha1/node_helpers.go +++ b/api/v1alpha1/node_helpers.go @@ -10,9 +10,11 @@ import ( "strings" ) -TYPE_OPTIONS := map[string]bool{"database": true, "haproxy": true} -ROLE_OPTIONS := map[string]bool{"primary": true, "secondary": true} -FAILOVER_OPTIONS := map[string]bool{"Automatic": true, "Manual": true} +var ( + typeOptions = map[string]bool{"database": true, "haproxy": true} + roleOptions = map[string]bool{"primary": true, "secondary": true} + failoverOptions = map[string]bool{"Automatic": true, "Manual": true} +) func ValidateNodes(nodes []Node, isHighAvailability bool) error { if !isHighAvailability || len(nodes) == 0 { @@ -37,14 +39,14 @@ func ValidateNodes(nodes []Node, isHighAvailability bool) error { } func ValidateNodeProperties(np v1alpha1.NodeProperties) error { - if !TYPE_OPTIONS[np.NodeType] { + if !typeOptions[np.NodeType] { return fmt.Errorf("invalid NodeType in Node Properties: %s", np.NodeType) } - if !ROLE_OPTIONS[np.Role] { + 
if !roleOptions[np.Role] { return fmt.Errorf("invalid Role in Node Properties: %s", np.Role) - if !FAILOVER_OPTIONS[np.FailoverMode] { + if !failoverOptions[np.FailoverMode] { return fmt.Errorf("invalid FailoverMode in Node Properties: %s", np.FailoverMode) } From 5879c22c71536fa2250dab20087f4155450958bb Mon Sep 17 00:00:00 2001 From: Justin Orringer Date: Sun, 21 Apr 2024 21:29:11 -0400 Subject: [PATCH 39/60] Type errors --- api/v1alpha1/node_helpers.go | 29 +++++++++++++---------------- api/v1alpha1/webhook_helpers.go | 2 +- 2 files changed, 14 insertions(+), 17 deletions(-) diff --git a/api/v1alpha1/node_helpers.go b/api/v1alpha1/node_helpers.go index 5a9a6f66..61efff6a 100644 --- a/api/v1alpha1/node_helpers.go +++ b/api/v1alpha1/node_helpers.go @@ -6,49 +6,46 @@ package v1alpha1 import ( "fmt" - "reflect" - "strings" ) var ( - typeOptions = map[string]bool{"database": true, "haproxy": true} - roleOptions = map[string]bool{"primary": true, "secondary": true} + typeOptions = map[string]bool{"database": true, "haproxy": true} + roleOptions = map[string]bool{"primary": true, "secondary": true} failoverOptions = map[string]bool{"Automatic": true, "Manual": true} ) -func ValidateNodes(nodes []Node, isHighAvailability bool) error { - if !isHighAvailability || len(nodes) == 0 { +func ValidateNodes(nodes []*Node, isHighAvailability bool) error { + if !isHighAvailability || len(nodes) < 5 { return nil // no nodes is valid? 
} - + vmNames := make(map[string]bool) // for validating that vmnames are unique for _, node := range nodes { - for _, np := range node.Properties { - if err := ValidateNodeProperties(np); err != nil { - return err - } + if err := ValidateNodeProperties(node.Properties); err != nil { + return err } - if _, ok := vmNames[node.VMName]; ok { - return fmt.Errorf("vmName %s is already specified", np.VMName) + if _, ok := vmNames[node.VmName]; ok { + return fmt.Errorf("vmName %s is already specified", node.VmName) } - vmNames[node.VMName] = true + vmNames[node.VmName] = true } return nil } -func ValidateNodeProperties(np v1alpha1.NodeProperties) error { +func ValidateNodeProperties(np NodeProperties) error { if !typeOptions[np.NodeType] { return fmt.Errorf("invalid NodeType in Node Properties: %s", np.NodeType) } if !roleOptions[np.Role] { return fmt.Errorf("invalid Role in Node Properties: %s", np.Role) + } if !failoverOptions[np.FailoverMode] { return fmt.Errorf("invalid FailoverMode in Node Properties: %s", np.FailoverMode) } return nil -} \ No newline at end of file +} diff --git a/api/v1alpha1/webhook_helpers.go b/api/v1alpha1/webhook_helpers.go index f424a632..40ddfd39 100644 --- a/api/v1alpha1/webhook_helpers.go +++ b/api/v1alpha1/webhook_helpers.go @@ -97,7 +97,7 @@ func (v *CloningWebhookHandler) validateCreate(spec *DatabaseSpec, errors *field // Validate nodes for HA if err := ValidateNodes(clone.Nodes, clone.IsHighAvailability); err != nil { - *errors = append(*errors, field.Invalid(clonePath.Child("nodes"), spec.Nodes, err.Error())) + *errors = append(*errors, field.Invalid(clonePath.Child("nodes"), clone.Nodes, err.Error())) } databaselog.Info("Exiting validateCreate for clone") From 5032fbc93c9fae82c044bdd5b3f8c90ff5ddc136 Mon Sep 17 00:00:00 2001 From: Justin Orringer Date: Sun, 21 Apr 2024 21:51:42 -0400 Subject: [PATCH 40/60] Default should run prior to Validate --- api/v1alpha1/node_helpers.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 
deletions(-) diff --git a/api/v1alpha1/node_helpers.go b/api/v1alpha1/node_helpers.go index 61efff6a..1699373a 100644 --- a/api/v1alpha1/node_helpers.go +++ b/api/v1alpha1/node_helpers.go @@ -15,8 +15,12 @@ var ( ) func ValidateNodes(nodes []*Node, isHighAvailability bool) error { - if !isHighAvailability || len(nodes) < 5 { - return nil // no nodes is valid? + if !isHighAvailability { + return nil + } + + if len(nodes) < 5 { + return fmt.Errorf("High Availability requires at least 5 nodes") } vmNames := make(map[string]bool) // for validating that vmnames are unique From a087217f6235ea36cfd2ecc9b65f974bfdcd1c5e Mon Sep 17 00:00:00 2001 From: Justin Orringer Date: Sun, 21 Apr 2024 20:55:55 -0400 Subject: [PATCH 41/60] Validation for clone and provision --- api/v1alpha1/node_helpers.go | 52 +++++++++++++++++++++++++++++++++ api/v1alpha1/webhook_helpers.go | 11 +++++++ 2 files changed, 63 insertions(+) create mode 100644 api/v1alpha1/node_helpers.go diff --git a/api/v1alpha1/node_helpers.go b/api/v1alpha1/node_helpers.go new file mode 100644 index 00000000..f0b44c26 --- /dev/null +++ b/api/v1alpha1/node_helpers.go @@ -0,0 +1,52 @@ +package v1alpha1 + +// validate the Node and NodeProperties passed are valid +// e.g validate vmNames being unique, properties correctly defined, etc. +// one day move to common/util + +import ( + "fmt" + "reflect" + "strings" +) + +TYPE_OPTIONS := map[string]bool{"database": true, "haproxy": true} +ROLE_OPTIONS := map[string]bool{"primary": true, "secondary": true} +FAILOVER_OPTIONS := map[string]bool{"Automatic": true, "Manual": true} + +func ValidateNodes(nodes []Node, isHighAvailability bool) error { + if !isHighAvailability || len(nodes) == 0 { + return nil // no nodes is valid? 
+ } + + vmNames := make(map[string]bool) // for validating that vmnames are unique + for _, node := range nodes { + for _, np := range node.Properties { + if err := ValidateNodeProperties(np); err != nil { + return err + } + } + + if _, ok := vmNames[node.VMName]; ok { + return fmt.Errorf("vmName %s is already specified", np.VMName) + } + vmNames[node.VMName] = true + } + + return nil +} + +func ValidateNodeProperties(np v1alpha1.NodeProperties) error { + if !TYPE_OPTIONS[np.NodeType] { + return fmt.Errorf("invalid NodeType in Node Properties: %s", np.NodeType) + } + + if !ROLE_OPTIONS[np.Role] { + return fmt.Errorf("invalid Role in Node Properties: %s", np.Role) + + if !FAILOVER_OPTIONS[np.FailoverMode] { + return fmt.Errorf("invalid FailoverMode in Node Properties: %s", np.FailoverMode) + } + + return nil +} \ No newline at end of file diff --git a/api/v1alpha1/webhook_helpers.go b/api/v1alpha1/webhook_helpers.go index b9b3e746..f424a632 100644 --- a/api/v1alpha1/webhook_helpers.go +++ b/api/v1alpha1/webhook_helpers.go @@ -94,6 +94,12 @@ func (v *CloningWebhookHandler) validateCreate(spec *DatabaseSpec, errors *field if err := additionalArgumentsValidationCheck(spec.IsClone, clone.Type, clone.IsHighAvailability, clone.AdditionalArguments); err != nil { *errors = append(*errors, field.Invalid(clonePath.Child("additionalArguments"), clone.AdditionalArguments, err.Error())) } + + // Validate nodes for HA + if err := ValidateNodes(clone.Nodes, clone.IsHighAvailability); err != nil { + *errors = append(*errors, field.Invalid(clonePath.Child("nodes"), spec.Nodes, err.Error())) + } + databaselog.Info("Exiting validateCreate for clone") } @@ -234,6 +240,11 @@ func (v *ProvisioningWebhookHandler) validateCreate(spec *DatabaseSpec, errors * *errors = append(*errors, field.Invalid(instancePath.Child("additionalArguments"), instance.AdditionalArguments, err.Error())) } + // Validate nodes for HA + if err := ValidateNodes(instance.Nodes, instance.IsHighAvailability); err != 
nil { + *errors = append(*errors, field.Invalid(instancePath.Child("nodes"), instance.Nodes, err.Error())) + } + databaselog.Info("Exiting validateCreate for provisioning") } From 6e753d089cce4e9ceeb3a09c144bd83e7a2534f7 Mon Sep 17 00:00:00 2001 From: Justin Orringer Date: Sun, 21 Apr 2024 21:04:02 -0400 Subject: [PATCH 42/60] Correct variable declaration --- api/v1alpha1/node_helpers.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/api/v1alpha1/node_helpers.go b/api/v1alpha1/node_helpers.go index f0b44c26..5a9a6f66 100644 --- a/api/v1alpha1/node_helpers.go +++ b/api/v1alpha1/node_helpers.go @@ -10,9 +10,11 @@ import ( "strings" ) -TYPE_OPTIONS := map[string]bool{"database": true, "haproxy": true} -ROLE_OPTIONS := map[string]bool{"primary": true, "secondary": true} -FAILOVER_OPTIONS := map[string]bool{"Automatic": true, "Manual": true} +var ( + typeOptions = map[string]bool{"database": true, "haproxy": true} + roleOptions = map[string]bool{"primary": true, "secondary": true} + failoverOptions = map[string]bool{"Automatic": true, "Manual": true} +) func ValidateNodes(nodes []Node, isHighAvailability bool) error { if !isHighAvailability || len(nodes) == 0 { @@ -37,14 +39,14 @@ func ValidateNodes(nodes []Node, isHighAvailability bool) error { } func ValidateNodeProperties(np v1alpha1.NodeProperties) error { - if !TYPE_OPTIONS[np.NodeType] { + if !typeOptions[np.NodeType] { return fmt.Errorf("invalid NodeType in Node Properties: %s", np.NodeType) } - if !ROLE_OPTIONS[np.Role] { + if !roleOptions[np.Role] { return fmt.Errorf("invalid Role in Node Properties: %s", np.Role) - if !FAILOVER_OPTIONS[np.FailoverMode] { + if !failoverOptions[np.FailoverMode] { return fmt.Errorf("invalid FailoverMode in Node Properties: %s", np.FailoverMode) } From 6b108e6203564e6a7a08cea5fe47ba8adf3b86ec Mon Sep 17 00:00:00 2001 From: Justin Orringer Date: Sun, 21 Apr 2024 21:29:11 -0400 Subject: [PATCH 43/60] Type errors --- api/v1alpha1/node_helpers.go | 29 
+++++++++++++---------------- api/v1alpha1/webhook_helpers.go | 2 +- 2 files changed, 14 insertions(+), 17 deletions(-) diff --git a/api/v1alpha1/node_helpers.go b/api/v1alpha1/node_helpers.go index 5a9a6f66..61efff6a 100644 --- a/api/v1alpha1/node_helpers.go +++ b/api/v1alpha1/node_helpers.go @@ -6,49 +6,46 @@ package v1alpha1 import ( "fmt" - "reflect" - "strings" ) var ( - typeOptions = map[string]bool{"database": true, "haproxy": true} - roleOptions = map[string]bool{"primary": true, "secondary": true} + typeOptions = map[string]bool{"database": true, "haproxy": true} + roleOptions = map[string]bool{"primary": true, "secondary": true} failoverOptions = map[string]bool{"Automatic": true, "Manual": true} ) -func ValidateNodes(nodes []Node, isHighAvailability bool) error { - if !isHighAvailability || len(nodes) == 0 { +func ValidateNodes(nodes []*Node, isHighAvailability bool) error { + if !isHighAvailability || len(nodes) < 5 { return nil // no nodes is valid? } - + vmNames := make(map[string]bool) // for validating that vmnames are unique for _, node := range nodes { - for _, np := range node.Properties { - if err := ValidateNodeProperties(np); err != nil { - return err - } + if err := ValidateNodeProperties(node.Properties); err != nil { + return err } - if _, ok := vmNames[node.VMName]; ok { - return fmt.Errorf("vmName %s is already specified", np.VMName) + if _, ok := vmNames[node.VmName]; ok { + return fmt.Errorf("vmName %s is already specified", node.VmName) } - vmNames[node.VMName] = true + vmNames[node.VmName] = true } return nil } -func ValidateNodeProperties(np v1alpha1.NodeProperties) error { +func ValidateNodeProperties(np NodeProperties) error { if !typeOptions[np.NodeType] { return fmt.Errorf("invalid NodeType in Node Properties: %s", np.NodeType) } if !roleOptions[np.Role] { return fmt.Errorf("invalid Role in Node Properties: %s", np.Role) + } if !failoverOptions[np.FailoverMode] { return fmt.Errorf("invalid FailoverMode in Node Properties: %s", 
np.FailoverMode) } return nil -} \ No newline at end of file +} diff --git a/api/v1alpha1/webhook_helpers.go b/api/v1alpha1/webhook_helpers.go index f424a632..40ddfd39 100644 --- a/api/v1alpha1/webhook_helpers.go +++ b/api/v1alpha1/webhook_helpers.go @@ -97,7 +97,7 @@ func (v *CloningWebhookHandler) validateCreate(spec *DatabaseSpec, errors *field // Validate nodes for HA if err := ValidateNodes(clone.Nodes, clone.IsHighAvailability); err != nil { - *errors = append(*errors, field.Invalid(clonePath.Child("nodes"), spec.Nodes, err.Error())) + *errors = append(*errors, field.Invalid(clonePath.Child("nodes"), clone.Nodes, err.Error())) } databaselog.Info("Exiting validateCreate for clone") From 666abfd91a28319ec6bf2202e41e8ee607dc05c1 Mon Sep 17 00:00:00 2001 From: Justin Orringer Date: Sun, 21 Apr 2024 21:51:42 -0400 Subject: [PATCH 44/60] Default should run prior to Validate --- api/v1alpha1/node_helpers.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/api/v1alpha1/node_helpers.go b/api/v1alpha1/node_helpers.go index 61efff6a..1699373a 100644 --- a/api/v1alpha1/node_helpers.go +++ b/api/v1alpha1/node_helpers.go @@ -15,8 +15,12 @@ var ( ) func ValidateNodes(nodes []*Node, isHighAvailability bool) error { - if !isHighAvailability || len(nodes) < 5 { - return nil // no nodes is valid? 
+ if !isHighAvailability { + return nil + } + + if len(nodes) < 5 { + return fmt.Errorf("High Availability requires at least 5 nodes") } vmNames := make(map[string]bool) // for validating that vmnames are unique From 66c9069f9108f4301c803a8d16d24548d6f8d013 Mon Sep 17 00:00:00 2001 From: Cody Irion Date: Mon, 22 Apr 2024 14:44:46 -0400 Subject: [PATCH 45/60] Updated tests --- api/v1alpha1/webhook_suite_test.go | 109 +++++++++-------------------- 1 file changed, 32 insertions(+), 77 deletions(-) diff --git a/api/v1alpha1/webhook_suite_test.go b/api/v1alpha1/webhook_suite_test.go index cfe7ef7d..44786c16 100644 --- a/api/v1alpha1/webhook_suite_test.go +++ b/api/v1alpha1/webhook_suite_test.go @@ -597,26 +597,23 @@ var _ = Describe("Webhook Tests", func() { }) }) - FWhen("Postgres specified with IsHighAvailability", func() { + When("Postgres specified with IsHighAvailability", func() { It("Should have zero nodes and IsHighAvailability set to true", func() { clone := createDefaultClone("clone19") clone.Spec.Clone.IsHighAvailability = true clone.Spec.Clone.Nodes = nil - + err := k8sClient.Create(context.Background(), clone) Expect(err).To(HaveOccurred()) - - errMsg := err.(*errors.StatusError).ErrStatus.Message - Expect(errMsg).To(ContainSubstring("invalid Node: nil")) }) - + It("Should have 5 nodes and IsHighAvailability set to true", func() { clone := createDefaultClone("clone19") primaryProp := createDefaultNodeProperties("database", "primary") secondaryProp := createDefaultNodeProperties("database", "secondary") proxyProp := createDefaultNodeProperties("haproxy", "secondary") clone.Spec.Clone.IsHighAvailability = true - clone.Spec.Clone.Nodes = []Node{ + clone.Spec.Clone.Nodes = []*Node{ { VmName: "VM1", Properties: *primaryProp, @@ -638,76 +635,34 @@ var _ = Describe("Webhook Tests", func() { Properties: *proxyProp, }, } - + err := k8sClient.Create(context.Background(), clone) - Expect(err).To(HaveOccurred()) + Expect(err).ToNot(HaveOccurred()) }) - // It("Should 
have 5 nodes and IsHighAvailability set to true", func() { - // clone := createDefaultClone("clone19") - // clone.Spec.Clone.IsHighAvailability = true - // clone.Spec.Clone.Nodes = []Nodes{} - - // err := k8sClient.Create(context.Background(), clone) - // Expect(err).To(HaveOccurred()) - - // totalNodes := len(clone.Spec.Clone.Nodes) - // Expect(totalNodes).To(Equal(5)) - - // haproxyNodes := 0 - // databaseNodes := 0 - - // for _, node := range nodes { - // if node.Type == "haproxy" { - // haproxyNodes++ - // } else if node.Type == "database" { - // databaseNodes++ - // } - // } - - // Expect(haproxyNodes).To(Equal(2)) - // Expect(databaseNodes).To(Equal(3)) - // }) - - // It("Should have 2 haproxy node types and IsHighAvailability set to true", func() { - // clone := createDefaultClone("clone20") - // clone.Spec.Clone.IsHighAvailability = true - - // err := k8sClient.Create(context.Background(), clone) - // Expect(err).To(HaveOccurred()) - - // nodes, err := getNodesFromCluster() - // Expect(err).ToNot(HaveOccurred()) - - // haproxyNodes := 0 - // for _, node := range nodes { - // if node.Type == "haproxy" { - // haproxyNodes++ - // } - // } - - // Expect(haproxyNodes).To(Equal(2)) - // }) - - // It("Should have 3 database node types and IsHighAvailability set to true", func() { - // clone := createDefaultClone("clone21") - // clone.Spec.Clone.IsHighAvailability = true - - // err := k8sClient.Create(context.Background(), clone) - // Expect(err).To(HaveOccurred())z - - // nodes, err := getNodesFromCluster() - // Expect(err).ToNot(HaveOccurred()) - - // databaseNodes := 0 - // for _, node := range nodes { - // if node.Type == "database" { - // databaseNodes++ - // } - // } - - // Expect(databaseNodes).To(Equal(3)) - // }) + It("Should throw error when given 3 nodes", func() { + db := createDefaultDatabase("db19") + primaryProp := createDefaultNodeProperties("database", "primary") + secondaryProp := createDefaultNodeProperties("database", "secondary") + 
db.Spec.Instance.IsHighAvailability = true + db.Spec.Instance.Nodes = []*Node{ + { + VmName: "VM1", + Properties: *primaryProp, + }, + { + VmName: "VM2", + Properties: *secondaryProp, + }, + { + VmName: "VM3", + Properties: *secondaryProp, + }, + } + + err := k8sClient.Create(context.Background(), db) + Expect(err).To(HaveOccurred()) + }) }) }) }) @@ -761,9 +716,9 @@ func createDefaultClone(metadataName string) *Database { } func createDefaultNodeProperties(node_type, role string) *NodeProperties { - return &NodeProperties { - NodeType: node_type, - Role: role, + return &NodeProperties{ + NodeType: node_type, + Role: role, FailoverMode: "Automatic", } } From 20a725d54caa8cd5f1b9696e73d3d8cf91fad051 Mon Sep 17 00:00:00 2001 From: Cody Irion Date: Mon, 22 Apr 2024 16:59:53 -0400 Subject: [PATCH 46/60] Updated required nodes to 3 database nodes --- api/v1alpha1/node_helpers.go | 13 ++++---- ndb_api/db_helpers.go | 58 +++++++++++++++++++----------------- 2 files changed, 37 insertions(+), 34 deletions(-) diff --git a/api/v1alpha1/node_helpers.go b/api/v1alpha1/node_helpers.go index 1699373a..b4bd539b 100644 --- a/api/v1alpha1/node_helpers.go +++ b/api/v1alpha1/node_helpers.go @@ -18,13 +18,12 @@ func ValidateNodes(nodes []*Node, isHighAvailability bool) error { if !isHighAvailability { return nil } - - if len(nodes) < 5 { - return fmt.Errorf("High Availability requires at least 5 nodes") - } - + databaseNodeCount := 0 vmNames := make(map[string]bool) // for validating that vmnames are unique for _, node := range nodes { + if node.Properties.NodeType == "database" { + databaseNodeCount++ + } if err := ValidateNodeProperties(node.Properties); err != nil { return err } @@ -34,7 +33,9 @@ func ValidateNodes(nodes []*Node, isHighAvailability bool) error { } vmNames[node.VmName] = true } - + if databaseNodeCount < 3 { + return fmt.Errorf("high Availability requires at least 3 nodes database nodes") + } return nil } diff --git a/ndb_api/db_helpers.go b/ndb_api/db_helpers.go 
index e28bc19a..0011fbad 100644 --- a/ndb_api/db_helpers.go +++ b/ndb_api/db_helpers.go @@ -338,20 +338,21 @@ func setNodesParameters(req *DatabaseProvisionRequest, database DatabaseInterfac } // Validate node counts nodesRequested := database.GetInstanceNodes() - nodeCount := len(nodesRequested) - if nodeCount == 0 { - nodeCount = 5 + if len(nodesRequested) == 0 { nodesRequested = createDefaultNodes(database) } - databaseNodeCount := 0 proxyNodeCount := 0 - req.NodeCount = nodeCount - primaryNodeCount := getPrimaryNodeCount(nodesRequested) + req.NodeCount = len(nodesRequested) + primaryNodeCount, databaseNodeCount := getNodeCounts(nodesRequested) if primaryNodeCount > 1 { return fmt.Errorf("invalid nodes: HA instance can only have one primary node") } + const MinReqDatabaseNodes = 3 + if databaseNodeCount < MinReqDatabaseNodes { + return fmt.Errorf("invalid node count: HA instance needs at least %d nodes, given: %d", MinReqDatabaseNodes, databaseNodeCount) + } - for i := 0; i < nodeCount; i++ { + for i := 0; i < req.NodeCount; i++ { currentNode := nodesRequested[i] if currentNode.Properties.NodeType != "database" && currentNode.Properties.NodeType != "haproxy" { @@ -407,28 +408,26 @@ func setNodesParameters(req *DatabaseProvisionRequest, database DatabaseInterfac ComputeProfileId: req.ComputeProfileId, }) } - const MinReqDatabaseNodes = 3 - if nodeCount < MinReqDatabaseNodes { - return fmt.Errorf("invalid node count: HA instance needs at least %d nodes, given: %d", MinReqDatabaseNodes, nodeCount) - } - return nil } func createDefaultNodes(database DatabaseInterface) []*v1alpha1.Node { nodes := make([]*v1alpha1.Node, 0) - nodes = append(nodes, &v1alpha1.Node{ - VmName: database.GetAdditionalArguments()["cluster_name"] + "_haproxy1", - Properties: v1alpha1.NodeProperties{ - NodeType: "haproxy", - }, - }) - nodes = append(nodes, &v1alpha1.Node{ - VmName: database.GetAdditionalArguments()["cluster_name"] + "_haproxy2", - Properties: v1alpha1.NodeProperties{ - 
NodeType: "haproxy", - }, - }) + deployProxy := database.GetAdditionalArguments()["deploy_haproxy"] == "" || database.GetAdditionalArguments()["deploy_haproxy"] == "true" + if deployProxy { + nodes = append(nodes, &v1alpha1.Node{ + VmName: database.GetAdditionalArguments()["cluster_name"] + "_haproxy1", + Properties: v1alpha1.NodeProperties{ + NodeType: "haproxy", + }, + }) + nodes = append(nodes, &v1alpha1.Node{ + VmName: database.GetAdditionalArguments()["cluster_name"] + "_haproxy2", + Properties: v1alpha1.NodeProperties{ + NodeType: "haproxy", + }, + }) + } nodes = append(nodes, &v1alpha1.Node{ VmName: database.GetAdditionalArguments()["cluster_name"] + "-1", Properties: v1alpha1.NodeProperties{ @@ -456,14 +455,17 @@ func createDefaultNodes(database DatabaseInterface) []*v1alpha1.Node { return nodes } -func getPrimaryNodeCount(nodesRequested []*v1alpha1.Node) int { - count := 0 +func getNodeCounts(nodesRequested []*v1alpha1.Node) (primaryCount int, databaseCount int) { + for _, node := range nodesRequested { if node.Properties.Role == "Primary" { - count++ + primaryCount++ + } + if node.Properties.NodeType == "database" { + databaseCount++ } } - return count + return primaryCount, databaseCount } func defaultActionArgumentsforHAProvisioning(database DatabaseInterface, dbPassword string, databaseNames string) map[string]string { From 760ddb283a54ae8eb914bcfed883869efcad0358 Mon Sep 17 00:00:00 2001 From: Justin Orringer Date: Mon, 22 Apr 2024 22:20:11 -0400 Subject: [PATCH 47/60] Adding back the Make changes --- Makefile | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 732ee844..6abec820 100644 --- a/Makefile +++ b/Makefile @@ -299,13 +299,22 @@ run-automation-cloning: fi; \ go test $(DEFAULT_CLONING_ROOT)$$folders -v -timeout 90m -.PHONY: run-automation-provisioning +.PHONY: run-automation-provisioning-si DEFAULT_PROVISIONING_ROOT := ./automation/tests/provisioning/ -PROVISIONING_FOLDERS := ... 
-run-automation-provisioning: +# change this list to every folder but the pg-ha_test one +SI_PROVISIONING_FOLDERS := ... +run-automation-provisioning-si: @read -p "Enter the test directories with spacing to run (mongo-si_test mssql-si_test mysql-si_test pg-si_test). Else all directories will be run: " folders; \ if [ -z "$$folders" ]; then \ - folders="$(PROVISIONING_FOLDERS)"; \ + folders="$(SI_PROVISIONING_FOLDERS)"; \ fi; \ go test $(DEFAULT_PROVISIONING_ROOT)$$folders -v -timeout 90m +.PHONY: run-automation-provisioning-ha +HA_PROVISIONING_FOLDERS := pg-ha_test +run-automation-provisioning-ha: + @read -p "Enter the test directories with spacing to run (pg-ha_test). Else all directories will be run: " folders; \ + if [ -z "$$folders" ]; then \ + folders="$(HA_PROVISIONING_FOLDERS)"; \ + fi; \ + go test $(DEFAULT_PROVISIONING_ROOT)$$folders -v -timeout 90m From 3dcbcd1370ef8bf058f88cb841eed3e4a5defeac Mon Sep 17 00:00:00 2001 From: Justin Orringer Date: Mon, 22 Apr 2024 22:51:53 -0400 Subject: [PATCH 48/60] Revert "end to end test with custom test case" --- Makefile | 17 +- api/v1alpha1/zz_generated.deepcopy.go | 53 ----- .../pg-ha_test/config/database.yaml | 49 ---- .../pg-ha_test/config/db-secret.yaml | 8 - .../pg-ha_test/config/ndb-secret.yaml | 10 - .../provisioning/pg-ha_test/config/ndb.yaml | 8 - .../provisioning/pg-ha_test/config/pod.yaml | 30 --- .../provisioning/pg-ha_test/pg-ha_test.go | 212 ------------------ .../crd/bases/ndb.nutanix.com_databases.yaml | 52 +++-- 9 files changed, 32 insertions(+), 407 deletions(-) delete mode 100644 automation/tests/provisioning/pg-ha_test/config/database.yaml delete mode 100644 automation/tests/provisioning/pg-ha_test/config/db-secret.yaml delete mode 100644 automation/tests/provisioning/pg-ha_test/config/ndb-secret.yaml delete mode 100644 automation/tests/provisioning/pg-ha_test/config/ndb.yaml delete mode 100644 automation/tests/provisioning/pg-ha_test/config/pod.yaml delete mode 100644 
automation/tests/provisioning/pg-ha_test/pg-ha_test.go diff --git a/Makefile b/Makefile index 6abec820..732ee844 100644 --- a/Makefile +++ b/Makefile @@ -299,22 +299,13 @@ run-automation-cloning: fi; \ go test $(DEFAULT_CLONING_ROOT)$$folders -v -timeout 90m -.PHONY: run-automation-provisioning-si +.PHONY: run-automation-provisioning DEFAULT_PROVISIONING_ROOT := ./automation/tests/provisioning/ -# change this list to every folder but the pg-ha_test one -SI_PROVISIONING_FOLDERS := ... -run-automation-provisioning-si: +PROVISIONING_FOLDERS := ... +run-automation-provisioning: @read -p "Enter the test directories with spacing to run (mongo-si_test mssql-si_test mysql-si_test pg-si_test). Else all directories will be run: " folders; \ if [ -z "$$folders" ]; then \ - folders="$(SI_PROVISIONING_FOLDERS)"; \ + folders="$(PROVISIONING_FOLDERS)"; \ fi; \ go test $(DEFAULT_PROVISIONING_ROOT)$$folders -v -timeout 90m -.PHONY: run-automation-provisioning-ha -HA_PROVISIONING_FOLDERS := pg-ha_test -run-automation-provisioning-ha: - @read -p "Enter the test directories with spacing to run (pg-ha_test). Else all directories will be run: " folders; \ - if [ -z "$$folders" ]; then \ - folders="$(HA_PROVISIONING_FOLDERS)"; \ - fi; \ - go test $(DEFAULT_PROVISIONING_ROOT)$$folders -v -timeout 90m diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index ff63d5f3..2a061e22 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -39,17 +39,6 @@ func (in *Clone) DeepCopyInto(out *Clone) { (*out)[key] = val } } - if in.Nodes != nil { - in, out := &in.Nodes, &out.Nodes - *out = make([]*Node, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(Node) - **out = **in - } - } - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Clone. 
@@ -201,17 +190,6 @@ func (in *Instance) DeepCopyInto(out *Instance) { (*out)[key] = val } } - if in.Nodes != nil { - in, out := &in.Nodes, &out.Nodes - *out = make([]*Node, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(Node) - **out = **in - } - } - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Instance. @@ -336,37 +314,6 @@ func (in *NDBServerStatus) DeepCopy() *NDBServerStatus { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Node) DeepCopyInto(out *Node) { - *out = *in - out.Properties = in.Properties -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Node. -func (in *Node) DeepCopy() *Node { - if in == nil { - return nil - } - out := new(Node) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeProperties) DeepCopyInto(out *NodeProperties) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeProperties. -func (in *NodeProperties) DeepCopy() *NodeProperties { - if in == nil { - return nil - } - out := new(NodeProperties) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Profile) DeepCopyInto(out *Profile) { *out = *in diff --git a/automation/tests/provisioning/pg-ha_test/config/database.yaml b/automation/tests/provisioning/pg-ha_test/config/database.yaml deleted file mode 100644 index 675e1e96..00000000 --- a/automation/tests/provisioning/pg-ha_test/config/database.yaml +++ /dev/null @@ -1,49 +0,0 @@ -apiVersion: ndb.nutanix.com/v1alpha1 -kind: Database -metadata: - name: db-pg-ha -spec: - ndbRef: ndb-pg - databaseInstance: - Name: db-pg-ha - databaseNames: - - database_one - - database_two - - database_three - clusterId: - credentialSecret: db-secret-pg-ha - size: 10 - timezone: "UTC" - type: postgres - isHighAvailability: true - profiles: {} - timeMachine: - name: db-pg-ha_TM - description: "TM provisioned by operator" - sla : "DEFAULT_OOB_GOLD_SLA" - dailySnapshotTime: "12:34:56" - snapshotsPerDay: 4 - logCatchUpFrequency: 90 - weeklySnapshotDay: "WEDNESDAY" - monthlySnapshotDay: 24 - quarterlySnapshotMonth: "Jan" - additionalArguments: # Optional block, can specify additional arguments that are unique to database engines. 
- listener_port: "5432" - failover_mode: "Automatic" - deploy_haproxy: "false" - nodes: - - vmName: "test1" - properties: - node_type: database - role: Primary - failover_mode: Automatic - - vmName: "test2" - properties: - node_type: database - role: Secondary - failover_mode: Automatic - - vmName: "test3" - properties: - node_type: database - role: Secondary - failover_mode: Automatic diff --git a/automation/tests/provisioning/pg-ha_test/config/db-secret.yaml b/automation/tests/provisioning/pg-ha_test/config/db-secret.yaml deleted file mode 100644 index 84c54aa7..00000000 --- a/automation/tests/provisioning/pg-ha_test/config/db-secret.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: db-secret-pg-ha -type: Opaque -stringData: - password: - ssh_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCwyAhpllp2WwrUB1aO/0/DN5nIWNXJWQ3ybhuEG4U+kHl8xFFKnPOTDQtTK8UwByoSf6wqIfTr10ESAoHySOpxHk2gyVHVmUmRZ1WFiNR5tW3Q4qbq1qKpIVy1jH9ZRoTJwzg0J33W9W8SZzhM8Nj0nwuDqp6FS8ui7q9H3tgM+9bYYxETTg52NEw7jTVQx6KaZgG+p/8armoYPKh9DGhBYGY3oCmGiOYlm/phSlj3R63qghZIsBXKxeJDEs4cLolQ+9QYoRqqusdEGVCp7Ba/GtUPdBPYdTy+xuXGiALEpsCrqyUstxypHZVJEQfmqS8uy9UB8KFg2YepwhPgX1oN noname diff --git a/automation/tests/provisioning/pg-ha_test/config/ndb-secret.yaml b/automation/tests/provisioning/pg-ha_test/config/ndb-secret.yaml deleted file mode 100644 index f3ac03b0..00000000 --- a/automation/tests/provisioning/pg-ha_test/config/ndb-secret.yaml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: ndb-secret-pg-ha -type: Opaque -stringData: -# username and password for the test database - username: user1 - password: user1/pwd - ca_certificate: "" diff --git a/automation/tests/provisioning/pg-ha_test/config/ndb.yaml b/automation/tests/provisioning/pg-ha_test/config/ndb.yaml deleted file mode 100644 index c0857802..00000000 --- a/automation/tests/provisioning/pg-ha_test/config/ndb.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: ndb.nutanix.com/v1alpha1 
-kind: NDBServer -metadata: - name: ndb-pg -spec: - credentialSecret: ndb-secret-pg-ha - server: :8443/era/v0.9> - skipCertificateVerification: true diff --git a/automation/tests/provisioning/pg-ha_test/config/pod.yaml b/automation/tests/provisioning/pg-ha_test/config/pod.yaml deleted file mode 100644 index 24056a89..00000000 --- a/automation/tests/provisioning/pg-ha_test/config/pod.yaml +++ /dev/null @@ -1,30 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: app-pg-ha - labels: - app: app-pg-ha -spec: - containers: - - name: best-app - image: manavrajvanshinx/best-app:latest - resources: - limits: - memory: 512Mi - cpu: "1" - env: - - name: DBHOST - value: db-pg-ha-svc - - name: DBPORT - value: '80' - - name: PASSWORD - valueFrom: - secretKeyRef: - name: db-secret-pg-ha - key: password - ports: - - containerPort: 3000 - initContainers: - - name: init-db - image: busybox:1.28 - command: ['sh', '-c', "until nslookup db-pg-ha-svc.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo waiting for database service; sleep 2; done"] diff --git a/automation/tests/provisioning/pg-ha_test/pg-ha_test.go b/automation/tests/provisioning/pg-ha_test/pg-ha_test.go deleted file mode 100644 index 3c8c75e4..00000000 --- a/automation/tests/provisioning/pg-ha_test/pg-ha_test.go +++ /dev/null @@ -1,212 +0,0 @@ -package postgres_provisoning_ha - -// Basic imports -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/nutanix-cloud-native/ndb-operator/automation" - clientsetv1alpha1 "github.com/nutanix-cloud-native/ndb-operator/automation/clientset/v1alpha1" - util "github.com/nutanix-cloud-native/ndb-operator/automation/util" - "github.com/nutanix-cloud-native/ndb-operator/common" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/suite" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" -) - -// A test suite is a collection of related test cases that are grouped together for testing a specific package or 
functionality. -// The testify package builds on top of Go's built-in testing package and enhances it with additional features like assertions and test suite management. -// PostgresProvisioningHightAvailabilityInstanceTestSuite is a test suite struct that embeds testify's suite.Suite -type PostgresProvisioningHighAvailabilityTestSuite struct { - suite.Suite - ctx context.Context - setupTypes *util.SetupTypes - v1alpha1ClientSet *clientsetv1alpha1.V1alpha1Client - clientset *kubernetes.Clientset - tsm util.TestSuiteManager -} - -// SetupSuite is called once before running the tests in the suite -func (suite *PostgresProvisioningHighAvailabilityTestSuite) SetupSuite() { - var err error - var config *rest.Config - var ctx context.Context - var v1alpha1ClientSet *clientsetv1alpha1.V1alpha1Client - var clientset *kubernetes.Clientset - var tsm util.TestSuiteManager - - // Setup logger and context - logger, err := util.SetupLogger(fmt.Sprintf("%s/pg-provisioning-ha_test.log", automation.PROVISIONING_LOG_PATH), "pg-provisioning-ha: ") - if err != nil { - suite.T().FailNow() - } - ctx = util.SetupContext(context.Background(), logger) - - logger.Println("SetupSuite() starting...") - errBaseMsg := "Error: SetupSuite() ended" - - // Setup env - if err = util.CheckRequiredEnv(ctx); err != nil { - logger.Printf("%s! %s\n", errBaseMsg, err) - suite.T().FailNow() - } - - // Setup kubeconfig - config, err = util.SetupKubeconfig(ctx) - if err != nil { - logger.Printf("%s! %s\n", errBaseMsg, err) - suite.T().FailNow() - } - - // Setup scheme and clientsets - if v1alpha1ClientSet, clientset, err = util.SetupSchemeAndClientSet(ctx, config); err != nil { - logger.Printf("%s! %s\n", errBaseMsg, err) - suite.T().FailNow() - } - - // Setup yaml types - setupTypes, err := util.SetupTypeTemplates(ctx) - if err != nil { - logger.Printf("%s! 
%s\n", errBaseMsg, err) - suite.T().FailNow() - } - - // Get test suite manager - tsm = util.GetTestSuiteManager(ctx, *setupTypes) - - // Provision database and wait for database and pod to be ready - if err := tsm.Setup(ctx, setupTypes, clientset, v1alpha1ClientSet, suite.T()); err != nil { - logger.Printf("%s! %s\n", errBaseMsg, err) - suite.T().FailNow() - } - - // Set variables for the entire suite - suite.ctx = ctx - suite.setupTypes = setupTypes - suite.v1alpha1ClientSet = v1alpha1ClientSet - suite.clientset = clientset - suite.tsm = tsm - - logger.Println("SetupSuite() ended!") -} - -// TearDownSuite is called once after running the tests in the suite -func (suite *PostgresProvisioningHighAvailabilityTestSuite) TearDownSuite() { - var err error - - logger := util.GetLogger(suite.ctx) - logger.Println("TearDownSuite() starting...") - errBaseMsg := "Error: SetupSuite() ended" - - // Setup yaml types - setupTypes, err := util.SetupTypeTemplates(suite.ctx) - if err != nil { - logger.Printf("%s! %s\n", errBaseMsg, err) - suite.T().FailNow() - } - - // Delete resources and de-provision database - if err = suite.tsm.TearDown(suite.ctx, setupTypes, suite.clientset, suite.v1alpha1ClientSet, suite.T()); err != nil { - logger.Printf("%s! 
%s\n", errBaseMsg, err) - suite.T().FailNow() - } - - logger.Println("HA TearDownSuite() completed!") -} - -// This will run right before the test starts and receives the suite and test names as input -func (suite *PostgresProvisioningHighAvailabilityTestSuite) BeforeTest(suiteName, testName string) { - util.GetLogger(suite.ctx).Printf("******************** RUNNING HA TEST %s %s ********************\n", suiteName, testName) -} - -// This will run after test finishes and receives the suite and test names as input -func (suite *PostgresProvisioningHighAvailabilityTestSuite) AfterTest(suiteName, testName string) { - util.GetLogger(suite.ctx).Printf("******************** END HA TEST %s %s ********************\n", suiteName, testName) -} - -// Tests if provisioning is succesful by checking if database status is 'READY' -func (suite *PostgresProvisioningHighAvailabilityTestSuite) TestProvisioningSuccess() { - logger := util.GetLogger(suite.ctx) - - databaseResponse, err := suite.tsm.GetDatabaseOrCloneResponse(suite.ctx, suite.setupTypes, suite.clientset, suite.v1alpha1ClientSet) - if err != nil { - logger.Printf("Error: TestProvisioningSuccess() failed! %v", err) - } else { - logger.Println("Database response retrieved.") - } - - assert := assert.New(suite.T()) - assert.Equal(common.DATABASE_CR_STATUS_READY, databaseResponse.Status, "The database status should be ready.") -} - -// Tests if app is able to connect to database via GET request -func (suite *PostgresProvisioningHighAvailabilityTestSuite) TestAppConnectivity() { - logger := util.GetLogger(suite.ctx) - - resp, err := suite.tsm.GetAppResponse(suite.ctx, suite.setupTypes, suite.clientset, automation.POSTGRES_SI_PROVISONING_LOCAL_PORT) - if err != nil { - logger.Printf("Error: TestAppConnectivity failed! 
%v", err) - } else { - logger.Println("App response retrieved.") - } - - assert := assert.New(suite.T()) - assert.Equal(200, resp.StatusCode, "The response status should be 200.") -} - -// Tests if creation of time machine is succesful -func (suite *PostgresProvisioningHighAvailabilityTestSuite) TestTimeMachineSuccess() { - logger := util.GetLogger(suite.ctx) - assert := assert.New(suite.T()) - - if suite.setupTypes.Database.Spec.Instance.TMInfo.SLAName == "" || suite.setupTypes.Database.Spec.Instance.TMInfo.SLAName == "NONE" { - logger.Println("No time machine specified, test automatically passing.") - return - } - - tm, err := suite.tsm.GetTimemachineResponseByDatabaseId(suite.ctx, suite.setupTypes, suite.clientset, suite.v1alpha1ClientSet) - if err != nil { - logger.Printf("Error: TestTimeMachineSuccess() failed! %v", err) - assert.FailNow("Error: TestTimeMachineSuccess() failed! %v", err) - } else { - logger.Println("Timemachine response retrieved.") - } - - err = util.CheckTmInfo(suite.ctx, suite.setupTypes.Database, tm) - if err != nil { - logger.Printf("Error: TestTimeMachineSuccess() failed! %v", err) - assert.FailNow("Error: TestTimeMachineSuccess() failed! %v", err) - } else { - logger.Println("CheckTmInfo succesful") - } - - assert.Equal(common.DATABASE_CR_STATUS_READY, tm.Status, "The tm status should be ready.") -} - -// In order for 'go test' to run this suite, we need to create -// a normal test function and pass our suite to suite.Run -func TestPostgresProvisioningHighAvailabilityTestSuite(t *testing.T) { - suite.Run(t, new(PostgresProvisioningHighAvailabilityTestSuite)) -} - -// BeforeTestLogTime will run right before the test starts and logs the start time of the test. 
-func (suite *PostgresProvisioningHighAvailabilityTestSuite) BeforeTestLogTime(suiteName, testName string) { - logger := util.GetLogger(suite.ctx) - startTime := time.Now() - // Store the start time in the context for use in AfterTestLogTime - ctx := context.WithValue(suite.ctx, "startTime", startTime) - suite.ctx = ctx - logger.Printf("******************** STARTING HA TEST %s %s at %v ********************\n", suiteName, testName, startTime) -} - -// AfterTestLogTime will run after the test finishes and calculates the duration of the test. -func (suite *PostgresProvisioningHighAvailabilityTestSuite) AfterTestLogTime(suiteName, testName string) { - logger := util.GetLogger(suite.ctx) - startTime := suite.ctx.Value("startTime").(time.Time) - endTime := time.Now() - duration := endTime.Sub(startTime) - logger.Printf("******************** ENDING HA TEST %s %s at %v (Duration: %v) ********************\n", suiteName, testName, endTime, duration) -} diff --git a/config/crd/bases/ndb.nutanix.com_databases.yaml b/config/crd/bases/ndb.nutanix.com_databases.yaml index 12c424ac..e38496ab 100644 --- a/config/crd/bases/ndb.nutanix.com_databases.yaml +++ b/config/crd/bases/ndb.nutanix.com_databases.yaml @@ -77,21 +77,23 @@ spec: nodes: items: properties: - properties: - properties: - failover_mode: - type: string - node_type: - type: string - role: - type: string - required: - - node_type - type: object + failoverMode: + type: string + nodeType: + type: string + nxClusterId: + type: string + nxClusterName: + type: string + remoteArchiveDestination: + type: string + role: + type: string vmName: type: string required: - - properties + - nodeType + - vmName type: object type: array profiles: @@ -185,21 +187,23 @@ spec: nodes: items: properties: - properties: - properties: - failover_mode: - type: string - node_type: - type: string - role: - type: string - required: - - node_type - type: object + failoverMode: + type: string + nodeType: + type: string + nxClusterId: + type: string + 
nxClusterName: + type: string + remoteArchiveDestination: + type: string + role: + type: string vmName: type: string required: - - properties + - nodeType + - vmName type: object type: array profiles: From f541e13d098aa5701d84f3a2025debf5c8ce01e6 Mon Sep 17 00:00:00 2001 From: Justin Orringer Date: Tue, 23 Apr 2024 10:25:58 -0400 Subject: [PATCH 49/60] Removing cloning functionality --- api/v1alpha1/database_types.go | 5 +- ndb_api/clone_helpers.go | 94 +--------------------------------- 2 files changed, 2 insertions(+), 97 deletions(-) diff --git a/api/v1alpha1/database_types.go b/api/v1alpha1/database_types.go index d9e05b12..0ea55aca 100644 --- a/api/v1alpha1/database_types.go +++ b/api/v1alpha1/database_types.go @@ -107,11 +107,8 @@ type Instance struct { // +optional // Additional database engine specific arguments AdditionalArguments map[string]string `json:"additionalArguments"` - // +optional - IsHighAvailability bool `json:"isHighAvailability"` - // +optional - Nodes []*Node `json:"nodes,omitempty"` } + type Node struct { // +optional VmName string `json:"vmName"` diff --git a/ndb_api/clone_helpers.go b/ndb_api/clone_helpers.go index 37239e99..ce8e4333 100644 --- a/ndb_api/clone_helpers.go +++ b/ndb_api/clone_helpers.go @@ -97,7 +97,7 @@ func GenerateCloningRequest(ctx context.Context, ndb_client ndb_client.NDBClient NetworkProfileId: profilesMap[common.PROFILE_TYPE_NETWORK].Id, DatabaseParameterProfileId: profilesMap[common.PROFILE_TYPE_DATABASE_PARAMETER].Id, } - // boolean for high availability + // boolean for high availability; unavailable for cloning isHighAvailability := false // Appending request body based on database type @@ -214,98 +214,6 @@ func (a *PostgresRequestAppender) appendCloningRequest(req *DatabaseCloneRequest return req, nil } -func setCloneNodesParameters(req *DatabaseCloneRequest, database DatabaseInterface) { - // Extract values of ComputeProfileId and NetworkProfileId - computeProfileId := req.Nodes[0].ComputeProfileId - 
networkProfileId := req.Nodes[0].NetworkProfileId - serverTimeZone := req.Nodes[0].NewDbServerTimeZone - - // Convert database.Instance.Nodes to the common type Nodes - req.Nodes = []Node{} - for _, node := range database.GetInstanceNodes() { - built := Node{} - if node.Properties.NodeType == "haproxy" { - built = buildHAProxyNode(req, node, database.GetClusterId()) - } else { - built = buildDatabaseNode(req, node, computeProfileId, networkProfileId, serverTimeZone, database.GetClusterId()) - } - - req.Nodes = append(req.Nodes, built) - } -} - -func buildHAProxyNode(req *DatabaseCloneRequest, node *v1alpha1.Node, clusterId string) Node { - props := make([]map[string]string, 1) - props[0] = map[string]string{ - "name": "node_type", - "value": node.Properties.NodeType, - } - return Node{ - Properties: props, - VmName: node.VmName, - NxClusterId: clusterId, - } -} - -func buildDatabaseNode(req *DatabaseCloneRequest, node *v1alpha1.Node, computeProfileId, networkProfileId, serverTimeZone, clusterId string) Node { - props := make([]map[string]string, 4) - props[0] = map[string]string{ - "name": "role", - "value": node.Properties.Role, - } - props[1] = map[string]string{ - "name": "failover_mode", - "value": node.Properties.FailoverMode, - } - props[2] = map[string]string{ - "name": "node_type", - "value": node.Properties.NodeType, - } - props[3] = map[string]string{ - "name": "remote_archive_destination", - "value": "", - } - return Node{ - ComputeProfileId: computeProfileId, - NetworkProfileId: networkProfileId, - NewDbServerTimeZone: serverTimeZone, - Properties: props, - VmName: node.VmName, - NxClusterId: clusterId, - } -} - -func (a *PostgresHARequestAppender) appendCloningRequest(req *DatabaseCloneRequest, database DatabaseInterface, reqData map[string]interface{}) (*DatabaseCloneRequest, error) { - req.SSHPublicKey = reqData[common.NDB_PARAM_SSH_PUBLIC_KEY].(string) - dbPassword := reqData[common.NDB_PARAM_PASSWORD].(string) - - req.NodeCount = 
len(database.GetInstanceNodes()) - setCloneNodesParameters(req, database) - - // Default action arguments - actionArguments := map[string]string{ - /* Non-Configurable from additionalArguments*/ - "vm_name": database.GetName(), - "dbserver_description": "DB Server VM for " + database.GetName(), - "db_password": dbPassword, - } - - // Appending/overwriting database actionArguments to actionArguments - if err := setConfiguredActionArguments(database, actionArguments); err != nil { - return nil, err - } - - // Converting action arguments map to list and appending to req.ActionArguments - req.ActionArguments = append(req.ActionArguments, convertMapToActionArguments(actionArguments)...) - - // Appending LCMConfig Details if specified - if err := appendLCMConfigDetailsToRequest(req, database.GetAdditionalArguments()); err != nil { - return nil, err - } - - return req, nil -} - func (a *MySqlRequestAppender) appendCloningRequest(req *DatabaseCloneRequest, database DatabaseInterface, reqData map[string]interface{}) (*DatabaseCloneRequest, error) { req.SSHPublicKey = reqData[common.NDB_PARAM_SSH_PUBLIC_KEY].(string) dbPassword := reqData[common.NDB_PARAM_PASSWORD].(string) From 7bc5b1524f6bd684378cd73bde4529891e85ee92 Mon Sep 17 00:00:00 2001 From: Justin Orringer Date: Tue, 23 Apr 2024 10:37:55 -0400 Subject: [PATCH 50/60] Removed the wrong struct params --- api/v1alpha1/database_types.go | 8 ++++---- api/v1alpha1/webhook_helpers.go | 9 +++------ 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/api/v1alpha1/database_types.go b/api/v1alpha1/database_types.go index 0ea55aca..e13e484e 100644 --- a/api/v1alpha1/database_types.go +++ b/api/v1alpha1/database_types.go @@ -107,6 +107,10 @@ type Instance struct { // +optional // Additional database engine specific arguments AdditionalArguments map[string]string `json:"additionalArguments"` + // +optional + IsHighAvailability bool `json:"isHighAvailability"` + // +optional + Nodes []*Node `json:"nodes,omitempty"` } 
type Node struct { @@ -147,10 +151,6 @@ type Clone struct { // +optional // Additional database engine specific arguments AdditionalArguments map[string]string `json:"additionalArguments"` - // +optional - IsHighAvailability bool `json:"isHighAvailability"` - // +optional - Nodes []*Node `json:"nodes,omitempty"` } // Time Machine details diff --git a/api/v1alpha1/webhook_helpers.go b/api/v1alpha1/webhook_helpers.go index 40ddfd39..8e1cbff3 100644 --- a/api/v1alpha1/webhook_helpers.go +++ b/api/v1alpha1/webhook_helpers.go @@ -91,15 +91,12 @@ func (v *CloningWebhookHandler) validateCreate(spec *DatabaseSpec, errors *field } } - if err := additionalArgumentsValidationCheck(spec.IsClone, clone.Type, clone.IsHighAvailability, clone.AdditionalArguments); err != nil { + // HA is not supported when cloning + isHighAvailability := false + if err := additionalArgumentsValidationCheck(spec.IsClone, clone.Type, isHighAvailability, clone.AdditionalArguments); err != nil { *errors = append(*errors, field.Invalid(clonePath.Child("additionalArguments"), clone.AdditionalArguments, err.Error())) } - // Validate nodes for HA - if err := ValidateNodes(clone.Nodes, clone.IsHighAvailability); err != nil { - *errors = append(*errors, field.Invalid(clonePath.Child("nodes"), clone.Nodes, err.Error())) - } - databaselog.Info("Exiting validateCreate for clone") } From cbfc9711592f114349d0db678e726964193d1595 Mon Sep 17 00:00:00 2001 From: Justin Orringer Date: Tue, 23 Apr 2024 12:26:17 -0400 Subject: [PATCH 51/60] Adding back the appendCloningRequest hook needed --- ndb_api/clone_helpers.go | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/ndb_api/clone_helpers.go b/ndb_api/clone_helpers.go index ce8e4333..70fb4376 100644 --- a/ndb_api/clone_helpers.go +++ b/ndb_api/clone_helpers.go @@ -18,7 +18,6 @@ import ( "errors" "fmt" - "github.com/nutanix-cloud-native/ndb-operator/api/v1alpha1" "github.com/nutanix-cloud-native/ndb-operator/common" 
"github.com/nutanix-cloud-native/ndb-operator/ndb_client" ctrllog "sigs.k8s.io/controller-runtime/pkg/log" @@ -214,6 +213,36 @@ func (a *PostgresRequestAppender) appendCloningRequest(req *DatabaseCloneRequest return req, nil } +func (a *PostgresHARequestAppender) appendCloningRequest(req *DatabaseCloneRequest, database DatabaseInterface, reqData map[string]interface{}) (*DatabaseCloneRequest, error) { + req.SSHPublicKey = reqData[common.NDB_PARAM_SSH_PUBLIC_KEY].(string) + dbPassword := reqData[common.NDB_PARAM_PASSWORD].(string) + + req.NodeCount = len(database.GetInstanceNodes()) + + // Default action arguments + actionArguments := map[string]string{ + /* Non-Configurable from additionalArguments*/ + "vm_name": database.GetName(), + "dbserver_description": "DB Server VM for " + database.GetName(), + "db_password": dbPassword, + } + + // Appending/overwriting database actionArguments to actionArguments + if err := setConfiguredActionArguments(database, actionArguments); err != nil { + return nil, err + } + + // Converting action arguments map to list and appending to req.ActionArguments + req.ActionArguments = append(req.ActionArguments, convertMapToActionArguments(actionArguments)...) 
+ + // Appending LCMConfig Details if specified + if err := appendLCMConfigDetailsToRequest(req, database.GetAdditionalArguments()); err != nil { + return nil, err + } + + return req, nil +} + func (a *MySqlRequestAppender) appendCloningRequest(req *DatabaseCloneRequest, database DatabaseInterface, reqData map[string]interface{}) (*DatabaseCloneRequest, error) { req.SSHPublicKey = reqData[common.NDB_PARAM_SSH_PUBLIC_KEY].(string) dbPassword := reqData[common.NDB_PARAM_PASSWORD].(string) From f4b05c3870c9402e77bb5dcb5965d82c091b2da1 Mon Sep 17 00:00:00 2001 From: Justin Orringer Date: Tue, 23 Apr 2024 12:27:06 -0400 Subject: [PATCH 52/60] GetInstanceIsHA on clone --- controller_adapters/database.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/controller_adapters/database.go b/controller_adapters/database.go index e6f052db..46bde5e3 100644 --- a/controller_adapters/database.go +++ b/controller_adapters/database.go @@ -147,7 +147,8 @@ func (d *Database) GetInstanceSize() int { func (d *Database) GetInstanceIsHighAvailability() bool { if d.IsClone() { - return d.Spec.Clone.IsHighAvailability + // Clone doesn't currently support HA + return false } return d.Spec.Instance.IsHighAvailability } From bf09f048d82f2e3e119ad538a60287d3f9be3e60 Mon Sep 17 00:00:00 2001 From: Justin Orringer Date: Tue, 23 Apr 2024 12:27:33 -0400 Subject: [PATCH 53/60] Fix test, no IsHighAvailability field on clone --- api/v1alpha1/webhook_suite_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/api/v1alpha1/webhook_suite_test.go b/api/v1alpha1/webhook_suite_test.go index 89219420..0cfe5018 100644 --- a/api/v1alpha1/webhook_suite_test.go +++ b/api/v1alpha1/webhook_suite_test.go @@ -641,7 +641,6 @@ func createDefaultClone(metadataName string) *Database { SnapshotId: DEFAULT_UUID, Profiles: &(Profiles{}), AdditionalArguments: map[string]string{}, - IsHighAvailability: HA, }, }, } From bf3dd2dc0b663ed80c73a5a68e2a38579b583951 Mon Sep 17 00:00:00 2001 From: Justin 
Orringer Date: Tue, 23 Apr 2024 13:04:59 -0400 Subject: [PATCH 54/60] Reflect cloning removed in readme --- README.md | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/README.md b/README.md index 4cd600ae..30df93f6 100644 --- a/README.md +++ b/README.md @@ -290,6 +290,26 @@ additionalArguments: windows_domain_profile_id: # NO Default. Must specify vm_db_server_user. vm_db_server_user: # NO Default. Must specify windows_domain_profile_id. vm_win_license_key: # NO Default. + +# Postgres High Availability +additionalArguments: + listener_port: "1111" # Default: "5432" + failover_mode: "Manual" # Default: "Automatic" + proxy_read_port: "1111" # Default: "5001" + listener_port: "1111" # Default: "5432" + proxy_write_port: "1111" # Default: "5000", + enable_synchronous_mode: "true" # Default: "true", + auto_tune_staging_drive: "false" # Default: true", + backup_policy: "primary_only" # Default: "primary_only" + provision_virtual_ip": "true" # Default: "true" + deploy_haproxy: "true" # Default: "true" + node_type: "haproxy" # Default: "database" + allocate_pg_hugepage: "false" # Default: "true" + cluster_database: "false" # Default: "true" + archive_wal_expire_days: "7" # Default: "-1" + enable_peer_auth: "false" # Default: "true" + cluster_name: "" + patroni_cluster_name: "" ``` Cloning Additional Arguments: From 2002a9609430be889d92865ed019a5f3ef0af5d8 Mon Sep 17 00:00:00 2001 From: bobdoe945 Date: Tue, 23 Apr 2024 20:58:43 -0400 Subject: [PATCH 55/60] Removed the clone tests and added db tests --- api/v1alpha1/webhook_suite_test.go | 204 +++++++++++++++++++---------- 1 file changed, 137 insertions(+), 67 deletions(-) diff --git a/api/v1alpha1/webhook_suite_test.go b/api/v1alpha1/webhook_suite_test.go index 44786c16..31d615ec 100644 --- a/api/v1alpha1/webhook_suite_test.go +++ b/api/v1alpha1/webhook_suite_test.go @@ -354,6 +354,143 @@ var _ = Describe("Webhook Tests", func() { Expect(errMsg).To(ContainSubstring(fmt.Sprintf("additional arguments 
validation for type: %s failed!", common.DATABASE_TYPE_MSSQL))) }) }) + + When("Postgres specified with IsHighAvailability", func() { + It("Should have zero nodes and IsHighAvailability set to true", func() { + db := createDefaultDatabase("db15") + db.Spec.Instance.AdditionalArguments = map[string]string{ + "failover_mode": "Automatic", + "proxy_read_port": "5001", + "listener_port": "5432", + "proxy_write_port": "5000", + "enable_synchronous_mode": "true", + "auto_tune_staging_drive": "true", + "backup_policy": "primary_only", + "provision_virtual_ip": "true", + "deploy_haproxy": "true", + "node_type": "database", + "allocate_pg_hugepage": "false", + "cluster_database": "false", + "archive_wal_expire_days": "-1", + "enable_peer_auth": "false", + "cluster_name": "psqlcluster", + "patroni_cluster_name": "patroni", + } + db.Spec.Instance.IsHighAvailability = true + db.Spec.Instance.Nodes = nil + + err := k8sClient.Create(context.Background(), db) + Expect(err).To(HaveOccurred()) + }) + + It("Should have 5 nodes and IsHighAvailability set to true", func() { + db := createDefaultDatabase("db16") + db.Spec.Instance.AdditionalArguments = map[string]string{ + "failover_mode": "Automatic", + "proxy_read_port": "5001", + "listener_port": "5432", + "proxy_write_port": "5000", + "enable_synchronous_mode": "true", + "auto_tune_staging_drive": "true", + "backup_policy": "primary_only", + "provision_virtual_ip": "true", + "deploy_haproxy": "true", + "node_type": "database", + "allocate_pg_hugepage": "false", + "cluster_database": "false", + "archive_wal_expire_days": "-1", + "enable_peer_auth": "false", + "cluster_name": "psqlcluster", + "patroni_cluster_name": "patroni", + } + primaryProp := createDefaultNodeProperties("database", "primary") + secondaryProp := createDefaultNodeProperties("database", "secondary") + proxyProp := createDefaultNodeProperties("haproxy", "secondary") + db.Spec.Instance.IsHighAvailability = true + db.Spec.Instance.Nodes = []*Node{ + { + VmName: "VM1", 
+ Properties: *primaryProp, + }, + { + VmName: "VM2", + Properties: *secondaryProp, + }, + { + VmName: "VM3", + Properties: *secondaryProp, + }, + { + VmName: "VM4", + Properties: *proxyProp, + }, + { + VmName: "VM5", + Properties: *proxyProp, + }, + } + + err := k8sClient.Create(context.Background(), db) + Expect(err).ToNot(HaveOccurred()) + }) + + It("Should throw error when given 3 nodes", func() { + db := createDefaultDatabase("db17") + db.Spec.Instance.AdditionalArguments = map[string]string{ + "failover_mode": "Automatic", + "proxy_read_port": "5001", + "listener_port": "5432", + "proxy_write_port": "5000", + "enable_synchronous_mode": "true", + "auto_tune_staging_drive": "true", + "backup_policy": "primary_only", + "provision_virtual_ip": "true", + "deploy_haproxy": "true", + "node_type": "database", + "allocate_pg_hugepage": "false", + "cluster_database": "false", + "archive_wal_expire_days": "-1", + "enable_peer_auth": "false", + "cluster_name": "psqlcluster", + "patroni_cluster_name": "patroni", + } + primaryProp := createDefaultNodeProperties("database", "primary") + secondaryProp := createDefaultNodeProperties("database", "secondary") + db.Spec.Instance.IsHighAvailability = true + db.Spec.Instance.Nodes = []*Node{ + { + VmName: "VM1", + Properties: *primaryProp, + }, + { + VmName: "VM2", + Properties: *secondaryProp, + }, + { + VmName: "VM3", + Properties: *secondaryProp, + }, + } + + err := k8sClient.Create(context.Background(), db) + Expect(err).To(HaveOccurred()) + }) + + It("Should error out for invalid Postgres additionalArguments and IsHighAvailability set to true", func() { + db := createDefaultDatabase("db18") + db.Spec.Instance.AdditionalArguments = map[string]string{ + "listener_port": "5432", + "invalid": "invalid", + } + db.Spec.Instance.IsHighAvailability = true + + err := k8sClient.Create(context.Background(), db) + Expect(err).To(HaveOccurred()) + errMsg := err.(*errors.StatusError).ErrStatus.Message + 
Expect(errMsg).To(ContainSubstring(fmt.Sprintf("additional arguments validation for type: %s failed!", common.DATABASE_TYPE_POSTGRES))) + }) + }) + }) Context("Clone checks", func() { @@ -597,73 +734,6 @@ var _ = Describe("Webhook Tests", func() { }) }) - When("Postgres specified with IsHighAvailability", func() { - It("Should have zero nodes and IsHighAvailability set to true", func() { - clone := createDefaultClone("clone19") - clone.Spec.Clone.IsHighAvailability = true - clone.Spec.Clone.Nodes = nil - - err := k8sClient.Create(context.Background(), clone) - Expect(err).To(HaveOccurred()) - }) - - It("Should have 5 nodes and IsHighAvailability set to true", func() { - clone := createDefaultClone("clone19") - primaryProp := createDefaultNodeProperties("database", "primary") - secondaryProp := createDefaultNodeProperties("database", "secondary") - proxyProp := createDefaultNodeProperties("haproxy", "secondary") - clone.Spec.Clone.IsHighAvailability = true - clone.Spec.Clone.Nodes = []*Node{ - { - VmName: "VM1", - Properties: *primaryProp, - }, - { - VmName: "VM2", - Properties: *secondaryProp, - }, - { - VmName: "VM3", - Properties: *secondaryProp, - }, - { - VmName: "VM4", - Properties: *proxyProp, - }, - { - VmName: "VM5", - Properties: *proxyProp, - }, - } - - err := k8sClient.Create(context.Background(), clone) - Expect(err).ToNot(HaveOccurred()) - }) - - It("Should throw error when given 3 nodes", func() { - db := createDefaultDatabase("db19") - primaryProp := createDefaultNodeProperties("database", "primary") - secondaryProp := createDefaultNodeProperties("database", "secondary") - db.Spec.Instance.IsHighAvailability = true - db.Spec.Instance.Nodes = []*Node{ - { - VmName: "VM1", - Properties: *primaryProp, - }, - { - VmName: "VM2", - Properties: *secondaryProp, - }, - { - VmName: "VM3", - Properties: *secondaryProp, - }, - } - - err := k8sClient.Create(context.Background(), db) - Expect(err).To(HaveOccurred()) - }) - }) }) }) From 
497a918c5506e5b57bc4ce5b3100e3bc4c265ea7 Mon Sep 17 00:00:00 2001 From: Mazin S Date: Mon, 22 Apr 2024 10:05:52 -0700 Subject: [PATCH 56/60] Release v0.5.1 (#198) * bumped to gcr.io/kubebuilder/kube-rbac-proxy:v0.16.0 from v0.15.0 to fix security vulnerability * bumped to v0.5.2 * bumped up to 0.5.1 --- Makefile | 2 +- config/default/manager_auth_proxy_patch.yaml | 2 +- config/manager/kustomization.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 732ee844..74bb880c 100644 --- a/Makefile +++ b/Makefile @@ -3,7 +3,7 @@ # To re-generate a bundle for another specific version without changing the standard setup, you can: # - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.8) # - use environment variables to overwrite this value (e.g export VERSION=0.0.8) -VERSION ?= 0.5.0 +VERSION ?= 0.5.1 # CHANNELS define the bundle channels used in the bundle. # Add a new line here if you would like to change its default config. (E.g CHANNELS = "candidate,fast,stable") diff --git a/config/default/manager_auth_proxy_patch.yaml b/config/default/manager_auth_proxy_patch.yaml index bd5c11b8..b82a310c 100644 --- a/config/default/manager_auth_proxy_patch.yaml +++ b/config/default/manager_auth_proxy_patch.yaml @@ -16,7 +16,7 @@ spec: capabilities: drop: - "ALL" - image: gcr.io/kubebuilder/kube-rbac-proxy:v0.15.0 + image: gcr.io/kubebuilder/kube-rbac-proxy:v0.16.0 args: - "--secure-listen-address=0.0.0.0:8443" - "--upstream=http://127.0.0.1:8080/" diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index 4410e1b2..e3356666 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -13,4 +13,4 @@ kind: Kustomization images: - name: controller newName: ghcr.io/nutanix-cloud-native/ndb-operator/controller - newTag: v0.5.0 + newTag: v0.5.1 From eb00f13c4e48689c48d23b51a4da30969f1310b1 Mon Sep 17 00:00:00 2001 From: bobdoe945 Date: Tue, 23 Apr 2024 22:21:18 
-0400 Subject: [PATCH 57/60] Fixed test case --- api/v1alpha1/webhook_suite_test.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/api/v1alpha1/webhook_suite_test.go b/api/v1alpha1/webhook_suite_test.go index 31d615ec..6722e8b8 100644 --- a/api/v1alpha1/webhook_suite_test.go +++ b/api/v1alpha1/webhook_suite_test.go @@ -434,7 +434,7 @@ var _ = Describe("Webhook Tests", func() { Expect(err).ToNot(HaveOccurred()) }) - It("Should throw error when given 3 nodes", func() { + It("Should throw error when given 2 nodes", func() { db := createDefaultDatabase("db17") db.Spec.Instance.AdditionalArguments = map[string]string{ "failover_mode": "Automatic", @@ -466,10 +466,6 @@ var _ = Describe("Webhook Tests", func() { VmName: "VM2", Properties: *secondaryProp, }, - { - VmName: "VM3", - Properties: *secondaryProp, - }, } err := k8sClient.Create(context.Background(), db) From bbc95984cf06bacb7b00293a72b35578103bd5af Mon Sep 17 00:00:00 2001 From: Cody Irion Date: Tue, 23 Apr 2024 22:24:22 -0400 Subject: [PATCH 58/60] Added check to the end to end test to assert that more than 1 node was created --- automation/tests/provisioning/pg-ha_test/pg-ha_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/automation/tests/provisioning/pg-ha_test/pg-ha_test.go b/automation/tests/provisioning/pg-ha_test/pg-ha_test.go index 3c8c75e4..fcea9964 100644 --- a/automation/tests/provisioning/pg-ha_test/pg-ha_test.go +++ b/automation/tests/provisioning/pg-ha_test/pg-ha_test.go @@ -140,6 +140,7 @@ func (suite *PostgresProvisioningHighAvailabilityTestSuite) TestProvisioningSucc assert := assert.New(suite.T()) assert.Equal(common.DATABASE_CR_STATUS_READY, databaseResponse.Status, "The database status should be ready.") + assert.Greater(len(databaseResponse.DatabaseNodes), 1) } // Tests if app is able to connect to database via GET request From 4fc1804aade8eca8d2cdbb07b5c61d69ba3077ea Mon Sep 17 00:00:00 2001 From: Zhang Zhi Date: Mon, 22 Apr 2024 22:29:17 -0400 Subject: 
[PATCH 59/60] Merge pull request #8 from irion4686/ete_ha_test end to end test with custom test case --- Makefile | 17 +- api/v1alpha1/zz_generated.deepcopy.go | 53 +++++ .../pg-ha_test/config/database.yaml | 49 ++++ .../pg-ha_test/config/db-secret.yaml | 8 + .../pg-ha_test/config/ndb-secret.yaml | 10 + .../provisioning/pg-ha_test/config/ndb.yaml | 8 + .../provisioning/pg-ha_test/config/pod.yaml | 30 +++ .../provisioning/pg-ha_test/pg-ha_test.go | 212 ++++++++++++++++++ .../crd/bases/ndb.nutanix.com_databases.yaml | 52 ++--- 9 files changed, 407 insertions(+), 32 deletions(-) create mode 100644 automation/tests/provisioning/pg-ha_test/config/database.yaml create mode 100644 automation/tests/provisioning/pg-ha_test/config/db-secret.yaml create mode 100644 automation/tests/provisioning/pg-ha_test/config/ndb-secret.yaml create mode 100644 automation/tests/provisioning/pg-ha_test/config/ndb.yaml create mode 100644 automation/tests/provisioning/pg-ha_test/config/pod.yaml create mode 100644 automation/tests/provisioning/pg-ha_test/pg-ha_test.go diff --git a/Makefile b/Makefile index 74bb880c..f8c084d1 100644 --- a/Makefile +++ b/Makefile @@ -299,13 +299,22 @@ run-automation-cloning: fi; \ go test $(DEFAULT_CLONING_ROOT)$$folders -v -timeout 90m -.PHONY: run-automation-provisioning +.PHONY: run-automation-provisioning-si DEFAULT_PROVISIONING_ROOT := ./automation/tests/provisioning/ -PROVISIONING_FOLDERS := ... -run-automation-provisioning: +# change this list to every folder but the pg-ha_test one +SI_PROVISIONING_FOLDERS := ... +run-automation-provisioning-si: @read -p "Enter the test directories with spacing to run (mongo-si_test mssql-si_test mysql-si_test pg-si_test). 
Else all directories will be run: " folders; \ if [ -z "$$folders" ]; then \ - folders="$(PROVISIONING_FOLDERS)"; \ + folders="$(SI_PROVISIONING_FOLDERS)"; \ fi; \ go test $(DEFAULT_PROVISIONING_ROOT)$$folders -v -timeout 90m +.PHONY: run-automation-provisioning-ha +HA_PROVISIONING_FOLDERS := pg-ha_test +run-automation-provisioning-ha: + @read -p "Enter the test directories with spacing to run (pg-ha_test). Else all directories will be run: " folders; \ + if [ -z "$$folders" ]; then \ + folders="$(HA_PROVISIONING_FOLDERS)"; \ + fi; \ + go test $(DEFAULT_PROVISIONING_ROOT)$$folders -v -timeout 90m diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 2a061e22..ff63d5f3 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -39,6 +39,17 @@ func (in *Clone) DeepCopyInto(out *Clone) { (*out)[key] = val } } + if in.Nodes != nil { + in, out := &in.Nodes, &out.Nodes + *out = make([]*Node, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Node) + **out = **in + } + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Clone. @@ -190,6 +201,17 @@ func (in *Instance) DeepCopyInto(out *Instance) { (*out)[key] = val } } + if in.Nodes != nil { + in, out := &in.Nodes, &out.Nodes + *out = make([]*Node, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Node) + **out = **in + } + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Instance. @@ -314,6 +336,37 @@ func (in *NDBServerStatus) DeepCopy() *NDBServerStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Node) DeepCopyInto(out *Node) { + *out = *in + out.Properties = in.Properties +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Node. +func (in *Node) DeepCopy() *Node { + if in == nil { + return nil + } + out := new(Node) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeProperties) DeepCopyInto(out *NodeProperties) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeProperties. +func (in *NodeProperties) DeepCopy() *NodeProperties { + if in == nil { + return nil + } + out := new(NodeProperties) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Profile) DeepCopyInto(out *Profile) { *out = *in diff --git a/automation/tests/provisioning/pg-ha_test/config/database.yaml b/automation/tests/provisioning/pg-ha_test/config/database.yaml new file mode 100644 index 00000000..675e1e96 --- /dev/null +++ b/automation/tests/provisioning/pg-ha_test/config/database.yaml @@ -0,0 +1,49 @@ +apiVersion: ndb.nutanix.com/v1alpha1 +kind: Database +metadata: + name: db-pg-ha +spec: + ndbRef: ndb-pg + databaseInstance: + Name: db-pg-ha + databaseNames: + - database_one + - database_two + - database_three + clusterId: + credentialSecret: db-secret-pg-ha + size: 10 + timezone: "UTC" + type: postgres + isHighAvailability: true + profiles: {} + timeMachine: + name: db-pg-ha_TM + description: "TM provisioned by operator" + sla : "DEFAULT_OOB_GOLD_SLA" + dailySnapshotTime: "12:34:56" + snapshotsPerDay: 4 + logCatchUpFrequency: 90 + weeklySnapshotDay: "WEDNESDAY" + monthlySnapshotDay: 24 + quarterlySnapshotMonth: "Jan" + additionalArguments: # Optional block, can specify additional arguments that are unique to database engines. 
+ listener_port: "5432" + failover_mode: "Automatic" + deploy_haproxy: "false" + nodes: + - vmName: "test1" + properties: + node_type: database + role: Primary + failover_mode: Automatic + - vmName: "test2" + properties: + node_type: database + role: Secondary + failover_mode: Automatic + - vmName: "test3" + properties: + node_type: database + role: Secondary + failover_mode: Automatic diff --git a/automation/tests/provisioning/pg-ha_test/config/db-secret.yaml b/automation/tests/provisioning/pg-ha_test/config/db-secret.yaml new file mode 100644 index 00000000..84c54aa7 --- /dev/null +++ b/automation/tests/provisioning/pg-ha_test/config/db-secret.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Secret +metadata: + name: db-secret-pg-ha +type: Opaque +stringData: + password: + ssh_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCwyAhpllp2WwrUB1aO/0/DN5nIWNXJWQ3ybhuEG4U+kHl8xFFKnPOTDQtTK8UwByoSf6wqIfTr10ESAoHySOpxHk2gyVHVmUmRZ1WFiNR5tW3Q4qbq1qKpIVy1jH9ZRoTJwzg0J33W9W8SZzhM8Nj0nwuDqp6FS8ui7q9H3tgM+9bYYxETTg52NEw7jTVQx6KaZgG+p/8armoYPKh9DGhBYGY3oCmGiOYlm/phSlj3R63qghZIsBXKxeJDEs4cLolQ+9QYoRqqusdEGVCp7Ba/GtUPdBPYdTy+xuXGiALEpsCrqyUstxypHZVJEQfmqS8uy9UB8KFg2YepwhPgX1oN noname diff --git a/automation/tests/provisioning/pg-ha_test/config/ndb-secret.yaml b/automation/tests/provisioning/pg-ha_test/config/ndb-secret.yaml new file mode 100644 index 00000000..f3ac03b0 --- /dev/null +++ b/automation/tests/provisioning/pg-ha_test/config/ndb-secret.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Secret +metadata: + name: ndb-secret-pg-ha +type: Opaque +stringData: +# username and password for the test database + username: user1 + password: user1/pwd + ca_certificate: "" diff --git a/automation/tests/provisioning/pg-ha_test/config/ndb.yaml b/automation/tests/provisioning/pg-ha_test/config/ndb.yaml new file mode 100644 index 00000000..c0857802 --- /dev/null +++ b/automation/tests/provisioning/pg-ha_test/config/ndb.yaml @@ -0,0 +1,8 @@ +apiVersion: ndb.nutanix.com/v1alpha1 +kind: NDBServer 
+metadata: + name: ndb-pg +spec: + credentialSecret: ndb-secret-pg-ha + server: https://<NDB IP>:8443/era/v0.9 + skipCertificateVerification: true diff --git a/automation/tests/provisioning/pg-ha_test/config/pod.yaml b/automation/tests/provisioning/pg-ha_test/config/pod.yaml new file mode 100644 index 00000000..24056a89 --- /dev/null +++ b/automation/tests/provisioning/pg-ha_test/config/pod.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Pod +metadata: + name: app-pg-ha + labels: + app: app-pg-ha +spec: + containers: + - name: best-app + image: manavrajvanshinx/best-app:latest + resources: + limits: + memory: 512Mi + cpu: "1" + env: + - name: DBHOST + value: db-pg-ha-svc + - name: DBPORT + value: '80' + - name: PASSWORD + valueFrom: + secretKeyRef: + name: db-secret-pg-ha + key: password + ports: + - containerPort: 3000 + initContainers: + - name: init-db + image: busybox:1.28 + command: ['sh', '-c', "until nslookup db-pg-ha-svc.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo waiting for database service; sleep 2; done"] diff --git a/automation/tests/provisioning/pg-ha_test/pg-ha_test.go b/automation/tests/provisioning/pg-ha_test/pg-ha_test.go new file mode 100644 index 00000000..3c8c75e4 --- /dev/null +++ b/automation/tests/provisioning/pg-ha_test/pg-ha_test.go @@ -0,0 +1,212 @@ +package postgres_provisoning_ha + +// Basic imports +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/nutanix-cloud-native/ndb-operator/automation" + clientsetv1alpha1 "github.com/nutanix-cloud-native/ndb-operator/automation/clientset/v1alpha1" + util "github.com/nutanix-cloud-native/ndb-operator/automation/util" + "github.com/nutanix-cloud-native/ndb-operator/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" +) + +// A test suite is a collection of related test cases that are grouped together for testing a specific package or functionality.
+// The testify package builds on top of Go's built-in testing package and enhances it with additional features like assertions and test suite management. +// PostgresProvisioningHighAvailabilityTestSuite is a test suite struct that embeds testify's suite.Suite +type PostgresProvisioningHighAvailabilityTestSuite struct { + suite.Suite + ctx context.Context + setupTypes *util.SetupTypes + v1alpha1ClientSet *clientsetv1alpha1.V1alpha1Client + clientset *kubernetes.Clientset + tsm util.TestSuiteManager +} + +// SetupSuite is called once before running the tests in the suite +func (suite *PostgresProvisioningHighAvailabilityTestSuite) SetupSuite() { + var err error + var config *rest.Config + var ctx context.Context + var v1alpha1ClientSet *clientsetv1alpha1.V1alpha1Client + var clientset *kubernetes.Clientset + var tsm util.TestSuiteManager + + // Setup logger and context + logger, err := util.SetupLogger(fmt.Sprintf("%s/pg-provisioning-ha_test.log", automation.PROVISIONING_LOG_PATH), "pg-provisioning-ha: ") + if err != nil { + suite.T().FailNow() + } + ctx = util.SetupContext(context.Background(), logger) + + logger.Println("SetupSuite() starting...") + errBaseMsg := "Error: SetupSuite() ended" + + // Setup env + if err = util.CheckRequiredEnv(ctx); err != nil { + logger.Printf("%s! %s\n", errBaseMsg, err) + suite.T().FailNow() + } + + // Setup kubeconfig + config, err = util.SetupKubeconfig(ctx) + if err != nil { + logger.Printf("%s! %s\n", errBaseMsg, err) + suite.T().FailNow() + } + + // Setup scheme and clientsets + if v1alpha1ClientSet, clientset, err = util.SetupSchemeAndClientSet(ctx, config); err != nil { + logger.Printf("%s! %s\n", errBaseMsg, err) + suite.T().FailNow() + } + + // Setup yaml types + setupTypes, err := util.SetupTypeTemplates(ctx) + if err != nil { + logger.Printf("%s! 
%s\n", errBaseMsg, err) + suite.T().FailNow() + } + + // Get test suite manager + tsm = util.GetTestSuiteManager(ctx, *setupTypes) + + // Provision database and wait for database and pod to be ready + if err := tsm.Setup(ctx, setupTypes, clientset, v1alpha1ClientSet, suite.T()); err != nil { + logger.Printf("%s! %s\n", errBaseMsg, err) + suite.T().FailNow() + } + + // Set variables for the entire suite + suite.ctx = ctx + suite.setupTypes = setupTypes + suite.v1alpha1ClientSet = v1alpha1ClientSet + suite.clientset = clientset + suite.tsm = tsm + + logger.Println("SetupSuite() ended!") +} + +// TearDownSuite is called once after running the tests in the suite +func (suite *PostgresProvisioningHighAvailabilityTestSuite) TearDownSuite() { + var err error + + logger := util.GetLogger(suite.ctx) + logger.Println("TearDownSuite() starting...") + errBaseMsg := "Error: SetupSuite() ended" + + // Setup yaml types + setupTypes, err := util.SetupTypeTemplates(suite.ctx) + if err != nil { + logger.Printf("%s! %s\n", errBaseMsg, err) + suite.T().FailNow() + } + + // Delete resources and de-provision database + if err = suite.tsm.TearDown(suite.ctx, setupTypes, suite.clientset, suite.v1alpha1ClientSet, suite.T()); err != nil { + logger.Printf("%s! 
%s\n", errBaseMsg, err) + suite.T().FailNow() + } + + logger.Println("HA TearDownSuite() completed!") +} + +// This will run right before the test starts and receives the suite and test names as input +func (suite *PostgresProvisioningHighAvailabilityTestSuite) BeforeTest(suiteName, testName string) { + util.GetLogger(suite.ctx).Printf("******************** RUNNING HA TEST %s %s ********************\n", suiteName, testName) +} + +// This will run after test finishes and receives the suite and test names as input +func (suite *PostgresProvisioningHighAvailabilityTestSuite) AfterTest(suiteName, testName string) { + util.GetLogger(suite.ctx).Printf("******************** END HA TEST %s %s ********************\n", suiteName, testName) +} + +// Tests if provisioning is successful by checking if database status is 'READY' +func (suite *PostgresProvisioningHighAvailabilityTestSuite) TestProvisioningSuccess() { + logger := util.GetLogger(suite.ctx) + + databaseResponse, err := suite.tsm.GetDatabaseOrCloneResponse(suite.ctx, suite.setupTypes, suite.clientset, suite.v1alpha1ClientSet) + if err != nil { + logger.Printf("Error: TestProvisioningSuccess() failed! %v", err) + } else { + logger.Println("Database response retrieved.") + } + + assert := assert.New(suite.T()) + assert.Equal(common.DATABASE_CR_STATUS_READY, databaseResponse.Status, "The database status should be ready.") +} + +// Tests if app is able to connect to database via GET request +func (suite *PostgresProvisioningHighAvailabilityTestSuite) TestAppConnectivity() { + logger := util.GetLogger(suite.ctx) + + resp, err := suite.tsm.GetAppResponse(suite.ctx, suite.setupTypes, suite.clientset, automation.POSTGRES_SI_PROVISONING_LOCAL_PORT) + if err != nil { + logger.Printf("Error: TestAppConnectivity failed! 
%v", err) + } else { + logger.Println("App response retrieved.") + } + + assert := assert.New(suite.T()) + assert.Equal(200, resp.StatusCode, "The response status should be 200.") +} + +// Tests if creation of time machine is successful +func (suite *PostgresProvisioningHighAvailabilityTestSuite) TestTimeMachineSuccess() { + logger := util.GetLogger(suite.ctx) + assert := assert.New(suite.T()) + + if suite.setupTypes.Database.Spec.Instance.TMInfo.SLAName == "" || suite.setupTypes.Database.Spec.Instance.TMInfo.SLAName == "NONE" { + logger.Println("No time machine specified, test automatically passing.") + return + } + + tm, err := suite.tsm.GetTimemachineResponseByDatabaseId(suite.ctx, suite.setupTypes, suite.clientset, suite.v1alpha1ClientSet) + if err != nil { + logger.Printf("Error: TestTimeMachineSuccess() failed! %v", err) + assert.FailNow("Error: TestTimeMachineSuccess() failed! %v", err) + } else { + logger.Println("Timemachine response retrieved.") + } + + err = util.CheckTmInfo(suite.ctx, suite.setupTypes.Database, tm) + if err != nil { + logger.Printf("Error: TestTimeMachineSuccess() failed! %v", err) + assert.FailNow("Error: TestTimeMachineSuccess() failed! %v", err) + } else { + logger.Println("CheckTmInfo succesful") + } + + assert.Equal(common.DATABASE_CR_STATUS_READY, tm.Status, "The tm status should be ready.") +} + +// In order for 'go test' to run this suite, we need to create +// a normal test function and pass our suite to suite.Run +func TestPostgresProvisioningHighAvailabilityTestSuite(t *testing.T) { + suite.Run(t, new(PostgresProvisioningHighAvailabilityTestSuite)) +} + +// BeforeTestLogTime will run right before the test starts and logs the start time of the test.
+func (suite *PostgresProvisioningHighAvailabilityTestSuite) BeforeTestLogTime(suiteName, testName string) { + logger := util.GetLogger(suite.ctx) + startTime := time.Now() + // Store the start time in the context for use in AfterTestLogTime + ctx := context.WithValue(suite.ctx, "startTime", startTime) + suite.ctx = ctx + logger.Printf("******************** STARTING HA TEST %s %s at %v ********************\n", suiteName, testName, startTime) +} + +// AfterTestLogTime will run after the test finishes and calculates the duration of the test. +func (suite *PostgresProvisioningHighAvailabilityTestSuite) AfterTestLogTime(suiteName, testName string) { + logger := util.GetLogger(suite.ctx) + startTime := suite.ctx.Value("startTime").(time.Time) + endTime := time.Now() + duration := endTime.Sub(startTime) + logger.Printf("******************** ENDING HA TEST %s %s at %v (Duration: %v) ********************\n", suiteName, testName, endTime, duration) +} diff --git a/config/crd/bases/ndb.nutanix.com_databases.yaml b/config/crd/bases/ndb.nutanix.com_databases.yaml index e38496ab..12c424ac 100644 --- a/config/crd/bases/ndb.nutanix.com_databases.yaml +++ b/config/crd/bases/ndb.nutanix.com_databases.yaml @@ -77,23 +77,21 @@ spec: nodes: items: properties: - failoverMode: - type: string - nodeType: - type: string - nxClusterId: - type: string - nxClusterName: - type: string - remoteArchiveDestination: - type: string - role: - type: string + properties: + properties: + failover_mode: + type: string + node_type: + type: string + role: + type: string + required: + - node_type + type: object vmName: type: string required: - - nodeType - - vmName + - properties type: object type: array profiles: @@ -187,23 +185,21 @@ spec: nodes: items: properties: - failoverMode: - type: string - nodeType: - type: string - nxClusterId: - type: string - nxClusterName: - type: string - remoteArchiveDestination: - type: string - role: - type: string + properties: + properties: + failover_mode: + type: 
string + node_type: + type: string + role: + type: string + required: + - node_type + type: object vmName: type: string required: - - nodeType - - vmName + - properties type: object type: array profiles: From 71688cc1ae7ac8fe595448e4b9171a755024797d Mon Sep 17 00:00:00 2001 From: Cody Irion Date: Tue, 23 Apr 2024 22:24:22 -0400 Subject: [PATCH 60/60] Added check to the end to end test to assert that more than 1 node was created --- automation/tests/provisioning/pg-ha_test/pg-ha_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/automation/tests/provisioning/pg-ha_test/pg-ha_test.go b/automation/tests/provisioning/pg-ha_test/pg-ha_test.go index 3c8c75e4..fcea9964 100644 --- a/automation/tests/provisioning/pg-ha_test/pg-ha_test.go +++ b/automation/tests/provisioning/pg-ha_test/pg-ha_test.go @@ -140,6 +140,7 @@ func (suite *PostgresProvisioningHighAvailabilityTestSuite) TestProvisioningSucc assert := assert.New(suite.T()) assert.Equal(common.DATABASE_CR_STATUS_READY, databaseResponse.Status, "The database status should be ready.") + assert.Greater(len(databaseResponse.DatabaseNodes), 1) } // Tests if app is able to connect to database via GET request