From 31ed583475c083275ff83819c071896be0ac95e6 Mon Sep 17 00:00:00 2001
From: Humair Khan
Date: Tue, 17 Oct 2023 21:54:05 -0400
Subject: [PATCH] Add support for CA bundle injection.

This change allows users to specify a ConfigMap containing a CA
bundle, which is injected into the DSP API server pod.
---
 api/v1alpha1/dspipeline_types.go              | 16 ++
 api/v1alpha1/zz_generated.deepcopy.go         | 20 ++
 ...b.io_datasciencepipelinesapplications.yaml | 17 ++
 .../apiserver/artifact_script.yaml.tmpl       | 13 +-
 .../internal/apiserver/deployment.yaml.tmpl   | 24 +-
 controllers/config/defaults.go                | 13 +-
 controllers/dspipeline_controller.go          |  2 +-
 controllers/dspipeline_params.go              | 20 +-
 controllers/storage.go                        | 53 ++++-
 controllers/storage_test.go                   | 46 +++-
 controllers/suite_test.go                     |  2 +-
 .../created/configmap_artifact_script.yaml    | 11 +-
 .../created/configmap_artifact_script.yaml    | 11 +-
 .../configmap_artifact_script.yaml            | 20 +-
 .../created/configmap_artifact_script.yaml    | 11 +-
 .../created/configmap_artifact_script.yaml    | 11 +-
 .../testdata/declarative/case_6/config.yaml   | 12 +
 .../case_6/deploy/00_configmap.yaml           |  6 +
 .../declarative/case_6/deploy/01_cr.yaml      | 27 +++
 .../created/apiserver_deployment.yaml         | 206 ++++++++++++++++++
 .../created/configmap_artifact_script.yaml    | 42 ++++
 controllers/util/util.go                      | 41 ++++
 22 files changed, 591 insertions(+), 33 deletions(-)
 create mode 100644 controllers/testdata/declarative/case_6/config.yaml
 create mode 100644 controllers/testdata/declarative/case_6/deploy/00_configmap.yaml
 create mode 100644 controllers/testdata/declarative/case_6/deploy/01_cr.yaml
 create mode 100644 controllers/testdata/declarative/case_6/expected/created/apiserver_deployment.yaml
 create mode 100644 controllers/testdata/declarative/case_6/expected/created/configmap_artifact_script.yaml

diff --git a/api/v1alpha1/dspipeline_types.go b/api/v1alpha1/dspipeline_types.go
index bed158e47..d8922956f 100644
--- a/api/v1alpha1/dspipeline_types.go
+++ b/api/v1alpha1/dspipeline_types.go
@@ -102,6 +102,22 @@ type APIServer struct {
 	AutoUpdatePipelineDefaultVersion bool `json:"autoUpdatePipelineDefaultVersion"`
 	// Specify custom Pod resource requirements for this component.
 	Resources *ResourceRequirements `json:"resources,omitempty"`
+
+	// If the Object store/DB is behind a TLS-secured connection that is
+	// unrecognized by the host OpenShift/K8s cluster, then you can
+	// provide a PEM-formatted CA bundle to be injected into the DSP
+	// server pod so that it trusts this connection. The CA bundle should
+	// be provided as a value within a ConfigMap, mapped to a key.
+	CABundle *CABundle `json:"cABundle,omitempty"`
+}
+
+type CABundle struct {
+	// +kubebuilder:validation:Required
+	ConfigMapName string `json:"configMapName"`
+	// Key should map to a CA bundle. The key is also used to name
+	// the CA bundle file (e.g. ca-bundle.crt)
+	// +kubebuilder:validation:Required
+	ConfigMapKey string `json:"configMapKey"`
 }
 
 type ArtifactScriptConfigMap struct {
diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go
index babfd004f..864bddb54 100644
--- a/api/v1alpha1/zz_generated.deepcopy.go
+++ b/api/v1alpha1/zz_generated.deepcopy.go
@@ -39,6 +39,11 @@ func (in *APIServer) DeepCopyInto(out *APIServer) {
 		*out = new(ResourceRequirements)
 		(*in).DeepCopyInto(*out)
 	}
+	if in.CABundle != nil {
+		in, out := &in.CABundle, &out.CABundle
+		*out = new(CABundle)
+		**out = **in
+	}
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServer.
@@ -66,6 +71,21 @@ func (in *ArtifactScriptConfigMap) DeepCopy() *ArtifactScriptConfigMap {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CABundle) DeepCopyInto(out *CABundle) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CABundle.
+func (in *CABundle) DeepCopy() *CABundle {
+	if in == nil {
+		return nil
+	}
+	out := new(CABundle)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *DSPASpec) DeepCopyInto(out *DSPASpec) {
 	*out = *in
diff --git a/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml b/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml
index 6732f52fb..5bbb80be5 100644
--- a/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml
+++ b/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml
@@ -61,6 +61,23 @@ spec:
                 default: true
                 description: 'Default: true'
                 type: boolean
+              cABundle:
+                description: If the Object store/DB is behind a TLS-secured connection
+                  that is unrecognized by the host OpenShift/K8s cluster, then you
+                  can provide a PEM-formatted CA bundle to be injected into the DSP
+                  server pod so that it trusts this connection. The CA bundle should
+                  be provided as a value within a ConfigMap, mapped to a key.
+                properties:
+                  configMapKey:
+                    description: Key should map to a CA bundle. The key is also
+                      used to name the CA bundle file (e.g. ca-bundle.crt)
+                    type: string
+                  configMapName:
+                    type: string
+                required:
+                - configMapKey
+                - configMapName
+                type: object
               cacheImage:
                 type: string
               collectMetrics:
diff --git a/config/internal/apiserver/artifact_script.yaml.tmpl b/config/internal/apiserver/artifact_script.yaml.tmpl
index ed2d9f7d4..731cea1de 100644
--- a/config/internal/apiserver/artifact_script.yaml.tmpl
+++ b/config/internal/apiserver/artifact_script.yaml.tmpl
@@ -6,13 +6,22 @@ data:
       workspace_dir=$(echo $(context.taskRun.name) | sed -e "s/$(context.pipeline.name)-//g")
       workspace_dest=/workspace/${workspace_dir}/artifacts/$(context.pipelineRun.name)/$(context.taskRun.name)
       artifact_name=$(basename $2)
+
+      aws_cp() {
+{{ if .APIServer.CABundle }}
+        aws s3 --endpoint {{.ObjectStorageConnection.Endpoint}} --ca-bundle {{ .PipelinesCABundleMountPath }}/{{ .APIServer.CABundle.ConfigMapKey }} cp $1.tgz s3://{{.ObjectStorageConnection.Bucket}}/artifacts/$PIPELINERUN/$PIPELINETASK/$1.tgz
+{{ else }}
+        aws s3 --endpoint {{.ObjectStorageConnection.Endpoint}} cp $1.tgz s3://{{.ObjectStorageConnection.Bucket}}/artifacts/$PIPELINERUN/$PIPELINETASK/$1.tgz
+{{ end }}
+      }
+
       if [ -f "$workspace_dest/$artifact_name" ]; then
         echo sending to: ${workspace_dest}/${artifact_name}
         tar -cvzf $1.tgz -C ${workspace_dest} ${artifact_name}
-        aws s3 --endpoint {{.ObjectStorageConnection.Endpoint}} cp $1.tgz s3://{{.ObjectStorageConnection.Bucket}}/artifacts/$PIPELINERUN/$PIPELINETASK/$1.tgz
+        aws_cp $1
       elif [ -f "$2" ]; then
         tar -cvzf $1.tgz -C $(dirname $2) ${artifact_name}
-        aws s3 --endpoint {{.ObjectStorageConnection.Endpoint}} cp $1.tgz s3://{{.ObjectStorageConnection.Bucket}}/artifacts/$PIPELINERUN/$PIPELINETASK/$1.tgz
+        aws_cp $1
       else
        echo "$2 file does not exist. 
Skip artifact tracking for $1"
       fi
diff --git a/config/internal/apiserver/deployment.yaml.tmpl b/config/internal/apiserver/deployment.yaml.tmpl
index 1d9343764..ae0d064f4 100644
--- a/config/internal/apiserver/deployment.yaml.tmpl
+++ b/config/internal/apiserver/deployment.yaml.tmpl
@@ -50,6 +50,14 @@ spec:
             value: "{{.APIServer.ArtifactImage}}"
           - name: ARCHIVE_LOGS
             value: "{{.APIServer.ArchiveLogs}}"
+          {{ if .APIServer.CABundle }}
+          - name: ARTIFACT_COPY_STEP_CABUNDLE_CONFIGMAP_NAME
+            value: "{{.APIServer.CABundle.ConfigMapName}}"
+          - name: ARTIFACT_COPY_STEP_CABUNDLE_CONFIGMAP_KEY
+            value: "{{.APIServer.CABundle.ConfigMapKey}}"
+          - name: ARTIFACT_COPY_STEP_CABUNDLE_MOUNTPATH
+            value: {{ .PipelinesCABundleMountPath }}
+          {{ end }}
           - name: TRACK_ARTIFACTS
             value: "{{.APIServer.TrackArtifacts}}"
           - name: STRIP_EOF
@@ -145,13 +153,19 @@ spec:
               memory: {{.APIServer.Resources.Limits.Memory}}
             {{ end }}
           {{ end }}
-          {{ if .APIServer.EnableSamplePipeline }}
+          {{ if or .APIServer.EnableSamplePipeline .APIServer.CABundle }}
          volumeMounts:
+          {{ if .APIServer.EnableSamplePipeline }}
           - name: sample-config
             mountPath: /config/sample_config.json
             subPath: sample_config.json
           - name: sample-pipeline
             mountPath: /samples/
+          {{ end }}
+          {{ if .APIServer.CABundle }}
+          - mountPath: {{ .APIServerPipelinesCABundleMountPath }}
+            name: ca-bundle
+          {{ end }}
          {{ end }}
       {{ if .APIServer.EnableRoute }}
         - name: oauth-proxy
@@ -206,6 +220,14 @@ spec:
       - name: proxy-tls
         secret:
           secretName: ds-pipelines-proxy-tls-{{.Name}}
+      {{ if .APIServer.CABundle }}
+      - name: ca-bundle
+        configMap:
+          name: {{ .APIServer.CABundle.ConfigMapName }}
+          items:
+            - key: {{ .APIServer.CABundle.ConfigMapKey }}
+              path: {{ .APIServer.CABundle.ConfigMapKey }}
+      {{ end }}
       {{ if .APIServer.EnableSamplePipeline }}
       - name: sample-config
         configMap:
diff --git a/controllers/config/defaults.go b/controllers/config/defaults.go
index 4abfdb23a..b13ef3c78 100644
--- a/controllers/config/defaults.go
+++ b/controllers/config/defaults.go
@@ -24,12 +24,13 @@ import (
 )
 
 const (
-	DefaultImageValue = "MustSetInConfig"
-
-	MLPipelineUIConfigMapPrefix       = "ds-pipeline-ui-configmap-"
-	ArtifactScriptConfigMapNamePrefix = "ds-pipeline-artifact-script-"
-	ArtifactScriptConfigMapKey        = "artifact_script"
-	DSPServicePrefix                  = "ds-pipeline"
+	DefaultImageValue                   = "MustSetInConfig"
+	APIServerPipelinesCABundleMountPath = "/etc/pki/tls/certs"
+	PipelinesCABundleMountPath          = "/etc/pki/tls/certs"
+	MLPipelineUIConfigMapPrefix         = "ds-pipeline-ui-configmap-"
+	ArtifactScriptConfigMapNamePrefix   = "ds-pipeline-artifact-script-"
+	ArtifactScriptConfigMapKey          = "artifact_script"
+	DSPServicePrefix                    = "ds-pipeline"
 
 	DBSecretNamePrefix = "ds-pipeline-db-"
 	DBSecretKey        = "password"
diff --git a/controllers/dspipeline_controller.go b/controllers/dspipeline_controller.go
index ef641f1b7..d92275ab7 100644
--- a/controllers/dspipeline_controller.go
+++ b/controllers/dspipeline_controller.go
@@ -226,7 +226,7 @@ func (r *DSPAReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.
 	// Get Prereq Status (DB and ObjStore Ready)
 	dbAvailable := r.isDatabaseAccessible(ctx, dspa, params)
 	objStoreAvailable := r.isObjectStorageAccessible(ctx, dspa, params)
-	dspaPrereqsReady := (dbAvailable && objStoreAvailable)
+	dspaPrereqsReady := dbAvailable && objStoreAvailable
 
 	if dspaPrereqsReady {
 		// Manage Common Manifests
diff --git a/controllers/dspipeline_params.go b/controllers/dspipeline_params.go
index a58429874..1d8d8d465 100644
--- a/controllers/dspipeline_params.go
+++ b/controllers/dspipeline_params.go
@@ -40,8 +40,11 @@ type DSPAParams struct {
 	Namespace string
 	Owner     mf.Owner
 	APIServer *dspa.APIServer
+	APIServerPipelinesCABundleMountPath string
+	PipelinesCABundleMountPath          string
 	APIServerDefaultResourceName string
 	APIServerServiceName         string
+	APICustomPemCerts []byte
 	OAuthProxy string
 	ScheduledWorkflow *dspa.ScheduledWorkflow
 	ScheduledWorkflowDefaultResourceName string
@@ -441,8 +444,8 @@ func (p *DSPAParams) ExtractParams(ctx context.Context, dsp *dspa.DataSciencePip
 	p.Minio = dsp.Spec.ObjectStorage.Minio.DeepCopy()
 	p.OAuthProxy = config.GetStringConfigWithDefault(config.OAuthProxyImagePath, config.DefaultImageValue)
 	p.MLMD = dsp.Spec.MLMD.DeepCopy()
-
-	// TODO: If p. is nil we should create defaults
+	p.APIServerPipelinesCABundleMountPath = config.APIServerPipelinesCABundleMountPath
+	p.PipelinesCABundleMountPath = config.PipelinesCABundleMountPath
 
 	if p.APIServer != nil {
 
@@ -464,7 +467,20 @@ func (p *DSPAParams) ExtractParams(ctx context.Context, dsp *dspa.DataSciencePip
 				Key:  config.ArtifactScriptConfigMapKey,
 			}
 		}
+
+		// If a custom CA bundle is specified for injection into the DSP API
+		// server pod, also retrieve the bundle for use during the object
+		// storage health check.
+		if p.APIServer.CABundle != nil {
+			cfgKey, cfgName := p.APIServer.CABundle.ConfigMapKey, p.APIServer.CABundle.ConfigMapName
+			val, err := util.GetConfigMapValue(ctx, cfgKey, cfgName, p.Namespace, client, log)
+			if err != nil {
+				log.Error(err, "Encountered error when attempting to retrieve CABundle from configmap")
+				return err
+			}
+			p.APICustomPemCerts = []byte(val)
+		}
 	}
+
 	if p.PersistenceAgent != nil {
 		persistenceAgentImageFromConfig := config.GetStringConfigWithDefault(config.PersistenceAgentImagePath, config.DefaultImageValue)
 		setStringDefault(persistenceAgentImageFromConfig, &p.PersistenceAgent.Image)
diff --git a/controllers/storage.go b/controllers/storage.go
index 2d7454c9d..fedc3a2b9 100644
--- a/controllers/storage.go
+++ b/controllers/storage.go
@@ -18,6 +18,7 @@ package controllers
 
 import (
 	"context"
+	"crypto/x509"
 	"encoding/base64"
 	"errors"
 	"fmt"
@@ -26,6 +27,7 @@ import (
 	"github.com/minio/minio-go/v7/pkg/credentials"
 	dspav1alpha1 "github.com/opendatahub-io/data-science-pipelines-operator/api/v1alpha1"
 	"github.com/opendatahub-io/data-science-pipelines-operator/controllers/config"
+	"github.com/opendatahub-io/data-science-pipelines-operator/controllers/util"
 	"net/http"
 )
@@ -67,12 +69,46 @@ func createCredentialProvidersChain(accessKey, secretKey string) *credentials.Cr
 	return credentials.New(&credentials.Chain{Providers: providers})
 }
 
-var ConnectAndQueryObjStore = func(ctx context.Context, log logr.Logger, endpoint, bucket string, accesskey, secretkey []byte, secure bool) bool {
+func getHttpsTransportWithCACert(log logr.Logger, pemCerts []byte) (*http.Transport, error) {
+	transport, err := minio.DefaultTransport(true)
+	if err != nil {
+		return nil, fmt.Errorf("error creating default transport: %s", err)
+	}
+
+	if transport.TLSClientConfig.RootCAs == nil {
+		pool, err := x509.SystemCertPool()
+		if err != nil {
+			log.Error(err, "error initializing TLS pool, falling back to an empty cert pool")
+			transport.TLSClientConfig.RootCAs = x509.NewCertPool()
+		} else {
+			transport.TLSClientConfig.RootCAs = pool
+		}
+	}
+
+	if ok := transport.TLSClientConfig.RootCAs.AppendCertsFromPEM(pemCerts); !ok {
+		return nil, fmt.Errorf("error parsing CA Certificate, ensure provided certs are in valid PEM format")
+	}
+	return transport, nil
+}
+
+var ConnectAndQueryObjStore = func(ctx context.Context, log logr.Logger, endpoint, bucket string, accesskey, secretkey []byte, secure bool, pemCerts []byte) bool {
 	cred := createCredentialProvidersChain(string(accesskey), string(secretkey))
-	minioClient, err := minio.New(endpoint, &minio.Options{
+
+	opts := &minio.Options{
 		Creds:  cred,
 		Secure: secure,
-	})
+	}
+
+	if len(pemCerts) != 0 {
+		tr, err := getHttpsTransportWithCACert(log, pemCerts)
+		if err != nil {
+			log.Error(err, "Encountered error when processing custom ca bundle.")
+			return false
+		}
+		opts.Transport = tr
+	}
+
+	minioClient, err := minio.New(endpoint, opts)
 	if err != nil {
 		log.Info(fmt.Sprintf("Could not connect to object storage endpoint: %s", endpoint))
 		return false
@@ -92,6 +128,15 @@ var ConnectAndQueryObjStore = func(ctx context.Context, log logr.Logger, endpoin
 			return true
 		}
 	}
+
+	if util.IsX509UnknownAuthorityError(err) {
+		log.Error(err, "Encountered x509 UnknownAuthorityError when connecting to ObjectStore. "+
+			"If using a TLS-secured S3 connection with self-signed certs, you may specify a custom CABundle "+
+			"to mount on the DSP API Server via the DSPA CR under the spec.apiServer.cABundle field. If you have already "+
+			"provided a CABundle, verify the validity of the provided CABundle.")
+		return false
+	}
+
 	// Every other error means the endpoint is inaccessible, or the credentials provided do not have, at a minimum, GetObject permissions
 	log.Info(fmt.Sprintf("Could not connect to (%s), Error: %s", endpoint, err.Error()))
 	return false
@@ -129,7 +174,7 @@ func (r *DSPAReconciler) isObjectStorageAccessible(ctx context.Context, dsp *dsp
 		return false
 	}
 
-	verified := ConnectAndQueryObjStore(ctx, log, endpoint, params.ObjectStorageConnection.Bucket, accesskey, secretkey, *params.ObjectStorageConnection.Secure)
+	verified := ConnectAndQueryObjStore(ctx, log, endpoint, params.ObjectStorageConnection.Bucket, accesskey, secretkey, *params.ObjectStorageConnection.Secure, params.APICustomPemCerts)
 	if verified {
 		log.Info("Object Storage Health Check Successful")
 	} else {
diff --git a/controllers/storage_test.go b/controllers/storage_test.go
index 9706bc049..f2dfed106 100644
--- a/controllers/storage_test.go
+++ b/controllers/storage_test.go
@@ -90,6 +90,7 @@ func TestDeployStorage(t *testing.T) {
 	assert.True(t, created)
 	assert.Nil(t, err)
 }
+
 func TestDontDeployStorage(t *testing.T) {
 	testNamespace := "testnamespace"
 	testDSPAName := "testdspa"
@@ -188,7 +189,7 @@ func TestDefaultDeployBehaviorStorage(t *testing.T) {
 
 func TestIsDatabaseAccessibleTrue(t *testing.T) {
 	// Override the live connection function with a mock version
-	ConnectAndQueryObjStore = func(ctx context.Context, log logr.Logger, endpoint, bucket string, accesskey, secretkey []byte, secure bool) bool {
+	ConnectAndQueryObjStore = func(ctx context.Context, log logr.Logger, endpoint, bucket string, accesskey, secretkey []byte, secure bool, pemCerts []byte) bool {
 		return true
 	}
@@ -226,7 +227,7 @@ func TestIsDatabaseAccessibleTrue(t *testing.T) {
 
 func TestIsDatabaseNotAccessibleFalse(t *testing.T) {
 	// Override the live connection function with a mock version
-	
ConnectAndQueryObjStore = func(ctx context.Context, log logr.Logger, endpoint, bucket string, accesskey, secretkey []byte, secure bool) bool { + ConnectAndQueryObjStore = func(ctx context.Context, log logr.Logger, endpoint, bucket string, accesskey, secretkey []byte, secure bool, pemCerts []byte) bool { return false } @@ -264,7 +265,7 @@ func TestIsDatabaseNotAccessibleFalse(t *testing.T) { func TestDisabledHealthCheckReturnsTrue(t *testing.T) { // Override the live connection function with a mock version that would always return false if called - ConnectAndQueryObjStore = func(ctx context.Context, log logr.Logger, endpoint, bucket string, accesskey, secretkey []byte, secure bool) bool { + ConnectAndQueryObjStore = func(ctx context.Context, log logr.Logger, endpoint, bucket string, accesskey, secretkey []byte, secure bool, pemCerts []byte) bool { return false } @@ -304,7 +305,7 @@ func TestDisabledHealthCheckReturnsTrue(t *testing.T) { func TestIsDatabaseAccessibleBadAccessKey(t *testing.T) { // Override the live connection function with a mock version - ConnectAndQueryObjStore = func(ctx context.Context, log logr.Logger, endpoint, bucket string, accesskey, secretkey []byte, secure bool) bool { + ConnectAndQueryObjStore = func(ctx context.Context, log logr.Logger, endpoint, bucket string, accesskey, secretkey []byte, secure bool, pemCerts []byte) bool { return true } @@ -342,7 +343,7 @@ func TestIsDatabaseAccessibleBadAccessKey(t *testing.T) { func TestIsDatabaseAccessibleBadSecretKey(t *testing.T) { // Override the live connection function with a mock version - ConnectAndQueryObjStore = func(ctx context.Context, log logr.Logger, endpoint, bucket string, accesskey, secretkey []byte, secure bool) bool { + ConnectAndQueryObjStore = func(ctx context.Context, log logr.Logger, endpoint, bucket string, accesskey, secretkey []byte, secure bool, pemCerts []byte) bool { return true } @@ -440,3 +441,38 @@ func TestCreateCredentialProvidersChain(t *testing.T) { assert.Equal(t, test.expectedSigType, actualSigType) } } + +func TestGetHttpsTransportWithCACert(t *testing.T) { + validCert := ` +-----BEGIN CERTIFICATE----- +MIIDUTCCAjmgAwIBAgIINk8kYK1jtAYwDQYJKoZIhvcNAQELBQAwNjE0MDIGA1UE +Awwrb3BlbnNoaWZ0LXNlcnZpY2Utc2VydmluZy1zaWduZXJAMTY5NzQ4MDY4NjAe +Fw0yMzEwMTYxODI0NDVaFw0yNTEyMTQxODI0NDZaMDYxNDAyBgNVBAMMK29wZW5z +aGlmdC1zZXJ2aWNlLXNlcnZpbmctc2lnbmVyQDE2OTc0ODA2ODYwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDzSg9LmRucYyv9OUbMjbTGlvLFXl9+vsKd +rdZEq+jR5jr+lhxvU06rezHcTn7hXmm9g66YQhjfJ239VSh/YkQFqlaGY89lEtfr +fJzAkxpX0xmPhjAQ4fpsBs6LfkgC2v846oR2+gsI5hh5VuWNRS6BJlgRIQYUHBqM +p/d8QghkST1mheZKQZh4V9L1aB4Hgo4SCPNVGa/t0Q5sBZmlvC+6JqxsZW8miF/v +rs0oqm9dwhyAsTuLdDAD4bnLPXBQD7z+aq87uBNWcOrl0p/TdJy85lhE0dmbVKS6 +c21lQ4Va5JNje25fJmtEviFDAVXc/akMWSHf94ZfbWN8eah29oHNAgMBAAGjYzBh +MA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRxBglm +SbrHzzijOCr6EQ2LOTZi5jAfBgNVHSMEGDAWgBRxBglmSbrHzzijOCr6EQ2LOTZi +5jANBgkqhkiG9w0BAQsFAAOCAQEAeENDkKOUebgjb5Jg3d0WLHjqF+xMofXo1Gvg +wkfrZ35hQTMOiFUAffyPRoMfZOJ5x4zUVPkXN1qjVe/oIc19EFgb7ppDXUTJDndu +4RfZCF/yim5C6vUFmPPHjbFxnJIo85pKWGLwGg79iTnExDYMUUg5pRfK1uNgfro9 +jEtEoP3F3YVZ8g75TF70Ad9AHPWD2c1D8xOI4XwFvyi5BJJ+jsChl1e3v8D07ohj +Em/2fyF49JL+vAPFMWRFpaExUr3gMbELo4YABQGg024d623LK0ienEF0p4jMVNbP +S9IA40yOaVHMI51Fr1i1EIWvP8oJY8rAPWq45JnfFen3tOqKfw== +-----END CERTIFICATE----- +` + _, _, reconciler := CreateNewTestObjects() + + transport, err := getHttpsTransportWithCACert(reconciler.Log, []byte(validCert)) + assert.Nil(t, err) + assert.NotNil(t, transport) + + invalidCert := 
"invalidCert" + transport, err = getHttpsTransportWithCACert(reconciler.Log, []byte(invalidCert)) + assert.NotNil(t, err) + assert.Nil(t, transport) +} diff --git a/controllers/suite_test.go b/controllers/suite_test.go index 16853901f..c8e9c2801 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -76,7 +76,7 @@ var _ = BeforeEach(func() { ConnectAndQueryDatabase = func(host string, port string, username string, password string, dbname string) bool { return true } - ConnectAndQueryObjStore = func(ctx context.Context, log logr.Logger, endpoint, bucket string, accesskey, secretkey []byte, secure bool) bool { + ConnectAndQueryObjStore = func(ctx context.Context, log logr.Logger, endpoint, bucket string, accesskey, secretkey []byte, secure bool, pemCerts []byte) bool { return true } }) diff --git a/controllers/testdata/declarative/case_0/expected/created/configmap_artifact_script.yaml b/controllers/testdata/declarative/case_0/expected/created/configmap_artifact_script.yaml index 5863fd2d1..cc7a02b1a 100644 --- a/controllers/testdata/declarative/case_0/expected/created/configmap_artifact_script.yaml +++ b/controllers/testdata/declarative/case_0/expected/created/configmap_artifact_script.yaml @@ -6,13 +6,20 @@ data: workspace_dir=$(echo $(context.taskRun.name) | sed -e "s/$(context.pipeline.name)-//g") workspace_dest=/workspace/${workspace_dir}/artifacts/$(context.pipelineRun.name)/$(context.taskRun.name) artifact_name=$(basename $2) + + aws_cp() { + + aws s3 --endpoint http://minio-testdsp0.default.svc.cluster.local:9000 cp $1.tgz s3://mlpipeline/artifacts/$PIPELINERUN/$PIPELINETASK/$1.tgz + + } + if [ -f "$workspace_dest/$artifact_name" ]; then echo sending to: ${workspace_dest}/${artifact_name} tar -cvzf $1.tgz -C ${workspace_dest} ${artifact_name} - aws s3 --endpoint http://minio-testdsp0.default.svc.cluster.local:9000 cp $1.tgz s3://mlpipeline/artifacts/$PIPELINERUN/$PIPELINETASK/$1.tgz + aws_cp $1 elif [ -f "$2" ]; then tar -cvzf $1.tgz -C $(dirname $2) ${artifact_name} - aws s3 --endpoint http://minio-testdsp0.default.svc.cluster.local:9000 cp $1.tgz s3://mlpipeline/artifacts/$PIPELINERUN/$PIPELINETASK/$1.tgz + aws_cp $1 else echo "$2 file does not exist. 
Skip artifact tracking for $1" fi diff --git a/controllers/testdata/declarative/case_2/expected/created/configmap_artifact_script.yaml b/controllers/testdata/declarative/case_2/expected/created/configmap_artifact_script.yaml index beb358966..ad0f15ce8 100644 --- a/controllers/testdata/declarative/case_2/expected/created/configmap_artifact_script.yaml +++ b/controllers/testdata/declarative/case_2/expected/created/configmap_artifact_script.yaml @@ -6,13 +6,20 @@ data: workspace_dir=$(echo $(context.taskRun.name) | sed -e "s/$(context.pipeline.name)-//g") workspace_dest=/workspace/${workspace_dir}/artifacts/$(context.pipelineRun.name)/$(context.taskRun.name) artifact_name=$(basename $2) + + aws_cp() { + + aws s3 --endpoint http://minio-testdsp2.default.svc.cluster.local:9000 cp $1.tgz s3://mlpipeline/artifacts/$PIPELINERUN/$PIPELINETASK/$1.tgz + + } + if [ -f "$workspace_dest/$artifact_name" ]; then echo sending to: ${workspace_dest}/${artifact_name} tar -cvzf $1.tgz -C ${workspace_dest} ${artifact_name} - aws s3 --endpoint http://minio-testdsp2.default.svc.cluster.local:9000 cp $1.tgz s3://mlpipeline/artifacts/$PIPELINERUN/$PIPELINETASK/$1.tgz + aws_cp $1 elif [ -f "$2" ]; then tar -cvzf $1.tgz -C $(dirname $2) ${artifact_name} - aws s3 --endpoint http://minio-testdsp2.default.svc.cluster.local:9000 cp $1.tgz s3://mlpipeline/artifacts/$PIPELINERUN/$PIPELINETASK/$1.tgz + aws_cp $1 else echo "$2 file does not exist. Skip artifact tracking for $1" fi diff --git a/controllers/testdata/declarative/case_3/expected/not_created/configmap_artifact_script.yaml b/controllers/testdata/declarative/case_3/expected/not_created/configmap_artifact_script.yaml index 2c1b87ec7..3c41745dc 100644 --- a/controllers/testdata/declarative/case_3/expected/not_created/configmap_artifact_script.yaml +++ b/controllers/testdata/declarative/case_3/expected/not_created/configmap_artifact_script.yaml @@ -3,9 +3,23 @@ data: somekey: |- #!/usr/bin/env sh push_artifact() { - if [ -f "$2" ]; then - tar -cvzf $1.tgz $2 - aws s3 --endpoint http://minio-testdsp3.default.svc.cluster.local:9000 cp $1.tgz s3://mlpipeline/artifacts/$PIPELINERUN/$PIPELINETASK/$1.tgz + workspace_dir=$(echo $(context.taskRun.name) | sed -e "s/$(context.pipeline.name)-//g") + workspace_dest=/workspace/${workspace_dir}/artifacts/$(context.pipelineRun.name)/$(context.taskRun.name) + artifact_name=$(basename $2) + + aws_cp() { + + aws s3 --endpoint http://minio-testdsp3.default.svc.cluster.local:9000 cp $1.tgz s3://mlpipeline/artifacts/$PIPELINERUN/$PIPELINETASK/$1.tgz + + } + + if [ -f "$workspace_dest/$artifact_name" ]; then + echo sending to: ${workspace_dest}/${artifact_name} + tar -cvzf $1.tgz -C ${workspace_dest} ${artifact_name} + aws_cp $1 + elif [ -f "$2" ]; then + tar -cvzf $1.tgz -C $(dirname $2) ${artifact_name} + aws_cp $1 else echo "$2 file does not exist. 
Skip artifact tracking for $1" fi diff --git a/controllers/testdata/declarative/case_4/expected/created/configmap_artifact_script.yaml b/controllers/testdata/declarative/case_4/expected/created/configmap_artifact_script.yaml index cc4ba319b..e0bddf319 100644 --- a/controllers/testdata/declarative/case_4/expected/created/configmap_artifact_script.yaml +++ b/controllers/testdata/declarative/case_4/expected/created/configmap_artifact_script.yaml @@ -6,13 +6,20 @@ data: workspace_dir=$(echo $(context.taskRun.name) | sed -e "s/$(context.pipeline.name)-//g") workspace_dest=/workspace/${workspace_dir}/artifacts/$(context.pipelineRun.name)/$(context.taskRun.name) artifact_name=$(basename $2) + + aws_cp() { + + aws s3 --endpoint http://minio-testdsp4.default.svc.cluster.local:9000 cp $1.tgz s3://mlpipeline/artifacts/$PIPELINERUN/$PIPELINETASK/$1.tgz + + } + if [ -f "$workspace_dest/$artifact_name" ]; then echo sending to: ${workspace_dest}/${artifact_name} tar -cvzf $1.tgz -C ${workspace_dest} ${artifact_name} - aws s3 --endpoint http://minio-testdsp4.default.svc.cluster.local:9000 cp $1.tgz s3://mlpipeline/artifacts/$PIPELINERUN/$PIPELINETASK/$1.tgz + aws_cp $1 elif [ -f "$2" ]; then tar -cvzf $1.tgz -C $(dirname $2) ${artifact_name} - aws s3 --endpoint http://minio-testdsp4.default.svc.cluster.local:9000 cp $1.tgz s3://mlpipeline/artifacts/$PIPELINERUN/$PIPELINETASK/$1.tgz + aws_cp $1 else echo "$2 file does not exist. Skip artifact tracking for $1" fi diff --git a/controllers/testdata/declarative/case_5/expected/created/configmap_artifact_script.yaml b/controllers/testdata/declarative/case_5/expected/created/configmap_artifact_script.yaml index e384c59cb..33aebad09 100644 --- a/controllers/testdata/declarative/case_5/expected/created/configmap_artifact_script.yaml +++ b/controllers/testdata/declarative/case_5/expected/created/configmap_artifact_script.yaml @@ -6,13 +6,20 @@ data: workspace_dir=$(echo $(context.taskRun.name) | sed -e "s/$(context.pipeline.name)-//g") workspace_dest=/workspace/${workspace_dir}/artifacts/$(context.pipelineRun.name)/$(context.taskRun.name) artifact_name=$(basename $2) + + aws_cp() { + + aws s3 --endpoint http://minio-testdsp5.default.svc.cluster.local:9000 cp $1.tgz s3://mlpipeline/artifacts/$PIPELINERUN/$PIPELINETASK/$1.tgz + + } + if [ -f "$workspace_dest/$artifact_name" ]; then echo sending to: ${workspace_dest}/${artifact_name} tar -cvzf $1.tgz -C ${workspace_dest} ${artifact_name} - aws s3 --endpoint http://minio-testdsp5.default.svc.cluster.local:9000 cp $1.tgz s3://mlpipeline/artifacts/$PIPELINERUN/$PIPELINETASK/$1.tgz + aws_cp $1 elif [ -f "$2" ]; then tar -cvzf $1.tgz -C $(dirname $2) ${artifact_name} - aws s3 --endpoint http://minio-testdsp5.default.svc.cluster.local:9000 cp $1.tgz s3://mlpipeline/artifacts/$PIPELINERUN/$PIPELINETASK/$1.tgz + aws_cp $1 else echo "$2 file does not exist. 
Skip artifact tracking for $1" fi diff --git a/controllers/testdata/declarative/case_6/config.yaml b/controllers/testdata/declarative/case_6/config.yaml new file mode 100644 index 000000000..35a3506f9 --- /dev/null +++ b/controllers/testdata/declarative/case_6/config.yaml @@ -0,0 +1,12 @@ +# When a minimal DSPA is deployed +Images: + ApiServer: api-server:test6 + Artifact: artifact-manager:test6 + PersistentAgent: persistenceagent:test6 + ScheduledWorkflow: scheduledworkflow:test6 + Cache: ubi-minimal:test6 + MoveResultsImage: busybox:test6 + MlPipelineUI: frontend:test6 + MariaDB: mariadb:test6 + Minio: minio:test6 + OAuthProxy: oauth-proxy:test6 diff --git a/controllers/testdata/declarative/case_6/deploy/00_configmap.yaml b/controllers/testdata/declarative/case_6/deploy/00_configmap.yaml new file mode 100644 index 000000000..0b36acbab --- /dev/null +++ b/controllers/testdata/declarative/case_6/deploy/00_configmap.yaml @@ -0,0 +1,6 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: testcabundleconfigmap6 +data: + testcabundleconfigmapkey6.crt: testcabundleconfigmapvalue6 diff --git a/controllers/testdata/declarative/case_6/deploy/01_cr.yaml b/controllers/testdata/declarative/case_6/deploy/01_cr.yaml new file mode 100644 index 000000000..a3e909ecd --- /dev/null +++ b/controllers/testdata/declarative/case_6/deploy/01_cr.yaml @@ -0,0 +1,27 @@ +apiVersion: datasciencepipelinesapplications.opendatahub.io/v1alpha1 +kind: DataSciencePipelinesApplication +metadata: + name: testdsp6 +spec: + apiServer: + deploy: true + enableSamplePipeline: false + cABundle: + configMapName: testcabundleconfigmap6 + configMapKey: testcabundleconfigmapkey6.crt + persistenceAgent: + deploy: false + scheduledWorkflow: + deploy: false + mlpipelineUI: + deploy: false + image: frontend:test0 + database: + mariaDB: + deploy: false + objectStorage: + minio: + deploy: false + image: minio:test0 + mlmd: + deploy: false diff --git a/controllers/testdata/declarative/case_6/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_6/expected/created/apiserver_deployment.yaml new file mode 100644 index 000000000..96c5967e8 --- /dev/null +++ b/controllers/testdata/declarative/case_6/expected/created/apiserver_deployment.yaml @@ -0,0 +1,206 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ds-pipeline-testdsp6 + namespace: default + labels: + app: ds-pipeline-testdsp6 + component: data-science-pipelines + dspa: testdsp6 +spec: + selector: + matchLabels: + app: ds-pipeline-testdsp6 + component: data-science-pipelines + dspa: testdsp6 + template: + metadata: + labels: + app: ds-pipeline-testdsp6 + component: data-science-pipelines + dspa: testdsp6 + spec: + containers: + - env: + - name: POD_NAMESPACE + value: "default" + - name: DBCONFIG_USER + value: "mlpipeline" + - name: DBCONFIG_PASSWORD + valueFrom: + secretKeyRef: + key: "password" + name: "ds-pipeline-db-testdsp6" + - name: DBCONFIG_DBNAME + value: "mlpipeline" + - name: DBCONFIG_HOST + value: "mariadb-testdsp6.default.svc.cluster.local" + - name: DBCONFIG_PORT + value: "3306" + - name: ARTIFACT_BUCKET + value: "mlpipeline" + - name: ARTIFACT_ENDPOINT + value: "http://minio-testdsp6.default.svc.cluster.local:9000" + - name: ARTIFACT_SCRIPT + valueFrom: + configMapKeyRef: + key: "artifact_script" + name: "ds-pipeline-artifact-script-testdsp6" + - name: ARTIFACT_IMAGE + value: "artifact-manager:test6" + - name: ARCHIVE_LOGS + value: "false" + - name: ARTIFACT_COPY_STEP_CABUNDLE_CONFIGMAP_NAME + value: testcabundleconfigmap6 + - name: 
ARTIFACT_COPY_STEP_CABUNDLE_CONFIGMAP_KEY + value: testcabundleconfigmapkey6.crt + - name: ARTIFACT_COPY_STEP_CABUNDLE_MOUNTPATH + value: /etc/pki/tls/certs + - name: TRACK_ARTIFACTS + value: "true" + - name: STRIP_EOF + value: "true" + - name: PIPELINE_RUNTIME + value: "tekton" + - name: DEFAULTPIPELINERUNNERSERVICEACCOUNT + value: "pipeline-runner-testdsp6" + - name: INJECT_DEFAULT_SCRIPT + value: "true" + - name: APPLY_TEKTON_CUSTOM_RESOURCE + value: "true" + - name: TERMINATE_STATUS + value: "Cancelled" + - name: AUTO_UPDATE_PIPELINE_DEFAULT_VERSION + value: "true" + - name: DBCONFIG_CONMAXLIFETIMESEC + value: "120" + - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_HOST + value: "ds-pipeline-visualizationserver" + - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_PORT + value: "8888" + - name: OBJECTSTORECONFIG_BUCKETNAME + value: "mlpipeline" + - name: OBJECTSTORECONFIG_ACCESSKEY + valueFrom: + secretKeyRef: + key: "accesskey" + name: "mlpipeline-minio-artifact" + - name: OBJECTSTORECONFIG_SECRETACCESSKEY + valueFrom: + secretKeyRef: + key: "secretkey" + name: "mlpipeline-minio-artifact" + - name: OBJECTSTORECONFIG_SECURE + value: "false" + - name: MINIO_SERVICE_SERVICE_HOST + value: "minio-testdsp6.default.svc.cluster.local" + - name: MINIO_SERVICE_SERVICE_PORT + value: "9000" + - name: CACHE_IMAGE + value: "ubi-minimal:test6" + - name: MOVERESULTS_IMAGE + value: "busybox:test6" + image: api-server:test6 + imagePullPolicy: Always + name: ds-pipeline-api-server + ports: + - containerPort: 8888 + name: http + protocol: TCP + - containerPort: 8887 + name: grpc + protocol: TCP + livenessProbe: + exec: + command: + - wget + - -q + - -S + - -O + - '-' + - http://localhost:8888/apis/v1beta1/healthz + initialDelaySeconds: 3 + periodSeconds: 5 + timeoutSeconds: 2 + readinessProbe: + exec: + command: + - wget + - -q + - -S + - -O + - '-' + - http://localhost:8888/apis/v1beta1/healthz + initialDelaySeconds: 3 + periodSeconds: 5 + timeoutSeconds: 2 + resources: + requests: + cpu: 250m + memory: 500Mi + limits: + cpu: 500m + memory: 1Gi + volumeMounts: + - name: ca-bundle + mountPath: /etc/pki/tls/certs + - name: oauth-proxy + args: + - --https-address=:8443 + - --provider=openshift + - --openshift-service-account=ds-pipeline-testdsp6 + - --upstream=http://localhost:8888 + - --tls-cert=/etc/tls/private/tls.crt + - --tls-key=/etc/tls/private/tls.key + - --cookie-secret=SECRET + - '--openshift-delegate-urls={"/": {"group":"route.openshift.io","resource":"routes","verb":"get","name":"ds-pipeline-testdsp6","namespace":"default"}}' + - '--openshift-sar={"namespace":"default","resource":"routes","resourceName":"ds-pipeline-testdsp6","verb":"get","resourceAPIGroup":"route.openshift.io"}' + - --skip-auth-regex='(^/metrics|^/apis/v1beta1/healthz)' + image: oauth-proxy:test6 + ports: + - containerPort: 8443 + name: oauth + protocol: TCP + livenessProbe: + httpGet: + path: /oauth/healthz + port: oauth + scheme: HTTPS + initialDelaySeconds: 30 + timeoutSeconds: 1 + periodSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /oauth/healthz + port: oauth + scheme: HTTPS + initialDelaySeconds: 5 + timeoutSeconds: 1 + periodSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + resources: + limits: + cpu: 100m + memory: 256Mi + requests: + cpu: 100m + memory: 256Mi + volumeMounts: + - mountPath: /etc/tls/private + name: proxy-tls + volumes: + - name: proxy-tls + secret: + secretName: ds-pipelines-proxy-tls-testdsp6 + defaultMode: 420 + - name: ca-bundle + configMap: + name: 
testcabundleconfigmap6
+            items:
+              - key: testcabundleconfigmapkey6.crt
+                path: testcabundleconfigmapkey6.crt
+            defaultMode: 420
+      serviceAccountName: ds-pipeline-testdsp6
diff --git a/controllers/testdata/declarative/case_6/expected/created/configmap_artifact_script.yaml b/controllers/testdata/declarative/case_6/expected/created/configmap_artifact_script.yaml
new file mode 100644
index 000000000..2cbb8402e
--- /dev/null
+++ b/controllers/testdata/declarative/case_6/expected/created/configmap_artifact_script.yaml
@@ -0,0 +1,42 @@
+apiVersion: v1
+data:
+  artifact_script: |-
+    #!/usr/bin/env sh
+    push_artifact() {
+      workspace_dir=$(echo $(context.taskRun.name) | sed -e "s/$(context.pipeline.name)-//g")
+      workspace_dest=/workspace/${workspace_dir}/artifacts/$(context.pipelineRun.name)/$(context.taskRun.name)
+      artifact_name=$(basename $2)
+
+      aws_cp() {
+
+        aws s3 --endpoint http://minio-testdsp6.default.svc.cluster.local:9000 --ca-bundle /etc/pki/tls/certs/testcabundleconfigmapkey6.crt cp $1.tgz s3://mlpipeline/artifacts/$PIPELINERUN/$PIPELINETASK/$1.tgz
+
+      }
+
+      if [ -f "$workspace_dest/$artifact_name" ]; then
+        echo sending to: ${workspace_dest}/${artifact_name}
+        tar -cvzf $1.tgz -C ${workspace_dest} ${artifact_name}
+        aws_cp $1
+      elif [ -f "$2" ]; then
+        tar -cvzf $1.tgz -C $(dirname $2) ${artifact_name}
+        aws_cp $1
+      else
+        echo "$2 file does not exist. Skip artifact tracking for $1"
+      fi
+    }
+    push_log() {
+      cat /var/log/containers/$PODNAME*$NAMESPACE*step-main*.log > step-main.log
+      push_artifact main-log step-main.log
+    }
+    strip_eof() {
+      if [ -f "$2" ]; then
+        awk 'NF' $2 | head -c -1 > $1_temp_save && cp $1_temp_save $2
+      fi
+    }
+kind: ConfigMap
+metadata:
+  name: ds-pipeline-artifact-script-testdsp6
+  namespace: default
+  labels:
+    app: ds-pipeline-testdsp6
+    component: data-science-pipelines
diff --git a/controllers/util/util.go b/controllers/util/util.go
index 4e86338fc..555ce0938 100644
--- a/controllers/util/util.go
+++ b/controllers/util/util.go
@@ -17,8 +17,17 @@ limitations under the License.
 package util
 
 import (
+	"context"
+	"crypto/x509"
+	"fmt"
+	"github.com/go-logr/logr"
 	appsv1 "k8s.io/api/apps/v1"
+	v1 "k8s.io/api/core/v1"
+	apierrs "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"net/url"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 )
 
 // GetConditionByType returns condition of type condType if it exists in conditions, otherwise
@@ -44,3 +53,35 @@ func GetDeploymentCondition(status appsv1.DeploymentStatus, condType appsv1.Depl
 func BoolPointer(b bool) *bool {
 	return &b
 }
+
+// IsX509UnknownAuthorityError checks whether an error is of type x509.UnknownAuthorityError.
+func IsX509UnknownAuthorityError(err error) bool {
+	urlErr, ok := err.(*url.Error)
+	if !ok {
+		return false
+	}
+	_, ok = urlErr.Err.(x509.UnknownAuthorityError)
+	return ok
+}
+
+// GetConfigMapValue fetches the value mapped to the given key from the named ConfigMap.
+func GetConfigMapValue(ctx context.Context, cfgKey, cfgName, ns string, client client.Client, log logr.Logger) (string, error) {
+	cfgMap := &v1.ConfigMap{}
+	namespacedName := types.NamespacedName{
+		Name:      cfgName,
+		Namespace: ns,
+	}
+	err := client.Get(ctx, namespacedName, cfgMap)
+	if err != nil && apierrs.IsNotFound(err) {
+		log.Error(err, fmt.Sprintf("ConfigMap [%s] was not found in namespace [%s]", cfgName, ns))
+		return "", err
+	} else if err != nil {
+		log.Error(err, fmt.Sprintf("Encountered error when attempting to fetch ConfigMap 
[%s]", cfgName))
+		return "", err
+	}
+	if val, ok := cfgMap.Data[cfgKey]; ok {
+		return val, nil
+	} else {
+		return "", fmt.Errorf("ConfigMap %s does not contain specified key %s", cfgName, cfgKey), ""
+	}
+}
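
Illustrative appendix (not part of the patch): the sketch below is a minimal standalone Go program showing the same technique the patch wires into storage.go — appending a user-provided PEM CA bundle to the system root pool on a minio-go transport, then classifying an x509.UnknownAuthorityError the same way util.IsX509UnknownAuthorityError does. The endpoint, credentials, bucket name, and bundle path are hypothetical placeholders, and the probe uses BucketExists rather than the operator's actual health-check call.

package main

import (
	"context"
	"crypto/x509"
	"fmt"
	"net/url"
	"os"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Hypothetical inputs: a PEM bundle on disk and a TLS-secured endpoint.
	pemCerts, err := os.ReadFile("/etc/pki/tls/certs/ca-bundle.crt")
	if err != nil {
		fmt.Println("could not read bundle:", err)
		return
	}

	// Same approach as getHttpsTransportWithCACert: start from minio's
	// default transport, fall back to an empty pool if the system pool is
	// unavailable, then append the custom PEM certs.
	transport, err := minio.DefaultTransport(true)
	if err != nil {
		fmt.Println("could not create transport:", err)
		return
	}
	if transport.TLSClientConfig.RootCAs == nil {
		if pool, perr := x509.SystemCertPool(); perr == nil {
			transport.TLSClientConfig.RootCAs = pool
		} else {
			transport.TLSClientConfig.RootCAs = x509.NewCertPool()
		}
	}
	if !transport.TLSClientConfig.RootCAs.AppendCertsFromPEM(pemCerts) {
		fmt.Println("bundle is not valid PEM")
		return
	}

	client, err := minio.New("minio.example.svc:9000", &minio.Options{
		Creds:     credentials.NewStaticV4("accesskey", "secretkey", ""),
		Secure:    true,
		Transport: transport,
	})
	if err != nil {
		fmt.Println("could not construct client:", err)
		return
	}

	// BucketExists issues a real request immediately, making it a cheap
	// connectivity probe. An x509.UnknownAuthorityError here means the
	// server cert is still not signed by any CA in the pool.
	_, err = client.BucketExists(context.Background(), "mlpipeline")
	if urlErr, ok := err.(*url.Error); ok {
		if _, ok := urlErr.Err.(x509.UnknownAuthorityError); ok {
			fmt.Println("unknown certificate authority: check the provided CA bundle")
			return
		}
	}
	fmt.Println("probe finished, err:", err)
}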