From 4403aa432aa4982157a4203bdb77b18754be8070 Mon Sep 17 00:00:00 2001 From: Tristan Holaday <40547442+TristanHoladay@users.noreply.github.com> Date: Wed, 6 Nov 2024 12:56:25 -0700 Subject: [PATCH] feat: switch currently viewed cluster (#499) --- .github/workflows/pr-tests.yaml | 2 +- src/pkg/api/docs/docs.go | 62 +++++++ src/pkg/api/docs/swagger.json | 62 +++++++ src/pkg/api/docs/swagger.yaml | 39 +++++ src/pkg/api/handlers.go | 20 +++ src/pkg/api/start.go | 12 +- src/pkg/k8s/client/client.go | 106 ++++++++++-- src/pkg/k8s/session/session.go | 120 ++++++++++--- src/pkg/k8s/session/session_test.go | 34 ++-- src/pkg/stream/common.go | 3 +- src/test/e2e/api_test.go | 37 ++++ tasks/setup.yaml | 6 +- tasks/test.yaml | 12 +- ui/package.json | 5 +- ui/playwright.config.ts | 12 +- ui/src/lib/features/navigation/index.ts | 4 +- .../navbar/clustermenu/component.svelte | 105 ++++++++++++ .../navbar/clustermenu/component.test.ts | 162 ++++++++++++++++++ .../navbar/clustermenu/loading.svelte | 41 +++++ .../navigation/navbar/clustermenu/store.ts | 20 +++ .../navigation/navbar/component.svelte | 7 + .../utils/cluster-check/cluster-check.test.ts | 21 +++ .../lib/utils/cluster-check/cluster-check.ts | 7 + ui/src/routes/+layout.svelte | 15 +- ui/src/routes/+layout.ts | 8 + .../{comonent.test.ts => component.test.ts} | 0 .../{comonent.test.ts => component.test.ts} | 0 ui/tests/connections.spec.ts | 120 +++++++++++++ ui/tests/reconnect.spec.ts | 51 ------ 29 files changed, 960 insertions(+), 133 deletions(-) create mode 100644 ui/src/lib/features/navigation/navbar/clustermenu/component.svelte create mode 100644 ui/src/lib/features/navigation/navbar/clustermenu/component.test.ts create mode 100644 ui/src/lib/features/navigation/navbar/clustermenu/loading.svelte create mode 100644 ui/src/lib/features/navigation/navbar/clustermenu/store.ts rename ui/src/routes/monitor/pepr/[[stream]]/(details)/denied-details/{comonent.test.ts => component.test.ts} (100%) rename ui/src/routes/monitor/pepr/[[stream]]/(details)/mutated-details/{comonent.test.ts => component.test.ts} (100%) create mode 100644 ui/tests/connections.spec.ts delete mode 100644 ui/tests/reconnect.spec.ts diff --git a/.github/workflows/pr-tests.yaml b/.github/workflows/pr-tests.yaml index 829584186..be08a3dd6 100644 --- a/.github/workflows/pr-tests.yaml +++ b/.github/workflows/pr-tests.yaml @@ -31,7 +31,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - type: [unit, e2e, e2e-in-cluster, api, local-auth, e2e-reconnect] + type: [unit, e2e, e2e-in-cluster, api, local-auth, e2e-connections] steps: - name: Checkout uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 diff --git a/src/pkg/api/docs/docs.go b/src/pkg/api/docs/docs.go index 39527315c..b026e6254 100644 --- a/src/pkg/api/docs/docs.go +++ b/src/pkg/api/docs/docs.go @@ -28,6 +28,52 @@ const docTemplate = `{ } } }, + "/api/v1/cluster": { + "post": { + "description": "Switch the currently viewed cluster", + "consumes": [ + "application/json" + ], + "tags": [ + "cluster" + ], + "parameters": [ + { + "description": "example body: {", + "name": "request", + "in": "body", + "required": true, + "schema": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/client.ClusterInfo" + } + } + } + ], + "responses": { + "200": { + "description": "OK" + } + } + } + }, + "/api/v1/clusters": { + "get": { + "description": "get list of clusters with context names from local kubeconfig", + "produces": [ + "application/json" + ], + "tags": [ + "clusters" + ], + "responses": { + 
"200": { + "description": "OK" + } + } + } + }, "/api/v1/resources/cluster-ops/hpas": { "get": { "description": "Get HPAs", @@ -3213,6 +3259,22 @@ const docTemplate = `{ } } } + }, + "definitions": { + "client.ClusterInfo": { + "type": "object", + "properties": { + "context": { + "type": "string" + }, + "name": { + "type": "string" + }, + "selected": { + "type": "boolean" + } + } + } } }` diff --git a/src/pkg/api/docs/swagger.json b/src/pkg/api/docs/swagger.json index 7cdb68b81..8715a0b67 100644 --- a/src/pkg/api/docs/swagger.json +++ b/src/pkg/api/docs/swagger.json @@ -17,6 +17,52 @@ } } }, + "/api/v1/cluster": { + "post": { + "description": "Switch the currently viewed cluster", + "consumes": [ + "application/json" + ], + "tags": [ + "cluster" + ], + "parameters": [ + { + "description": "example body: {", + "name": "request", + "in": "body", + "required": true, + "schema": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/client.ClusterInfo" + } + } + } + ], + "responses": { + "200": { + "description": "OK" + } + } + } + }, + "/api/v1/clusters": { + "get": { + "description": "get list of clusters with context names from local kubeconfig", + "produces": [ + "application/json" + ], + "tags": [ + "clusters" + ], + "responses": { + "200": { + "description": "OK" + } + } + } + }, "/api/v1/resources/cluster-ops/hpas": { "get": { "description": "Get HPAs", @@ -3202,5 +3248,21 @@ } } } + }, + "definitions": { + "client.ClusterInfo": { + "type": "object", + "properties": { + "context": { + "type": "string" + }, + "name": { + "type": "string" + }, + "selected": { + "type": "boolean" + } + } + } } } \ No newline at end of file diff --git a/src/pkg/api/docs/swagger.yaml b/src/pkg/api/docs/swagger.yaml index dcdbf11d9..cf7ecbce3 100644 --- a/src/pkg/api/docs/swagger.yaml +++ b/src/pkg/api/docs/swagger.yaml @@ -1,3 +1,13 @@ +definitions: + client.ClusterInfo: + properties: + context: + type: string + name: + type: string + selected: + type: boolean + type: object info: contact: {} paths: @@ -10,6 +20,35 @@ paths: description: OK tags: - auth + /api/v1/cluster: + post: + consumes: + - application/json + description: Switch the currently viewed cluster + parameters: + - description: 'example body: {' + in: body + name: request + required: true + schema: + additionalProperties: + $ref: '#/definitions/client.ClusterInfo' + type: object + responses: + "200": + description: OK + tags: + - cluster + /api/v1/clusters: + get: + description: get list of clusters with context names from local kubeconfig + produces: + - application/json + responses: + "200": + description: OK + tags: + - clusters /api/v1/resources/cluster-ops/hpas: get: consumes: diff --git a/src/pkg/api/handlers.go b/src/pkg/api/handlers.go index b475e2f3d..f5cb1b5db 100644 --- a/src/pkg/api/handlers.go +++ b/src/pkg/api/handlers.go @@ -13,6 +13,7 @@ import ( _ "github.com/defenseunicorns/uds-runtime/src/pkg/api/docs" //nolint:staticcheck "github.com/defenseunicorns/uds-runtime/src/pkg/api/resources" "github.com/defenseunicorns/uds-runtime/src/pkg/api/rest" + "github.com/defenseunicorns/uds-runtime/src/pkg/k8s/client" "github.com/defenseunicorns/uds-runtime/src/pkg/k8s/session" ) @@ -965,3 +966,22 @@ func healthz(w http.ResponseWriter, _ *http.Request) { http.Error(w, "Internal Server Error", http.StatusInternalServerError) } } + +// @Description get list of clusters with context names from local kubeconfig +// @Tags clusters +// @Produce json +// @Success 200 +// @Router /api/v1/clusters [get] +func getClusters(ks 
*session.K8sSession) func(w http.ResponseWriter, r *http.Request) { + return client.ServeClusters(ks.InCluster, ks.CurrentCtx) +} + +// @Description Switch the currently viewed cluster +// @Tags cluster +// @Param request body map[string]client.ClusterInfo true "example body: {"cluster": {"name": string, "context": string, "selected": bool}}" +// @Accept json +// @Success 200 +// @Router /api/v1/cluster [post] +func switchCluster(ks *session.K8sSession) func(w http.ResponseWriter, r *http.Request) { + return ks.Switch() +} diff --git a/src/pkg/api/start.go b/src/pkg/api/start.go index cb1435aa0..0df2336a8 100644 --- a/src/pkg/api/start.go +++ b/src/pkg/api/start.go @@ -36,7 +36,6 @@ var inAirgap bool // @BasePath /api/v1 // @schemes http https func Setup(assets *embed.FS) (*chi.Mux, bool, error) { - // Create a k8s session k8sSession, err := session.CreateK8sSession() if err != nil { return nil, false, fmt.Errorf("failed to setup k8s session: %w", err) @@ -45,7 +44,6 @@ func Setup(assets *embed.FS) (*chi.Mux, bool, error) { inCluster := k8sSession.InCluster if !inCluster { - // Start the cluster monitoring goroutine go k8sSession.StartClusterMonitoring() } @@ -66,10 +64,12 @@ func Setup(assets *embed.FS) (*chi.Mux, bool, error) { r.Get("/cluster-check", checkClusterConnection(k8sSession)) r.Route("/api/v1", func(r chi.Router) { r.Get("/auth", authHandler) + r.Get("/clusters", withLatestSession(k8sSession, getClusters)) + r.Post("/cluster", switchCluster(k8sSession)) r.Route("/monitor", func(r chi.Router) { r.Get("/pepr/", monitor.Pepr) r.Get("/pepr/{stream}", monitor.Pepr) - r.Get("/cluster-overview", monitor.BindClusterOverviewHandler(k8sSession.Cache)) + r.Get("/cluster-overview", withLatestCache(k8sSession, monitor.BindClusterOverviewHandler)) }) r.Route("/resources", func(r chi.Router) { @@ -307,3 +307,9 @@ func withLatestCache(k8sSession *session.K8sSession, handler func(cache *resourc handler(k8sSession.Cache)(w, r) } } + +func withLatestSession(k8sSession *session.K8sSession, handler func(k8sSession *session.K8sSession) func(w http.ResponseWriter, r *http.Request)) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + handler(k8sSession)(w, r) + } +} diff --git a/src/pkg/k8s/client/client.go b/src/pkg/k8s/client/client.go index 79857b851..9e54edf11 100644 --- a/src/pkg/k8s/client/client.go +++ b/src/pkg/k8s/client/client.go @@ -4,11 +4,15 @@ package client import ( + "encoding/json" "fmt" + "log/slog" + "net/http" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" + clientCmdAPI "k8s.io/client-go/tools/clientcmd/api" metricsv "k8s.io/metrics/pkg/client/clientset/versioned" ) @@ -19,11 +23,22 @@ type Clients struct { Config *rest.Config } -// NewClient creates new Kubernetes cluster clients -func NewClient() (*Clients, error) { +type ClusterInfo struct { + Name string `json:"name"` + Context string `json:"context"` + Selected bool `json:"selected"` +} + +var ServerCheck = func(k8sClient *Clients) error { + _, err := k8sClient.Clientset.ServerVersion() + return err +} + +// New creates a Kubernetes cluster client with the option of overriding the default config +func New(overrides *clientcmd.ConfigOverrides) (*Clients, error) { config, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig( clientcmd.NewDefaultClientConfigLoadingRules(), - &clientcmd.ConfigOverrides{}).ClientConfig() + overrides).ClientConfig() if err != nil { return nil, err @@ -46,24 +61,22 @@ func NewClient() (*Clients, error) { }, nil } -// 
IsRunningInCluster checks if the application is running in cluster -func IsRunningInCluster() (bool, error) { - _, err := rest.InClusterConfig() +func rawConfig() (clientCmdAPI.Config, error) { + rules := clientcmd.NewDefaultClientConfigLoadingRules() + return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, &clientcmd.ConfigOverrides{}).RawConfig() +} - if err == rest.ErrNotInCluster { - return false, nil - } else if err != nil { - return true, err +func Contexts() (map[string]*clientCmdAPI.Context, error) { + config, err := rawConfig() + if err != nil { + return nil, err } - - return true, nil + return config.Contexts, nil } -// Declare GetCurrentContext as a variable so it can be mocked -var GetCurrentContext = func() (string, string, error) { - // Actual implementation of GetCurrentContext - rules := clientcmd.NewDefaultClientConfigLoadingRules() - config, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, &clientcmd.ConfigOverrides{}).RawConfig() +// CurrentContext returns the current context defined in the kubeconfig +func CurrentContext() (string, string, error) { + config, err := rawConfig() if err != nil { return "", "", err } @@ -74,3 +87,62 @@ var GetCurrentContext = func() (string, string, error) { } return contextName, context.Cluster, nil } + +func ServeClusters(inCluster bool, currentContext string) http.HandlerFunc { + return func(w http.ResponseWriter, _ *http.Request) { + slog.Debug("ServeClusters called", "inCluster", inCluster, "currentContext", currentContext) + w.Header().Set("Content-Type", "application/json; charset=utf-8") + w.Header().Set("Cache-Control", "no-cache") + + // running in cluster return [] + if inCluster { + data, err := json.Marshal([]interface{}{}) + if err != nil { + slog.Error("Failed to marshal empty data", "error", err) + http.Error(w, "JSON marshalling error", http.StatusInternalServerError) + return + } + + // Write the data to the response + if _, err := w.Write(data); err != nil { + http.Error(w, "Failed to write data", http.StatusInternalServerError) + } + + return + } + + contexts, err := Contexts() + if err != nil { + slog.Error("Failed to get contexts", "error", err) + http.Error(w, "Failed to get contexts", http.StatusInternalServerError) + return + } + + var clusters []ClusterInfo + for ctxName, context := range contexts { + clusters = append(clusters, ClusterInfo{Name: context.Cluster, Context: ctxName, Selected: currentContext == ctxName}) + } + + data, err := json.Marshal(clusters) + if err != nil { + slog.Error("Failed to marshal contexts", "error", err) + http.Error(w, "JSON marshalling error", http.StatusInternalServerError) + return + } + + // Write the data to the response + if _, err := w.Write(data); err != nil { + http.Error(w, "Failed to write data", http.StatusInternalServerError) + } + } +} + +// IsRunningInCluster checks if the application is running in cluster +func IsRunningInCluster() (bool, error) { + _, err := rest.InClusterConfig() + + if err == rest.ErrNotInCluster { + return false, nil + } + return true, err +} diff --git a/src/pkg/k8s/session/session.go b/src/pkg/k8s/session/session.go index fbffa448a..7bf4403d1 100644 --- a/src/pkg/k8s/session/session.go +++ b/src/pkg/k8s/session/session.go @@ -5,8 +5,9 @@ package session import ( "context" + "encoding/json" "fmt" - "log" + "log/slog" "net/http" "os" "strconv" @@ -15,6 +16,7 @@ import ( "github.com/defenseunicorns/uds-runtime/src/pkg/api/resources" "github.com/defenseunicorns/uds-runtime/src/pkg/api/rest" 
"github.com/defenseunicorns/uds-runtime/src/pkg/k8s/client" + "k8s.io/client-go/tools/clientcmd" ) type K8sSession struct { @@ -30,14 +32,14 @@ type K8sSession struct { createClient createClient } -type createClient func() (*client.Clients, error) +type createClient func(overrides *clientcmd.ConfigOverrides) (*client.Clients, error) type createCache func(ctx context.Context, client *client.Clients) (*resources.Cache, error) var lastStatus string // CreateK8sSession creates a new k8s session func CreateK8sSession() (*K8sSession, error) { - k8sClient, err := client.NewClient() + k8sClient, err := client.New(&clientcmd.ConfigOverrides{}) if err != nil { return nil, fmt.Errorf("failed to create k8s client: %w", err) } @@ -57,7 +59,8 @@ func CreateK8sSession() (*K8sSession, error) { var currentCtx, currentCluster string if !inCluster { // get the current context and cluster - currentCtx, currentCluster, err = client.GetCurrentContext() + currentCtx, currentCluster, err = client.CurrentContext() + slog.Info("Current context", "context", currentCtx, "cluster", currentCluster) if err != nil { cancel() return nil, fmt.Errorf("failed to get current context: %w", err) @@ -74,7 +77,7 @@ func CreateK8sSession() (*K8sSession, error) { Status: make(chan string), ready: true, createCache: resources.NewCache, - createClient: client.NewClient, + createClient: client.New, } return session, nil @@ -98,7 +101,7 @@ func (ks *K8sSession) StartClusterMonitoring() { defer ticker.Stop() // Initial cluster health check - _, err := ks.Clients.Clientset.ServerVersion() + err := client.ServerCheck(ks.Clients) handleConnStatus(ks, err) for range ticker.C { @@ -106,46 +109,47 @@ func (ks *K8sSession) StartClusterMonitoring() { if !ks.ready { continue } - _, err := ks.Clients.Clientset.ServerVersion() + err := client.ServerCheck(ks.Clients) handleConnStatus(ks, err) } } // HandleReconnection infinitely retries to re-create the client and cache of the formerly connected cluster func (ks *K8sSession) HandleReconnection() { - log.Println("Disconnected error received") - + // slog.Info("Disconnected error received, attempting to reconnect to cluster") // Set ready to false to block cluster check ticker ks.ready = false + // Cancel the previous context + ks.Cancel() for { - // Cancel the previous context - ks.Cancel() time.Sleep(getRetryInterval()) - currentCtx, currentCluster, err := client.GetCurrentContext() - if err != nil { - log.Printf("Error fetching current context: %v\n", err) - continue + // Break out of reconnection loop if switch to a new cluster occurs + if lastStatus == "switched" { + slog.Info("Switched to a new cluster, breaking out of reconnection loop") + break } - // If the current context or cluster is different from the original, skip reconnection - if currentCtx != ks.CurrentCtx || currentCluster != ks.CurrentCluster { - log.Println("Current context has changed. Skipping reconnection.") + k8sClient, err := ks.createClient(&clientcmd.ConfigOverrides{CurrentContext: ks.CurrentCtx}) + if err != nil { + slog.Debug("Retrying to create k8s client") continue } - k8sClient, err := ks.createClient() + // Ping server before recreating cache in case client was created but server is still unreachable. + // This avoids getting stuck in the cache.WaithForCacheSync() call that polls indefinitely and does not error out immediately. 
@@ -212,3 +216,73 @@
 	}
 }
+
+// Switch returns a handler function that switches the current cluster and updates the K8sSession
+func (ks *K8sSession) Switch() http.HandlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Content-Type", "application/json; charset=utf-8")
+		w.Header().Set("Cache-Control", "no-cache")
+
+		if ks.InCluster {
+			http.Error(w, "cannot switch clusters while in-cluster", http.StatusBadRequest)
+			return
+		}
+
+		// Set ready to false to block the cluster check ticker while switching
+		ks.ready = false
+
+		defer r.Body.Close()
+
+		var body map[string]client.ClusterInfo
+		if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
+			http.Error(w, "invalid request format", http.StatusBadRequest)
+			return
+		}
+
+		targetCluster := body["cluster"]
+		ks.CurrentCtx = targetCluster.Context
+		ks.CurrentCluster = targetCluster.Name
+
+		slog.Info("Switching cluster", "clusterInfo", targetCluster)
+
+		// Cancel the previous context and informers
+		ks.Cancel()
+
+		targetClients, err := client.New(&clientcmd.ConfigOverrides{CurrentContext: targetCluster.Context})
+		if err != nil {
+			http.Error(w, fmt.Sprintf("failed to create client: %v", err), http.StatusInternalServerError)
+			return
+		}
+		ks.Clients = targetClients
+
+		// Ping the server before recreating the cache in case the client was created but the server is still unreachable.
+		// This avoids getting stuck in the cache.WaitForCacheSync() call, which polls indefinitely and does not error out immediately.
+		err = client.ServerCheck(targetClients)
+		if err != nil {
+			http.Error(w, fmt.Sprintf("failed to connect: %v", err), http.StatusInternalServerError)
+			return
+		}
+
+		ctx, cancel := context.WithCancel(context.Background())
+		ks.Cancel = cancel
+		targetCache, err := ks.createCache(ctx, targetClients)
+		if err != nil {
+			http.Error(w, fmt.Sprintf("failed to create cache: %v", err), http.StatusInternalServerError)
+			ks.Cancel()
+			return
+		}
+		ks.Cache = targetCache
+		// inform the client that the switch was successful
+		ks.Status <- "switched"
+		// In case the switch occurs during reconnection attempts to the previous cluster, we need to break out of the reconnection loop;
+		// this has to be done before updating the K8sSession.CurrentCtx
+		lastStatus = "switched"
+		ks.ready = true
+
+		slog.Info("Successfully switched cluster", "context", targetCluster.Context, "cluster", targetCluster.Name)
+
+		if _, err := w.Write([]byte("Cluster switched successfully")); err != nil {
+			http.Error(w, "Failed to write data", http.StatusInternalServerError)
+		}
+	}
+}
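A sketch of driving the new endpoint over HTTP. The request shape mirrors the `map[string]client.ClusterInfo` decode above and the e2e test payload below; the base URL and the local `ClusterInfo` mirror are assumptions:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// ClusterInfo mirrors client.ClusterInfo from this patch.
type ClusterInfo struct {
	Name     string `json:"name"`
	Context  string `json:"context"`
	Selected bool   `json:"selected"`
}

func main() {
	// The Switch handler decodes a map keyed by "cluster".
	payload := map[string]ClusterInfo{
		"cluster": {Name: "k3d-runtime-2", Context: "k3d-runtime-2", Selected: false},
	}
	b, err := json.Marshal(payload)
	if err != nil {
		panic(err)
	}
	// localhost:8080 is an assumed address for a locally running server.
	resp, err := http.Post("http://localhost:8080/api/v1/cluster", "application/json", bytes.NewReader(b))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // expect "200 OK" on a successful switch
}
```

Note that the handler sends on `ks.Status` before returning, so a successful response implies a connection-status consumer was listening.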
diff --git a/src/pkg/k8s/session/session_test.go b/src/pkg/k8s/session/session_test.go
index 5f1c04926..8b82366a6 100644
--- a/src/pkg/k8s/session/session_test.go
+++ b/src/pkg/k8s/session/session_test.go
@@ -12,20 +12,21 @@ import (
 	"github.com/defenseunicorns/uds-runtime/src/pkg/api/resources"
 	"github.com/defenseunicorns/uds-runtime/src/pkg/k8s/client"
+	"github.com/stretchr/testify/require"
 	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/tools/clientcmd"
 )
 
 func TestHandleReconnection(t *testing.T) {
 	os.Setenv("CONNECTION_RETRY_MS", "100")
 	defer os.Unsetenv("CONNECTION_RETRY_MS")
 
-	// Mock GetCurrentContext to return the same context and cluster as the original
-	client.GetCurrentContext = func() (string, string, error) {
-		return "original-context", "original-cluster", nil
+	client.ServerCheck = func(k8sClient *client.Clients) error {
+		return nil
 	}
 
-	createClientMock := func() (*client.Clients, error) {
+	createClientMock := func(overrides *clientcmd.ConfigOverrides) (*client.Clients, error) {
 		return &client.Clients{Clientset: &kubernetes.Clientset{}}, nil
 	}
 
@@ -62,12 +63,7 @@ func TestHandleReconnectionCreateClientError(t *testing.T) {
 	os.Setenv("CONNECTION_RETRY_MS", "100")
 	defer os.Unsetenv("CONNECTION_RETRY_MS")
 
-	// Mock GetCurrentContext to return the same context and cluster as the original
-	client.GetCurrentContext = func() (string, string, error) {
-		return "original-context", "original-cluster", nil
-	}
-
-	createClientMock := func() (*client.Clients, error) {
+	createClientMock := func(overrides *clientcmd.ConfigOverrides) (*client.Clients, error) {
 		return nil, fmt.Errorf("failed to create client")
 	}
 
@@ -101,12 +97,11 @@ func TestHandleReconnectionCreateCacheError(t *testing.T) {
 	os.Setenv("CONNECTION_RETRY_MS", "100")
 	defer os.Unsetenv("CONNECTION_RETRY_MS")
 
-	// Mock GetCurrentContext to return the same context and cluster as the original
-	client.GetCurrentContext = func() (string, string, error) {
-		return "original-context", "original-cluster", nil
+	client.ServerCheck = func(k8sClient *client.Clients) error {
+		return nil
 	}
 
-	createClientMock := func() (*client.Clients, error) {
+	createClientMock := func(overrides *clientcmd.ConfigOverrides) (*client.Clients, error) {
 		return &client.Clients{Clientset: &kubernetes.Clientset{}}, nil
 	}
 
@@ -135,16 +130,15 @@ func TestHandleReconnectionCreateCacheError(t *testing.T) {
 	require.Nil(t, k8sSession.Cache.Pods)
 }
 
-func TestHandleReconnectionContextChanged(t *testing.T) {
+func TestHandleReconnectionServerCheckError(t *testing.T) {
 	os.Setenv("CONNECTION_RETRY_MS", "100")
 	defer os.Unsetenv("CONNECTION_RETRY_MS")
 
-	// Mock GetCurrentContext to return a different context and cluster
-	client.GetCurrentContext = func() (string, string, error) {
-		return "new-context", "new-cluster", nil
+	client.ServerCheck = func(k8sClient *client.Clients) error {
+		return fmt.Errorf("failed to check server")
 	}
 
-	createClientMock := func() (*client.Clients, error) {
+	createClientMock := func(overrides *clientcmd.ConfigOverrides) (*client.Clients, error) {
 		return &client.Clients{Clientset: &kubernetes.Clientset{}}, nil
 	}
 
@@ -168,7 +162,7 @@
 	// Wait for the reconnection logic to complete
 	time.Sleep(200 * time.Millisecond)
 
-	// Verify that the K8sResources struct was not updated since the context/cluster has changed
+	// Verify that the K8sResources struct was not updated since the server check failed
 	require.Nil(t, k8sSession.Clients.Clientset)
 	require.Nil(t, k8sSession.Cache.Pods)
 }
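Because `client.ServerCheck` is a package-level variable, each test's stub persists after the test returns. A save-and-restore helper along these lines (hypothetical, not in this patch) would keep one test's mock from leaking into the next:

```go
package session

import (
	"testing"

	"github.com/defenseunicorns/uds-runtime/src/pkg/k8s/client"
)

// stubServerCheck is an illustrative helper: swap in a stub for the
// package-level client.ServerCheck and restore the original on cleanup.
func stubServerCheck(t *testing.T, err error) {
	t.Helper()
	orig := client.ServerCheck
	client.ServerCheck = func(*client.Clients) error { return err }
	t.Cleanup(func() { client.ServerCheck = orig })
}
```

Swapping package-level function variables is simple but not safe under `t.Parallel()`; these tests appear to run sequentially.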
diff --git a/src/pkg/stream/common.go b/src/pkg/stream/common.go
index 345e2fec5..90ce25ec3 100644
--- a/src/pkg/stream/common.go
+++ b/src/pkg/stream/common.go
@@ -16,6 +16,7 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/tools/clientcmd"
 )
 
 type Reader interface {
@@ -52,7 +53,7 @@ func NewStream(writer io.Writer, reader Reader, namespace string) *Stream {
 func (s *Stream) Start(ctx context.Context) error {
 	// Create a new client if one is not provided (usually for testing)
 	if s.Client == nil {
-		c, err := client.NewClient()
+		c, err := client.New(&clientcmd.ConfigOverrides{})
 		if err != nil {
 			return fmt.Errorf("unable to connect to the cluster: %v", err)
 		}
diff --git a/src/test/e2e/api_test.go b/src/test/e2e/api_test.go
index 228de84f6..17682b1a4 100644
--- a/src/test/e2e/api_test.go
+++ b/src/test/e2e/api_test.go
@@ -316,6 +316,43 @@ func TestClusterHealth(t *testing.T) {
 	})
 }
 
+func TestGetClusters(t *testing.T) {
+	r, err := setup()
+	require.NoError(t, err)
+
+	t.Run("list available clusters", func(t *testing.T) {
+		rr := httptest.NewRecorder()
+		req := httptest.NewRequest("GET", "/api/v1/clusters", nil)
+		r.ServeHTTP(rr, req)
+		require.Equal(t, http.StatusOK, rr.Code)
+		require.Contains(t, rr.Body.String(), "k3d-runtime")
+	})
+}
+
+func TestSwitchCluster(t *testing.T) {
+	r, err := setup()
+	require.NoError(t, err)
+	t.Run("switch cluster", func(t *testing.T) {
+		// request /cluster-check so that a client is listening on ks.Status and the next request does not block on the channel
+		rr1 := httptest.NewRecorder()
+		req1 := httptest.NewRequest("GET", "/cluster-check", nil)
+
+		// Start serving the request
+		go func() {
+			r.ServeHTTP(rr1, req1)
+		}()
+
+		body := strings.NewReader(`{"cluster": {"name": "k3d-runtime-2", "context": "k3d-runtime-2", "selected": false }}`)
+		rr2 := httptest.NewRecorder()
+		req2 := httptest.NewRequest("POST", "/api/v1/cluster", body)
+
+		r.ServeHTTP(rr2, req2)
+
+		require.Equal(t, http.StatusOK, rr2.Code)
+	})
+}
+
 func TestSwagger(t *testing.T) {
 	r, err := setup()
 	require.NoError(t, err)
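The goroutine around `/cluster-check` matters because `K8sSession.Status` is created with `make(chan string)` and is therefore unbuffered: the `ks.Status <- "switched"` send in `Switch` blocks until something is receiving. A minimal illustration of that blocking behavior:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	status := make(chan string) // unbuffered, like K8sSession.Status

	// Without a receiver, the send below would block forever. The e2e test
	// plays the receiver's role by hitting /cluster-check first, which keeps
	// a connection-status stream listening on the channel.
	go func() {
		fmt.Println("received:", <-status)
	}()

	status <- "switched" // completes only once the receiver is ready
	time.Sleep(10 * time.Millisecond)
}
```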
diff --git a/tasks/setup.yaml b/tasks/setup.yaml
index 1345bb6fd..033b0465c 100644
--- a/tasks/setup.yaml
+++ b/tasks/setup.yaml
@@ -24,9 +24,13 @@ tasks:
 
   - name: k3d
     description: "start a plain k3d cluster"
+    inputs:
+      CLUSTER_NAME:
+        description: "Name of the k3d cluster"
+        default: "runtime"
     actions:
       - cmd: |
-          k3d cluster delete runtime && k3d cluster create runtime --k3s-arg "--disable=traefik@server:*" --k3s-arg "--disable=servicelb@server:*"
+          k3d cluster delete $INPUT_CLUSTER_NAME && k3d cluster create $INPUT_CLUSTER_NAME --k3s-arg "--disable=traefik@server:*" --k3s-arg "--disable=servicelb@server:*"
 
   - name: golangci
     description: "Install golangci-lint to GOPATH using install.sh"
diff --git a/tasks/test.yaml b/tasks/test.yaml
index f2384a9c1..1b85fc1e4 100644
--- a/tasks/test.yaml
+++ b/tasks/test.yaml
@@ -87,7 +87,7 @@ tasks:
       - cmd: npm run test:e2e
         dir: ui
 
-  - name: e2e-reconnect
+  - name: e2e-connections
    description: "run end-to-end tests for cluster disconnect and reconnect"
    actions:
      - task: build:ui
@@ -95,7 +95,10 @@ tasks:
        dir: ui
      - task: build:api
      - task: setup:k3d
-      - cmd: npm run test:reconnect
+      - task: setup:k3d
+        with:
+          CLUSTER_NAME: runtime-2
+      - cmd: npm run test:connections
        dir: ui
 
   - name: e2e-in-cluster
@@ -136,6 +139,11 @@ tasks:
      - description: "build ui since embedded in main.go"
        task: build:ui
      - task: setup:k3d
+        with:
+          CLUSTER_NAME: runtime-2
+        description: "create an additional test cluster so there are 2 in the kubeconfig"
+      - task: setup:k3d
+        description: "create runtime after runtime-2 so it is the default current context when the test runs"
      - task: deploy-load
      - cmd: npm ci && npm run load:api
        dir: hack/load-test
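The ordering above matters because `k3d cluster create` normally switches the kubeconfig's current-context to the new cluster, and `client.CurrentContext()` from this patch reads that value when the session starts. A sketch of checking it, assuming the module's package is importable from a helper program:

```go
package main

import (
	"fmt"

	"github.com/defenseunicorns/uds-runtime/src/pkg/k8s/client"
)

func main() {
	// CurrentContext returns the kubeconfig's current context name and its
	// cluster; after the tasks above it should point at "runtime", not "runtime-2".
	ctx, cluster, err := client.CurrentContext()
	if err != nil {
		panic(err)
	}
	fmt.Printf("context=%s cluster=%s\n", ctx, cluster)
}
```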
diff --git a/ui/package.json b/ui/package.json
index 7a1e19776..b029ed062 100644
--- a/ui/package.json
+++ b/ui/package.json
@@ -13,11 +13,12 @@
     "lint": "prettier --check . && eslint .",
     "format": "prettier --write .",
     "test:local-auth": "TEST_CFG=localAuth playwright test",
-    "test:reconnect": "TEST_CFG=reconnect playwright test",
+    "test:connections": "TEST_CFG=connections playwright test",
     "test:e2e": "TEST_CFG=default playwright test",
     "test:e2e-in-cluster": "TEST_CFG=inCluster playwright test",
     "test:install": "playwright install",
-    "test:unit": "vitest run"
+    "test:unit": "vitest run",
+    "test:unit-watch": "vitest watch"
   },
   "devDependencies": {
     "@ianvs/prettier-plugin-sort-imports": "^4.3.1",
diff --git a/ui/playwright.config.ts b/ui/playwright.config.ts
index 0fbab79b5..d9542530f 100644
--- a/ui/playwright.config.ts
+++ b/ui/playwright.config.ts
@@ -28,15 +28,15 @@ const defaultConfig: PlaywrightTestConfig = {
   /* Run tests in files in parallel */
   fullyParallel: true,
   retries: process.env.CI ? 2 : 1,
-  testMatch: /^(?!.*local-auth|.*reconnect|.*in-cluster)(.+\.)?(test|spec)\.[jt]s$/,
+  testMatch: /^(?!.*local-auth|.*connections|.*in-cluster)(.+\.)?(test|spec)\.[jt]s$/,
   use: {
     baseURL: `${protocol}://${host}:${port}/`,
   },
 }
 
 // For testing reconnecting to the cluster
-const reconnect: PlaywrightTestConfig = {
-  name: 'reconnect',
+const connections: PlaywrightTestConfig = {
+  name: 'connections',
   webServer: {
     command: '../build/uds-runtime',
     url: `${protocol}://${host}:${port}`,
@@ -47,7 +47,7 @@
   testDir: 'tests',
   fullyParallel: false,
   retries: process.env.CI ? 2 : 1,
-  testMatch: 'reconnect.spec.ts',
+  testMatch: 'connections.spec.ts',
   use: {
     baseURL: `https://runtime-local.uds.dev:${port}/`,
   },
@@ -75,7 +75,7 @@ const inCluster: PlaywrightTestConfig = {
   /* Run tests in files in parallel */
   fullyParallel: true,
   retries: process.env.CI ? 2 : 1,
-  testMatch: /^(?!.*local-auth|.*reconnect)(.+\.)?(test|spec)\.[jt]s$/,
+  testMatch: /^(?!.*local-auth|.*connections)(.+\.)?(test|spec)\.[jt]s$/,
   use: {
     baseURL: `${protocol}://runtime.admin.uds.dev/`,
     storageState: './tests/state.json',
@@ -87,7 +87,7 @@ const configs = {
   default: defaultConfig,
   localAuth,
   inCluster,
-  reconnect,
+  connections,
 }
 
 export default defineConfig({ ...configs[TEST_CONFIG] })
diff --git a/ui/src/lib/features/navigation/index.ts b/ui/src/lib/features/navigation/index.ts
index 4d9cac86a..50d1c59d9 100644
--- a/ui/src/lib/features/navigation/index.ts
+++ b/ui/src/lib/features/navigation/index.ts
@@ -1,8 +1,10 @@
 // Copyright 2024 Defense Unicorns
 // SPDX-License-Identifier: AGPL-3.0-or-later OR LicenseRef-Defense-Unicorns-Commercial
 
+export { default as ClusterMenu } from './navbar/clustermenu/component.svelte'
+export { default as LoadingCluster } from './navbar/clustermenu/loading.svelte'
 export { default as Navbar } from './navbar/component.svelte'
-export { default as Sidebar } from './sidebar/component.svelte'
 export { default as UserMenu } from './navbar/usermenu/component.svelte'
+export { default as Sidebar } from './sidebar/component.svelte'
 
 export { isSidebarExpanded } from './store'
diff --git a/ui/src/lib/features/navigation/navbar/clustermenu/component.svelte b/ui/src/lib/features/navigation/navbar/clustermenu/component.svelte
new file mode 100644
index 000000000..71f64b619
--- /dev/null
+++ b/ui/src/lib/features/navigation/navbar/clustermenu/component.svelte
@@ -0,0 +1,105 @@
+
+
+
+
+