fix node-servant convert not use yss template to deploy yurthub (openyurtio#1633)

* fix node-servant convert not use yss template to deploy yurthub

* update
JameKeal authored Aug 18, 2023
1 parent efe75d5 commit d15078f
Showing 4 changed files with 199 additions and 20 deletions.
37 changes: 20 additions & 17 deletions pkg/node-servant/components/yurthub.go
@@ -23,7 +23,6 @@ import (
"net/url"
"os"
"path/filepath"
"strconv"
"strings"
"time"

@@ -34,7 +33,7 @@ import (

"github.com/openyurtio/openyurt/pkg/projectinfo"
kubeconfigutil "github.com/openyurtio/openyurt/pkg/util/kubeconfig"
"github.com/openyurtio/openyurt/pkg/util/templates"
tmplutil "github.com/openyurtio/openyurt/pkg/util/templates"
"github.com/openyurtio/openyurt/pkg/yurtadm/constants"
enutil "github.com/openyurtio/openyurt/pkg/yurtadm/util/edgenode"
"github.com/openyurtio/openyurt/pkg/yurthub/storage/disk"
@@ -46,6 +45,8 @@ const (
fileMode = 0666
DefaultRootDir = "/var/lib"
DefaultCaPath = "/etc/kubernetes/pki/ca.crt"
yurthubYurtStaticSetName = "yurthub"
defaultConfigmapPath = "/data"
)

type yurtHubOperator struct {
@@ -77,20 +78,8 @@ func (op *yurtHubOperator) Install() error {

// 1. put yurt-hub yaml into /etc/kubernetes/manifests
klog.Infof("setting up yurthub on node")
// 1-1. replace variables in yaml file
klog.Infof("setting up yurthub apiServer addr")
yurthubTemplate, err := templates.SubsituteTemplate(constants.YurthubTemplate, map[string]string{
"yurthubBindingAddr": constants.DefaultYurtHubServerAddr,
"kubernetesServerAddr": op.apiServerAddr,
"image": op.yurthubImage,
"bootstrapFile": constants.YurtHubBootstrapConfig,
"workingMode": string(op.workingMode),
"enableDummyIf": strconv.FormatBool(op.enableDummyIf),
"enableNodePool": strconv.FormatBool(op.enableNodePool),
})
if err != nil {
return err
}
// 1-1. get configmap data path
configMapDataPath := filepath.Join(defaultConfigmapPath, yurthubYurtStaticSetName)

// 1-2. create /var/lib/yurthub/bootstrap-hub.conf
if err := enutil.EnsureDir(constants.YurtHubWorkdir); err != nil {
@@ -106,10 +95,24 @@ func (op *yurtHubOperator) Install() error {
if err := enutil.EnsureDir(podManifestPath); err != nil {
return err
}
if err := os.WriteFile(getYurthubYaml(podManifestPath), []byte(yurthubTemplate), fileMode); err != nil {
content, err := os.ReadFile(configMapDataPath)
if err != nil {
return fmt.Errorf("failed to read source file %s: %w", configMapDataPath, err)
}
klog.Infof("yurt-hub.yaml apiServerAddr: %+v", op.apiServerAddr)
yssYurtHub, err := tmplutil.SubsituteTemplate(string(content), map[string]string{
"kubernetesServerAddr": op.apiServerAddr,
})
if err != nil {
return err
}
if err = os.WriteFile(getYurthubYaml(podManifestPath), []byte(yssYurtHub), fileMode); err != nil {
return err
}

klog.Infof("create the %s/yurt-hub.yaml", podManifestPath)
klog.Infof("yurt-hub.yaml: %+v", configMapDataPath)
klog.Infof("yurt-hub.yaml content: %+v", yssYurtHub)

// 2. wait yurthub pod to be ready
return hubHealthcheck(op.yurthubHealthCheckTimeout)
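Note: with this change the convert path no longer renders the built-in YurthubTemplate on the node; the node-servant reads the manifest that was already rendered into the mounted ConfigMap and substitutes only the API server address. Below is a minimal sketch of that second substitution, assuming tmplutil.SubsituteTemplate wraps Go's text/template with the default {{ }} delimiters; the sample manifest line and address are illustrative only, not taken from the commit.

// Minimal sketch (assumption): SubsituteTemplate behaves like Go's
// text/template, so the node-servant only needs to fill in
// kubernetesServerAddr in the manifest it read from the mounted ConfigMap.
package main

import (
	"bytes"
	"fmt"
	"text/template"
)

// renderHubManifest fills only the kubernetesServerAddr placeholder; every
// other field was already resolved when the YurtStaticSet was created.
func renderHubManifest(manifest, apiServerAddr string) (string, error) {
	tmpl, err := template.New("yurthub").Parse(manifest)
	if err != nil {
		return "", err
	}
	var buf bytes.Buffer
	if err := tmpl.Execute(&buf, map[string]string{"kubernetesServerAddr": apiServerAddr}); err != nil {
		return "", err
	}
	return buf.String(), nil
}

func main() {
	manifest := "    - --server-addr={{.kubernetesServerAddr}}\n"
	out, err := renderHubManifest(manifest, "https://192.168.0.10:6443")
	if err != nil {
		panic(err)
	}
	fmt.Print(out) // prints:     - --server-addr=https://192.168.0.10:6443
}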
6 changes: 6 additions & 0 deletions pkg/node-servant/constant.go
@@ -42,6 +42,10 @@ spec:
hostPath:
path: /
type: Directory
- name: configmap
configMap:
defaultMode: 420
name: {{.configmap_name}}
containers:
- name: node-servant-servant
image: {{.node_servant_image}}
@@ -56,6 +60,8 @@ spec:
volumeMounts:
- mountPath: /openyurt
name: host-root
- mountPath: /openyurt/data
name: configmap
env:
- name: NODE_NAME
valueFrom:
133 changes: 133 additions & 0 deletions test/e2e/cmd/init/constants/constants.go
@@ -187,4 +187,137 @@ data:
discardcloudservice: ""
masterservice: ""
`

YurthubCloudYurtStaticSet = `
apiVersion: apps.openyurt.io/v1alpha1
kind: YurtStaticSet
metadata:
name: yurt-hub-cloud
namespace: "kube-system"
spec:
staticPodManifest: yurthub
template:
metadata:
labels:
k8s-app: yurt-hub-cloud
spec:
volumes:
- name: hub-dir
hostPath:
path: /var/lib/yurthub
type: DirectoryOrCreate
- name: kubernetes
hostPath:
path: /etc/kubernetes
type: Directory
containers:
- name: yurt-hub
image: {{.yurthub_image}}
imagePullPolicy: IfNotPresent
volumeMounts:
- name: hub-dir
mountPath: /var/lib/yurthub
- name: kubernetes
mountPath: /etc/kubernetes
command:
- yurthub
- --v=2
- --bind-address=127.0.0.1
- --server-addr={{.kubernetesServerAddr}}
- --node-name=$(NODE_NAME)
- --bootstrap-file=/var/lib/yurthub/bootstrap-hub.conf
- --working-mode=cloud
- --namespace="kube-system"
livenessProbe:
httpGet:
host: 127.0.0.1
path: /v1/healthz
port: 10267
initialDelaySeconds: 300
periodSeconds: 5
failureThreshold: 3
resources:
requests:
cpu: 150m
memory: 150Mi
limits:
memory: 300Mi
securityContext:
capabilities:
add: [ "NET_ADMIN", "NET_RAW" ]
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
hostNetwork: true
priorityClassName: system-node-critical
priority: 2000001000
`
YurthubYurtStaticSet = `
apiVersion: apps.openyurt.io/v1alpha1
kind: YurtStaticSet
metadata:
name: yurt-hub
namespace: "kube-system"
spec:
staticPodManifest: yurthub
template:
metadata:
labels:
k8s-app: yurt-hub
spec:
volumes:
- name: hub-dir
hostPath:
path: /var/lib/yurthub
type: DirectoryOrCreate
- name: kubernetes
hostPath:
path: /etc/kubernetes
type: Directory
containers:
- name: yurt-hub
image: {{.yurthub_image}}
imagePullPolicy: IfNotPresent
volumeMounts:
- name: hub-dir
mountPath: /var/lib/yurthub
- name: kubernetes
mountPath: /etc/kubernetes
command:
- yurthub
- --v=2
- --bind-address=127.0.0.1
- --server-addr={{.kubernetesServerAddr}}
- --node-name=$(NODE_NAME)
- --bootstrap-file=/var/lib/yurthub/bootstrap-hub.conf
- --working-mode=edge
- --namespace="kube-system"
livenessProbe:
httpGet:
host: 127.0.0.1
path: /v1/healthz
port: 10267
initialDelaySeconds: 300
periodSeconds: 5
failureThreshold: 3
resources:
requests:
cpu: 150m
memory: 150Mi
limits:
memory: 300Mi
securityContext:
capabilities:
add: [ "NET_ADMIN", "NET_RAW" ]
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
hostNetwork: true
priorityClassName: system-node-critical
priority: 2000001000
`
)
43 changes: 40 additions & 3 deletions test/e2e/cmd/init/converter.go
@@ -39,14 +39,18 @@ import (
nodeservant "github.com/openyurtio/openyurt/pkg/node-servant"
kubeadmapi "github.com/openyurtio/openyurt/pkg/util/kubernetes/kubeadm/app/phases/bootstraptoken/clusterinfo"
strutil "github.com/openyurtio/openyurt/pkg/util/strings"
tmplutil "github.com/openyurtio/openyurt/pkg/util/templates"
"github.com/openyurtio/openyurt/pkg/yurthub/util"
"github.com/openyurtio/openyurt/test/e2e/cmd/init/constants"
"github.com/openyurtio/openyurt/test/e2e/cmd/init/lock"
kubeutil "github.com/openyurtio/openyurt/test/e2e/cmd/init/util/kubernetes"
)

const (
// defaultYurthubHealthCheckTimeout defines the default timeout for yurthub health check phase
defaultYurthubHealthCheckTimeout = 2 * time.Minute
yssYurtHubCloudName = "yurt-static-set-yurt-hub-cloud"
yssYurtHubName = "yurt-static-set-yurt-hub"
)

type ClusterConverter struct {
@@ -124,14 +128,45 @@ func (c *ClusterConverter) deployYurthub() error {
// The node-servant will detect the kubeadm_conf_path automatically
// It will be either "/usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf"
// or "/etc/systemd/system/kubelet.service.d/10-kubeadm.conf".
"kubeadm_conf_path": "",
"working_mode": string(util.WorkingModeEdge),
"enable_dummy_if": strconv.FormatBool(c.EnableDummyIf),
"kubeadm_conf_path": "",
"working_mode": string(util.WorkingModeEdge),
"enable_dummy_if": strconv.FormatBool(c.EnableDummyIf),
"kubernetesServerAddr": "{{.kubernetesServerAddr}}",
}
if c.YurthubHealthCheckTimeout != defaultYurthubHealthCheckTimeout {
convertCtx["yurthub_healthcheck_timeout"] = c.YurthubHealthCheckTimeout.String()
}

// create the yurthub-cloud and yurthub yss
tempDir, err := os.MkdirTemp(c.RootDir, "yurt-hub")
if err != nil {
return err
}
defer os.RemoveAll(tempDir)
tempFile := filepath.Join(tempDir, "yurthub-cloud-yurtstaticset.yaml")
yssYurtHubCloud, err := tmplutil.SubsituteTemplate(constants.YurthubCloudYurtStaticSet, convertCtx)
if err != nil {
return err
}
if err = os.WriteFile(tempFile, []byte(yssYurtHubCloud), 0644); err != nil {
return err
}
if err = c.ComponentsBuilder.InstallComponents(tempFile, false); err != nil {
return err
}

tempFile = filepath.Join(tempDir, "yurthub-yurtstaticset.yaml")
yssYurtHub, err := tmplutil.SubsituteTemplate(constants.YurthubYurtStaticSet, convertCtx)
if err != nil {
return err
}
if err = os.WriteFile(tempFile, []byte(yssYurtHub), 0644); err != nil {
return err
}
if err = c.ComponentsBuilder.InstallComponents(tempFile, false); err != nil {
return err
}

npExist, err := nodePoolResourceExists(c.ClientSet)
if err != nil {
return err
@@ -141,6 +176,7 @@

if len(c.EdgeNodes) != 0 {
convertCtx["working_mode"] = string(util.WorkingModeEdge)
convertCtx["configmap_name"] = yssYurtHubName
if err = kubeutil.RunServantJobs(c.ClientSet, c.WaitServantJobTimeout, func(nodeName string) (*batchv1.Job, error) {
return nodeservant.RenderNodeServantJob("convert", convertCtx, nodeName)
}, c.EdgeNodes, os.Stderr, false); err != nil {
@@ -175,6 +211,7 @@ func (c *ClusterConverter) deployYurthub() error {

// deploy yurt-hub and reset the kubelet service on cloud nodes
convertCtx["working_mode"] = string(util.WorkingModeCloud)
convertCtx["configmap_name"] = yssYurtHubCloudName
klog.Infof("convert context for cloud nodes(%q): %#+v", c.CloudNodes, convertCtx)
if err = kubeutil.RunServantJobs(c.ClientSet, c.WaitServantJobTimeout, func(nodeName string) (*batchv1.Job, error) {
return nodeservant.RenderNodeServantJob("convert", convertCtx, nodeName)
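Note: the subtle point in deployYurthub is the self-referencing entry "kubernetesServerAddr": "{{.kubernetesServerAddr}}". When the converter renders the YurtStaticSet templates, that placeholder is replaced with itself, so it survives into the YurtStaticSet (and the ConfigMap mounted into the node-servant job) and is only resolved to the real address by the node-servant on each node. A short two-pass sketch of that behavior, assuming SubsituteTemplate behaves like Go's text/template; the image and address values are illustrative.

// Two-pass substitution sketch (assumption: text/template semantics).
// Pass 1 models the converter rendering the YurtStaticSet template;
// pass 2 models the node-servant filling in the API server address.
package main

import (
	"bytes"
	"fmt"
	"text/template"
)

func render(text string, ctx map[string]string) string {
	var buf bytes.Buffer
	tmpl := template.Must(template.New("t").Parse(text))
	if err := tmpl.Execute(&buf, ctx); err != nil {
		panic(err)
	}
	return buf.String()
}

func main() {
	yss := "image: {{.yurthub_image}}\nserver: {{.kubernetesServerAddr}}\n"

	// Pass 1 (converter): map the placeholder to itself so it survives.
	pass1 := render(yss, map[string]string{
		"yurthub_image":        "openyurt/yurthub:latest",
		"kubernetesServerAddr": "{{.kubernetesServerAddr}}",
	})
	fmt.Print(pass1) // the server line still reads {{.kubernetesServerAddr}}

	// Pass 2 (node-servant on the node): fill in the real address.
	pass2 := render(pass1, map[string]string{
		"kubernetesServerAddr": "https://192.168.0.10:6443",
	})
	fmt.Print(pass2)
}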
