Renamed ControlAgent to NodeControl

This commit is contained in:
2026-04-25 04:38:23 +08:00
parent 1354e83813
commit 8fae920fc8
20 changed files with 404 additions and 73 deletions

View File

@@ -16,8 +16,10 @@ var (
AltPartDeviceLink = "/dev/mksaltpart"
BootStateFile = "/run/monok8s/boot-state.env"
CatalogURL = "https://example.com/monok8s.io/v1alpha1/catalog.yaml"
ControlAgentName = "control-agent"
ControlAgentKey = "monok8s.io/control-agent"
NodeControlKey = "monok8s.io/node-control"
NodeControlName = "node-control"
ControllerName = "node-controller"
NodeAgentName = "node-agent"
EnvConfigDir = "/opt/monok8s/config"
Label = "monok8s.io/label"
MonoKSConfigCRD = "monoksconfigs.monok8s.io"

View File

@@ -25,7 +25,7 @@ type MonoKSConfigSpec struct {
ClusterDomain string `json:"clusterDomain,omitempty" yaml:"clusterDomain,omitempty"`
ClusterRole string `json:"clusterRole,omitempty" yaml:"clusterRole,omitempty"`
InitControlPlane bool `json:"initControlPlane,omitempty" yaml:"initControlPlane,omitempty"`
EnableControlAgent bool `json:"enableControlAgent,omitempty" yaml:"enableControlAgent,omitempty"`
EnableNodeControl bool `json:"enableNodeControl,omitempty" yaml:"enableNodeControl,omitempty"`
PodSubnet string `json:"podSubnet,omitempty" yaml:"podSubnet,omitempty"`
ServiceSubnet string `json:"serviceSubnet,omitempty" yaml:"serviceSubnet,omitempty"`
APIServerAdvertiseAddress string `json:"apiServerAdvertiseAddress,omitempty" yaml:"apiServerAdvertiseAddress,omitempty"`

View File

@@ -23,7 +23,7 @@ func NewRegistry(ctx *node.NodeContext) *Registry {
return &Registry{
steps: map[string]node.Step{
"ApplyControlAgentDaemonSetResources": node.ApplyControlAgentDaemonSetResources,
"ApplyNodeControlDaemonSetResources": node.ApplyNodeControlDaemonSetResources,
"ApplyLocalNodeMetadataIfPossible": node.ApplyLocalNodeMetadataIfPossible,
"CheckForVersionSkew": node.CheckForVersionSkew,
"ClassifyBootstrapAction": node.ClassifyBootstrapAction,

View File

@@ -158,7 +158,7 @@ func NewRunner(cfg *monov1alpha1.MonoKSConfig) *Runner {
Desc: "Make A/B booting possible",
},
{
RegKey: "ApplyControlAgentDaemonSetResources",
RegKey: "ApplyNodeControlDaemonSetResources",
Name: "Apply daemonset for control agent",
Desc: "Control agent handles OSUpgrade resources",
},

View File

@@ -1,9 +1,11 @@
package create
import (
"bytes"
"fmt"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"os"
render "example.com/monok8s/pkg/render"
)
@@ -58,5 +60,52 @@ func NewCmdCreate(flags *genericclioptions.ConfigFlags) *cobra.Command {
},
},
)
var authorizedKeysPath string
sshdcmd := cobra.Command{
Use: "sshd",
Short: "Print sshd deployment template",
RunE: func(cmd *cobra.Command, _ []string) error {
ns, _, err := flags.ToRawKubeConfigLoader().Namespace()
if err != nil {
return err
}
authorizedKeys, err := readAuthorizedKeysFile(authorizedKeysPath)
if err != nil {
return err
}
out, err := render.RenderSSHDDeployments(ns, authorizedKeys)
if err != nil {
return err
}
_, err = fmt.Fprint(cmd.OutOrStdout(), out)
return err
},
}
sshdcmd.Flags().StringVar(&authorizedKeysPath, "authkeys", "", "path to authorized_keys file")
cmd.AddCommand(&sshdcmd)
return cmd
}
func readAuthorizedKeysFile(path string) (string, error) {
if path == "" {
return "", fmt.Errorf("--authkeys is required")
}
b, err := os.ReadFile(path)
if err != nil {
return "", fmt.Errorf("read authorized_keys file %q: %w", path, err)
}
if len(bytes.TrimSpace(b)) == 0 {
return "", fmt.Errorf("authorized_keys file %q is empty", path)
}
return string(b), nil
}

View File

@@ -268,7 +268,7 @@ func listTargetNodeNames(
osu *monov1alpha1.OSUpgrade,
) ([]string, error) {
selector := labels.SelectorFromSet(labels.Set{
monov1alpha1.ControlAgentKey: "true",
monov1alpha1.NodeControlKey: "true",
})
if osu.Spec.NodeSelector != nil {

View File

@@ -21,15 +21,15 @@ import (
const (
controlAgentNodeSelectorValue = "true"
controlAgentImage = "localhost/monok8s/control-agent:dev"
controlAgentImage = "localhost/monok8s/node-control:dev"
kubeconfig = "/etc/kubernetes/admin.conf"
)
func ApplyControlAgentDaemonSetResources(ctx context.Context, n *NodeContext) error {
func ApplyNodeControlDaemonSetResources(ctx context.Context, n *NodeContext) error {
// Only the control-plane should bootstrap this DaemonSet definition.
// And only when the feature is enabled.
if strings.TrimSpace(n.Config.Spec.ClusterRole) != "control-plane" || !n.Config.Spec.EnableControlAgent {
klog.InfoS("skipped for", "clusterRole", n.Config.Spec.ClusterRole, "enableControlAgent", n.Config.Spec.EnableControlAgent)
if strings.TrimSpace(n.Config.Spec.ClusterRole) != "control-plane" || !n.Config.Spec.EnableNodeControl {
klog.InfoS("skipped for", "clusterRole", n.Config.Spec.ClusterRole, "enableNodeAgent", n.Config.Spec.EnableNodeControl)
return nil
}
@@ -49,10 +49,10 @@ func ApplyControlAgentDaemonSetResources(ctx context.Context, n *NodeContext) er
}
labels := map[string]string{
"app.kubernetes.io/name": monov1alpha1.ControlAgentName,
"app.kubernetes.io/name": monov1alpha1.NodeAgentName,
"app.kubernetes.io/component": "agent",
"app.kubernetes.io/part-of": "monok8s",
"app.kubernetes.io/managed-by": "ctl",
"app.kubernetes.io/managed-by": monov1alpha1.NodeControlName,
}
kubeClient := clients.Kubernetes
@@ -60,16 +60,16 @@ func ApplyControlAgentDaemonSetResources(ctx context.Context, n *NodeContext) er
if err := ensureNamespace(ctx, kubeClient, namespace, labels); err != nil {
return fmt.Errorf("ensure namespace %q: %w", namespace, err)
}
if err := applyControlAgentServiceAccount(ctx, kubeClient, namespace, labels); err != nil {
if err := applyNodeAgentServiceAccount(ctx, kubeClient, namespace, labels); err != nil {
return fmt.Errorf("apply serviceaccount: %w", err)
}
if err := applyControlAgentClusterRole(ctx, kubeClient, labels); err != nil {
if err := applyNodeAgentClusterRole(ctx, kubeClient, labels); err != nil {
return fmt.Errorf("apply clusterrole: %w", err)
}
if err := applyControlAgentClusterRoleBinding(ctx, kubeClient, namespace, labels); err != nil {
if err := applyNodeAgentClusterRoleBinding(ctx, kubeClient, namespace, labels); err != nil {
return fmt.Errorf("apply clusterrolebinding: %w", err)
}
if err := applyControlAgentDaemonSet(ctx, kubeClient, namespace, labels); err != nil {
if err := applyNodeAgentDaemonSet(ctx, kubeClient, namespace, labels); err != nil {
return fmt.Errorf("apply daemonset: %w", err)
}
@@ -116,16 +116,16 @@ func copyStringMap(in map[string]string) map[string]string {
return out
}
func applyControlAgentServiceAccount(ctx context.Context, kubeClient kubernetes.Interface, namespace string, labels map[string]string) error {
func applyNodeAgentServiceAccount(ctx context.Context, kubeClient kubernetes.Interface, namespace string, labels map[string]string) error {
want := &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: monov1alpha1.ControlAgentName,
Name: monov1alpha1.NodeAgentName,
Namespace: namespace,
Labels: labels,
},
}
existing, err := kubeClient.CoreV1().ServiceAccounts(namespace).Get(ctx, monov1alpha1.ControlAgentName, metav1.GetOptions{})
existing, err := kubeClient.CoreV1().ServiceAccounts(namespace).Get(ctx, monov1alpha1.NodeAgentName, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
_, err = kubeClient.CoreV1().ServiceAccounts(namespace).Create(ctx, want, metav1.CreateOptions{})
return err
@@ -148,7 +148,7 @@ func applyControlAgentServiceAccount(ctx context.Context, kubeClient kubernetes.
return err
}
func applyControlAgentClusterRole(ctx context.Context, kubeClient kubernetes.Interface, labels map[string]string) error {
func applyNodeAgentClusterRole(ctx context.Context, kubeClient kubernetes.Interface, labels map[string]string) error {
wantRules := []rbacv1.PolicyRule{
{
APIGroups: []string{monov1alpha1.Group},
@@ -174,13 +174,13 @@ func applyControlAgentClusterRole(ctx context.Context, kubeClient kubernetes.Int
want := &rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{
Name: monov1alpha1.ControlAgentName,
Name: monov1alpha1.NodeAgentName,
Labels: labels,
},
Rules: wantRules,
}
existing, err := kubeClient.RbacV1().ClusterRoles().Get(ctx, monov1alpha1.ControlAgentName, metav1.GetOptions{})
existing, err := kubeClient.RbacV1().ClusterRoles().Get(ctx, monov1alpha1.NodeAgentName, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
_, err = kubeClient.RbacV1().ClusterRoles().Create(ctx, want, metav1.CreateOptions{})
return err
@@ -207,30 +207,30 @@ func applyControlAgentClusterRole(ctx context.Context, kubeClient kubernetes.Int
return err
}
func applyControlAgentClusterRoleBinding(ctx context.Context, kubeClient kubernetes.Interface, namespace string, labels map[string]string) error {
func applyNodeAgentClusterRoleBinding(ctx context.Context, kubeClient kubernetes.Interface, namespace string, labels map[string]string) error {
wantRoleRef := rbacv1.RoleRef{
APIGroup: rbacv1.GroupName,
Kind: "ClusterRole",
Name: monov1alpha1.ControlAgentName,
Name: monov1alpha1.NodeAgentName,
}
wantSubjects := []rbacv1.Subject{
{
Kind: "ServiceAccount",
Name: monov1alpha1.ControlAgentName,
Name: monov1alpha1.NodeAgentName,
Namespace: namespace,
},
}
want := &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: monov1alpha1.ControlAgentName,
Name: monov1alpha1.NodeAgentName,
Labels: labels,
},
RoleRef: wantRoleRef,
Subjects: wantSubjects,
}
existing, err := kubeClient.RbacV1().ClusterRoleBindings().Get(ctx, monov1alpha1.ControlAgentName, metav1.GetOptions{})
existing, err := kubeClient.RbacV1().ClusterRoleBindings().Get(ctx, monov1alpha1.NodeAgentName, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
_, err = kubeClient.RbacV1().ClusterRoleBindings().Create(ctx, want, metav1.CreateOptions{})
return err
@@ -241,7 +241,7 @@ func applyControlAgentClusterRoleBinding(ctx context.Context, kubeClient kuberne
// roleRef is immutable. If it differs, fail loudly instead of pretending we can patch it.
if !reflect.DeepEqual(existing.RoleRef, want.RoleRef) {
return fmt.Errorf("existing ClusterRoleBinding %q has different roleRef and must be recreated", monov1alpha1.ControlAgentName)
return fmt.Errorf("existing ClusterRoleBinding %q has different roleRef and must be recreated", monov1alpha1.NodeAgentName)
}
changed := false
@@ -262,26 +262,26 @@ func applyControlAgentClusterRoleBinding(ctx context.Context, kubeClient kuberne
return err
}
func applyControlAgentDaemonSet(ctx context.Context, kubeClient kubernetes.Interface, namespace string, labels map[string]string) error {
func applyNodeAgentDaemonSet(ctx context.Context, kubeClient kubernetes.Interface, namespace string, labels map[string]string) error {
privileged := true
dsLabels := map[string]string{
"app.kubernetes.io/name": monov1alpha1.ControlAgentName,
"app.kubernetes.io/name": monov1alpha1.NodeAgentName,
"app.kubernetes.io/component": "agent",
"app.kubernetes.io/part-of": "monok8s",
"app.kubernetes.io/managed-by": "ctl",
"app.kubernetes.io/managed-by": monov1alpha1.NodeControlName,
}
want := &appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: monov1alpha1.ControlAgentName,
Name: monov1alpha1.NodeAgentName,
Namespace: namespace,
Labels: labels,
},
Spec: appsv1.DaemonSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"app.kubernetes.io/name": monov1alpha1.ControlAgentName,
"app.kubernetes.io/name": monov1alpha1.NodeAgentName,
},
},
Template: corev1.PodTemplateSpec{
@@ -289,12 +289,12 @@ func applyControlAgentDaemonSet(ctx context.Context, kubeClient kubernetes.Inter
Labels: dsLabels,
},
Spec: corev1.PodSpec{
ServiceAccountName: monov1alpha1.ControlAgentName,
ServiceAccountName: monov1alpha1.NodeAgentName,
HostNetwork: true,
HostPID: true,
DNSPolicy: corev1.DNSClusterFirstWithHostNet,
NodeSelector: map[string]string{
monov1alpha1.ControlAgentKey: controlAgentNodeSelectorValue,
monov1alpha1.NodeControlKey: controlAgentNodeSelectorValue,
},
Tolerations: []corev1.Toleration{
{Operator: corev1.TolerationOpExists},
@@ -379,7 +379,7 @@ func applyControlAgentDaemonSet(ctx context.Context, kubeClient kubernetes.Inter
},
}
existing, err := kubeClient.AppsV1().DaemonSets(namespace).Get(ctx, monov1alpha1.ControlAgentName, metav1.GetOptions{})
existing, err := kubeClient.AppsV1().DaemonSets(namespace).Get(ctx, monov1alpha1.NodeAgentName, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
_, err = kubeClient.AppsV1().DaemonSets(namespace).Create(ctx, want, metav1.CreateOptions{})
return err

View File

@@ -476,7 +476,7 @@ func buildNodeRegistration(spec monov1alpha1.MonoKSConfigSpec) NodeRegistrationO
}
func effectiveNodeLabels(spec monov1alpha1.MonoKSConfigSpec) map[string]string {
if len(spec.NodeLabels) == 0 && !spec.EnableControlAgent {
if len(spec.NodeLabels) == 0 && !spec.EnableNodeControl {
return nil
}
@@ -485,8 +485,8 @@ func effectiveNodeLabels(spec monov1alpha1.MonoKSConfigSpec) map[string]string {
labels[k] = v
}
if spec.EnableControlAgent {
labels[monov1alpha1.ControlAgentKey] = "true"
if spec.EnableNodeControl {
labels[monov1alpha1.NodeControlKey] = "true"
}
return labels

View File

@@ -14,6 +14,7 @@ import (
"k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
monov1alpha1 "example.com/monok8s/pkg/apis/monok8s/v1alpha1"
"example.com/monok8s/pkg/kube"
"example.com/monok8s/pkg/system"
)
@@ -113,7 +114,7 @@ func runUpgradeSelfHealthCheck(ctx context.Context, kubeClient kubernetes.Interf
Namespace: healthCheckNamespace,
Labels: map[string]string{
"app.kubernetes.io/name": "preupgrade-health-check",
"app.kubernetes.io/managed-by": "monok8s",
"app.kubernetes.io/managed-by": monov1alpha1.NodeControlName,
},
},
Spec: corev1.PodSpec{

View File

@@ -60,8 +60,8 @@ func ApplyLocalNodeMetadataIfPossible(ctx context.Context, nctx *NodeContext) er
}
// Additional Labels
if spec.EnableControlAgent {
node.Labels[monov1alpah1.ControlAgentKey] = controlAgentNodeSelectorValue
if spec.EnableNodeControl {
node.Labels[monov1alpah1.NodeControlKey] = controlAgentNodeSelectorValue
}
_, err = client.CoreV1().Nodes().Update(ctx, node, metav1.UpdateOptions{})

View File

@@ -13,6 +13,7 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
monov1alpha1 "example.com/monok8s/pkg/apis/monok8s/v1alpha1"
buildinfo "example.com/monok8s/pkg/buildinfo"
templates "example.com/monok8s/pkg/templates"
)
@@ -20,10 +21,10 @@ func RenderControllerDeployments(namespace string) (string, error) {
vals := templates.LoadTemplateValuesFromEnv()
labels := map[string]string{
"app.kubernetes.io/name": monov1alpha1.ControlAgentName,
"app.kubernetes.io/name": monov1alpha1.ControllerName,
"app.kubernetes.io/component": "controller",
"app.kubernetes.io/part-of": "monok8s",
"app.kubernetes.io/managed-by": "ctl",
"app.kubernetes.io/managed-by": monov1alpha1.NodeControlName,
}
objs := []runtime.Object{
@@ -66,7 +67,7 @@ func buildControllerServiceAccount(namespace string, labels map[string]string) *
Kind: "ServiceAccount",
},
ObjectMeta: metav1.ObjectMeta{
Name: monov1alpha1.ControlAgentName,
Name: monov1alpha1.ControllerName,
Namespace: namespace,
Labels: labels,
},
@@ -109,7 +110,7 @@ func buildControllerClusterRole(labels map[string]string) *rbacv1.ClusterRole {
Kind: "ClusterRole",
},
ObjectMeta: metav1.ObjectMeta{
Name: monov1alpha1.ControlAgentName,
Name: monov1alpha1.ControllerName,
Labels: labels,
},
Rules: wantRules,
@@ -121,7 +122,7 @@ func buildControllerClusterRoleBinding(namespace string, labels map[string]strin
wantSubjects := []rbacv1.Subject{
{
Kind: "ServiceAccount",
Name: monov1alpha1.ControlAgentName,
Name: monov1alpha1.ControllerName,
Namespace: namespace,
},
}
@@ -129,7 +130,7 @@ func buildControllerClusterRoleBinding(namespace string, labels map[string]strin
wantRoleRef := rbacv1.RoleRef{
APIGroup: rbacv1.GroupName,
Kind: "ClusterRole",
Name: monov1alpha1.ControlAgentName,
Name: monov1alpha1.ControllerName,
}
return &rbacv1.ClusterRoleBinding{
@@ -138,7 +139,7 @@ func buildControllerClusterRoleBinding(namespace string, labels map[string]strin
Kind: "ClusterRoleBinding",
},
ObjectMeta: metav1.ObjectMeta{
Name: monov1alpha1.ControlAgentName,
Name: monov1alpha1.ControllerName,
Labels: labels,
},
Subjects: wantSubjects,
@@ -150,7 +151,7 @@ func buildControllerDeployment(tVals templates.TemplateValues, namespace string,
replicas := int32(1)
selectorLabels := map[string]string{
"app.kubernetes.io/name": monov1alpha1.ControlAgentName,
"app.kubernetes.io/name": monov1alpha1.ControllerName,
"app.kubernetes.io/component": "controller",
}
@@ -165,7 +166,7 @@ func buildControllerDeployment(tVals templates.TemplateValues, namespace string,
Kind: "Deployment",
},
ObjectMeta: metav1.ObjectMeta{
Name: monov1alpha1.ControlAgentName,
Name: monov1alpha1.ControllerName,
Namespace: namespace,
Labels: labels,
},
@@ -179,11 +180,11 @@ func buildControllerDeployment(tVals templates.TemplateValues, namespace string,
Labels: podLabels,
},
Spec: corev1.PodSpec{
ServiceAccountName: monov1alpha1.ControlAgentName,
ServiceAccountName: monov1alpha1.ControllerName,
Containers: []corev1.Container{
{
Name: "controller",
Image: fmt.Sprintf("registry.local/control-agent:%s", tVals.KubernetesVersion),
Image: fmt.Sprintf("localhost/monok8s/node-control:%s", buildinfo.Version),
ImagePullPolicy: corev1.PullIfNotPresent,
Args: []string{
"controller",

278
clitools/pkg/render/sshd.go Normal file
View File

@@ -0,0 +1,278 @@
package render
import (
"bytes"
"fmt"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer/json"
"k8s.io/apimachinery/pkg/util/intstr"
monov1alpha1 "example.com/monok8s/pkg/apis/monok8s/v1alpha1"
"example.com/monok8s/pkg/templates"
)
const (
sshdName = "sshd"
sshdConfigName = "sshd-authorized-keys"
sshdNodePort = int32(30022)
)
// RenderSSHDDeployments renders the sshd ConfigMap, Service, and
// Deployment for the given namespace as one multi-document YAML string,
// with documents separated by "---" lines. authKeys is the raw
// authorized_keys file content embedded into the ConfigMap.
func RenderSSHDDeployments(namespace, authKeys string) (string, error) {
	tVals := templates.LoadTemplateValuesFromEnv()
	commonLabels := map[string]string{
		"app.kubernetes.io/name":       sshdName,
		"app.kubernetes.io/component":  "host-access",
		"app.kubernetes.io/part-of":    "monok8s",
		"app.kubernetes.io/managed-by": monov1alpha1.NodeControlName,
	}

	// Register every API group we serialize so the YAML serializer can
	// resolve the object kinds.
	scheme := runtime.NewScheme()
	_ = corev1.AddToScheme(scheme)
	_ = rbacv1.AddToScheme(scheme)
	_ = appsv1.AddToScheme(scheme)
	enc := json.NewYAMLSerializer(json.DefaultMetaFactory, scheme, scheme)

	var out bytes.Buffer
	for idx, obj := range []runtime.Object{
		buildSSHDConfigMap(authKeys, namespace, commonLabels),
		buildSSHDService(tVals, namespace, commonLabels),
		buildSSHDDeployment(tVals, namespace, commonLabels),
	} {
		if idx != 0 {
			if _, err := fmt.Fprintln(&out, "---"); err != nil {
				return "", err
			}
		}
		if err := enc.Encode(obj, &out); err != nil {
			return "", err
		}
	}
	return out.String(), nil
}
// buildSSHDConfigMap returns the ConfigMap that carries the
// authorized_keys content mounted into the sshd pod.
func buildSSHDConfigMap(
	authorizedKeys string,
	namespace string,
	labels map[string]string,
) *corev1.ConfigMap {
	cm := corev1.ConfigMap{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "v1",
			Kind:       "ConfigMap",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      sshdConfigName,
			Namespace: namespace,
			Labels:    labels,
		},
	}
	// Single key: the whole authorized_keys file content, verbatim.
	cm.Data = map[string]string{"authorized_keys": authorizedKeys}
	return &cm
}
// buildSSHDService returns the NodePort Service exposing the sshd pod's
// port 22 on the fixed node port sshdNodePort.
func buildSSHDService(
	tVals templates.TemplateValues,
	namespace string,
	labels map[string]string,
) *corev1.Service {
	// Select only the sshd pod scheduled on the node named in the
	// template values (assumes tVals.NodeName is populated — TODO confirm).
	podSelector := map[string]string{
		monov1alpha1.NodeControlKey: "true",
		"kubernetes.io/hostname":    tVals.NodeName,
	}
	sshPort := corev1.ServicePort{
		Name:       "ssh",
		Protocol:   corev1.ProtocolTCP,
		Port:       22,
		TargetPort: intstr.FromInt32(22),
		NodePort:   sshdNodePort,
	}
	return &corev1.Service{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "v1",
			Kind:       "Service",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      sshdName,
			Namespace: namespace,
			Labels:    labels,
		},
		Spec: corev1.ServiceSpec{
			Type:     corev1.ServiceTypeNodePort,
			Selector: podSelector,
			Ports:    []corev1.ServicePort{sshPort},
		},
	}
}
// buildSSHDDeployment returns a single-replica Deployment running a
// privileged root sshd container for direct host access.
//
// selectorLabels serve double duty: they are the Deployment's pod
// selector AND the pod's nodeSelector; the kubernetes.io/hostname entry
// pins the pod to the node named in tVals (assumes tVals.NodeName is
// set by the environment — TODO confirm at the caller).
func buildSSHDDeployment(
tVals templates.TemplateValues,
namespace string,
labels map[string]string,
) *appsv1.Deployment {
replicas := int32(1)
selectorLabels := map[string]string{
monov1alpha1.NodeControlKey: "true",
"kubernetes.io/hostname": tVals.NodeName,
}
// Pod labels = common labels + selector labels so the selector
// matches the template. mergeStringMaps is defined elsewhere in this
// package — presumably a non-mutating merge; verify.
podLabels := mergeStringMaps(labels, selectorLabels)
// Deliberately run as root and fully privileged: this pod exists to
// administer the underlying host.
runAsUser := int64(0)
runAsNonRoot := false
privileged := true
allowPrivilegeEscalation := true
readOnlyRootFilesystem := false
return &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
APIVersion: "apps/v1",
Kind: "Deployment",
},
ObjectMeta: metav1.ObjectMeta{
Name: sshdName,
Namespace: namespace,
Labels: labels,
},
Spec: appsv1.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{
MatchLabels: selectorLabels,
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: podLabels,
},
Spec: corev1.PodSpec{
NodeSelector: selectorLabels,
Containers: []corev1.Container{
{
Name: sshdName,
// NOTE(review): "latest" is an unpinned tag; consider pinning
// a digest or version for reproducible rollouts.
Image: "alpine:latest",
// The container installs openssh-server at startup (requires
// network egress when the pod starts), copies the mounted
// authorized_keys into root's home, and execs sshd in the
// foreground with key-only authentication.
Command: []string{
"/bin/sh",
"-ceu",
`
apk add --no-cache openssh-server
mkdir -p /run/sshd
mkdir -p /root/.ssh
cp /authorized-keys/authorized_keys /root/.ssh/authorized_keys
chmod 700 /root/.ssh
chmod 600 /root/.ssh/authorized_keys
ssh-keygen -A
exec /usr/sbin/sshd \
-D \
-e \
-p 22 \
-o PermitRootLogin=prohibit-password \
-o PasswordAuthentication=no \
-o KbdInteractiveAuthentication=no \
-o PubkeyAuthentication=yes \
-o AuthorizedKeysFile=/root/.ssh/authorized_keys
`,
},
Ports: []corev1.ContainerPort{
{
Name: "ssh",
ContainerPort: 22,
Protocol: corev1.ProtocolTCP,
},
},
SecurityContext: &corev1.SecurityContext{
RunAsUser: &runAsUser,
RunAsNonRoot: &runAsNonRoot,
Privileged: &privileged,
AllowPrivilegeEscalation: &allowPrivilegeEscalation,
ReadOnlyRootFilesystem: &readOnlyRootFilesystem,
},
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("10m"),
corev1.ResourceMemory: resource.MustParse("32Mi"),
},
Limits: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("200m"),
corev1.ResourceMemory: resource.MustParse("128Mi"),
},
},
VolumeMounts: []corev1.VolumeMount{
// authorized_keys from the ConfigMap, read-only.
{
Name: "authorized-keys",
MountPath: "/authorized-keys",
ReadOnly: true,
},
// NOTE(review): mounting host /etc and /var grants broad
// host access; these mounts are writable. Confirm this is
// the intended scope for the host-access feature.
{
Name: "host-etc",
MountPath: "/host/etc",
},
{
Name: "host-var",
MountPath: "/host/var",
},
},
},
},
Volumes: []corev1.Volume{
{
Name: "authorized-keys",
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: sshdConfigName,
},
// 0600: keys file readable only by root inside the pod.
DefaultMode: ptrInt32(0600),
},
},
},
{
Name: "host-etc",
VolumeSource: corev1.VolumeSource{
HostPath: &corev1.HostPathVolumeSource{
Path: "/etc",
Type: ptrHostPathType(corev1.HostPathDirectory),
},
},
},
{
Name: "host-var",
VolumeSource: corev1.VolumeSource{
HostPath: &corev1.HostPathVolumeSource{
Path: "/var",
Type: ptrHostPathType(corev1.HostPathDirectory),
},
},
},
},
},
},
},
}
}
// ptrInt32 returns a pointer to a copy of v.
func ptrInt32(v int32) *int32 {
	out := v
	return &out
}
// ptrHostPathType returns a pointer to a copy of v.
func ptrHostPathType(v corev1.HostPathType) *corev1.HostPathType {
	out := v
	return &out
}

View File

@@ -25,7 +25,7 @@ func DefaultMonoKSConfig(v TemplateValues) monov1alpha1.MonoKSConfig {
ClusterRole: v.ClusterRole,
InitControlPlane: v.InitControlPlane,
EnableControlAgent: v.EnableControlAgent,
EnableNodeControl: v.EnableNodeControl,
ClusterName: v.ClusterName,
ClusterDomain: v.ClusterDomain,

View File

@@ -24,9 +24,9 @@ type TemplateValues struct {
ContainerRuntimeEndpoint string
CNIPlugin string
ClusterRole string // worker, control-plane
InitControlPlane bool
EnableControlAgent bool
ClusterRole string // worker, control-plane
InitControlPlane bool
EnableNodeControl bool
AllowSchedulingOnControlPlane bool
SkipImageCheck bool
@@ -58,9 +58,9 @@ func defaultTemplateValues() TemplateValues {
ContainerRuntimeEndpoint: "unix:///var/run/crio/crio.sock",
CNIPlugin: "default",
ClusterRole: "control-plane",
InitControlPlane: true,
EnableControlAgent: true,
ClusterRole: "control-plane",
InitControlPlane: true,
EnableNodeControl: true,
AllowSchedulingOnControlPlane: true,
SkipImageCheck: false,
@@ -104,7 +104,7 @@ func LoadTemplateValuesFromEnv() TemplateValues {
v.ClusterRole = getenvDefault("MKS_CLUSTER_ROLE", v.ClusterRole)
v.InitControlPlane = getenvBoolDefault("MKS_INIT_CONTROL_PLANE", v.InitControlPlane)
v.EnableControlAgent = getenvBoolDefault("MKS_ENABLE_CONTROL_AGENT", v.EnableControlAgent)
v.EnableNodeControl = getenvBoolDefault("MKS_ENABLE_NODE_CONTROL", v.EnableNodeControl)
v.AllowSchedulingOnControlPlane = getenvBoolDefault("MKS_ALLOW_SCHEDULING_ON_CONTROL_PLANE", v.AllowSchedulingOnControlPlane)
v.SkipImageCheck = getenvBoolDefault("MKS_SKIP_IMAGE_CHECK", v.SkipImageCheck)