Renamed ControlAgent to NodeControl

This commit is contained in:
2026-04-25 04:38:23 +08:00
parent 1354e83813
commit 8fae920fc8
20 changed files with 404 additions and 73 deletions

View File

@@ -4,7 +4,7 @@ set -euo pipefail
/preload-k8s-images.sh || exit 1 /preload-k8s-images.sh || exit 1
export CTL_BIN_LAYER=$( skopeo inspect docker-daemon:localhost/monok8s/control-agent:dev | jq -r '.Layers[0] | sub("^sha256:"; "")' ) export CTL_BIN_LAYER=$( skopeo inspect docker-daemon:localhost/monok8s/node-control:dev | jq -r '.Layers[0] | sub("^sha256:"; "")' )
mkdir -p \ mkdir -p \
"$ROOTFS/dev" \ "$ROOTFS/dev" \

View File

@@ -26,7 +26,7 @@ FUSE_OVERLAYFS="${FUSE_OVERLAYFS:-/usr/bin/fuse-overlayfs}"
# ) # )
EXTRA_IMAGES=( EXTRA_IMAGES=(
"${EXTRA_IMAGES[@]:-}" "${EXTRA_IMAGES[@]:-}"
"docker-daemon:localhost/monok8s/control-agent:$TAG" "docker-daemon:localhost/monok8s/node-control:$TAG"
) )
# Keep archive cache version/arch scoped so downloads do not get mixed. # Keep archive cache version/arch scoped so downloads do not get mixed.

View File

@@ -125,7 +125,7 @@ build-agent: build uboot-tools
-f docker/ctl-agent.Dockerfile \ -f docker/ctl-agent.Dockerfile \
--build-arg VERSION=$(VERSION) \ --build-arg VERSION=$(VERSION) \
--load \ --load \
-t localhost/monok8s/control-agent:$(VERSION) . -t localhost/monok8s/node-control:$(VERSION) .
build-local: .buildinfo | $(BIN_DIR) build-local: .buildinfo | $(BIN_DIR)
docker buildx build \ docker buildx build \
@@ -138,13 +138,13 @@ build-local: .buildinfo | $(BIN_DIR)
run-agent: run-agent:
docker run --rm \ docker run --rm \
-v "$$(pwd)/out:/work/out" \ -v "$$(pwd)/out:/work/out" \
localhost/monok8s/control-agent:$(VERSION) \ localhost/monok8s/node-control:$(VERSION) \
agent --env-file /work/out/cluster.env agent --env-file /work/out/cluster.env
build: build-bin build-crds build: build-bin build-crds
clean: clean:
-docker image rm localhost/monok8s/control-agent:$(VERSION) >/dev/null 2>&1 || true -docker image rm localhost/monok8s/node-control:$(VERSION) >/dev/null 2>&1 || true
rm -rf \ rm -rf \
$(BIN_DIR) \ $(BIN_DIR) \
$(OUT_DIR)/crds \ $(OUT_DIR)/crds \
@@ -157,7 +157,7 @@ dockerclean:
@echo "Removing tagged images..." @echo "Removing tagged images..."
- docker rmi \ - docker rmi \
localhost/monok8s/ctl-build-base:$(VERSION) \ localhost/monok8s/ctl-build-base:$(VERSION) \
localhost/monok8s/control-agent:$(VERSION) \ localhost/monok8s/node-control:$(VERSION) \
localhost/monok8s/ctl-builder:$(VERSION) \ localhost/monok8s/ctl-builder:$(VERSION) \
localhost/monok8s/crdgen:$(VERSION) \ localhost/monok8s/crdgen:$(VERSION) \
2>/dev/null || true 2>/dev/null || true

View File

@@ -16,8 +16,10 @@ var (
AltPartDeviceLink = "/dev/mksaltpart" AltPartDeviceLink = "/dev/mksaltpart"
BootStateFile = "/run/monok8s/boot-state.env" BootStateFile = "/run/monok8s/boot-state.env"
CatalogURL = "https://example.com/monok8s.io/v1alpha1/catalog.yaml" CatalogURL = "https://example.com/monok8s.io/v1alpha1/catalog.yaml"
ControlAgentName = "control-agent" NodeControlKey = "monok8s.io/node-control"
ControlAgentKey = "monok8s.io/control-agent" NodeControlName = "node-control"
ControllerName = "node-controller"
NodeAgentName = "node-agent"
EnvConfigDir = "/opt/monok8s/config" EnvConfigDir = "/opt/monok8s/config"
Label = "monok8s.io/label" Label = "monok8s.io/label"
MonoKSConfigCRD = "monoksconfigs.monok8s.io" MonoKSConfigCRD = "monoksconfigs.monok8s.io"

View File

@@ -25,7 +25,7 @@ type MonoKSConfigSpec struct {
ClusterDomain string `json:"clusterDomain,omitempty" yaml:"clusterDomain,omitempty"` ClusterDomain string `json:"clusterDomain,omitempty" yaml:"clusterDomain,omitempty"`
ClusterRole string `json:"clusterRole,omitempty" yaml:"clusterRole,omitempty"` ClusterRole string `json:"clusterRole,omitempty" yaml:"clusterRole,omitempty"`
InitControlPlane bool `json:"initControlPlane,omitempty" yaml:"initControlPlane,omitempty"` InitControlPlane bool `json:"initControlPlane,omitempty" yaml:"initControlPlane,omitempty"`
EnableControlAgent bool `json:"enableControlAgent,omitempty" yaml:"enableControlAgent,omitempty"` EnableNodeControl bool `json:"enableNodeControl,omitempty" yaml:"enableNodeControl,omitempty"`
PodSubnet string `json:"podSubnet,omitempty" yaml:"podSubnet,omitempty"` PodSubnet string `json:"podSubnet,omitempty" yaml:"podSubnet,omitempty"`
ServiceSubnet string `json:"serviceSubnet,omitempty" yaml:"serviceSubnet,omitempty"` ServiceSubnet string `json:"serviceSubnet,omitempty" yaml:"serviceSubnet,omitempty"`
APIServerAdvertiseAddress string `json:"apiServerAdvertiseAddress,omitempty" yaml:"apiServerAdvertiseAddress,omitempty"` APIServerAdvertiseAddress string `json:"apiServerAdvertiseAddress,omitempty" yaml:"apiServerAdvertiseAddress,omitempty"`

View File

@@ -23,7 +23,7 @@ func NewRegistry(ctx *node.NodeContext) *Registry {
return &Registry{ return &Registry{
steps: map[string]node.Step{ steps: map[string]node.Step{
"ApplyControlAgentDaemonSetResources": node.ApplyControlAgentDaemonSetResources, "ApplyNodeControlDaemonSetResources": node.ApplyNodeControlDaemonSetResources,
"ApplyLocalNodeMetadataIfPossible": node.ApplyLocalNodeMetadataIfPossible, "ApplyLocalNodeMetadataIfPossible": node.ApplyLocalNodeMetadataIfPossible,
"CheckForVersionSkew": node.CheckForVersionSkew, "CheckForVersionSkew": node.CheckForVersionSkew,
"ClassifyBootstrapAction": node.ClassifyBootstrapAction, "ClassifyBootstrapAction": node.ClassifyBootstrapAction,

View File

@@ -158,7 +158,7 @@ func NewRunner(cfg *monov1alpha1.MonoKSConfig) *Runner {
Desc: "Make A/B booting possible", Desc: "Make A/B booting possible",
}, },
{ {
RegKey: "ApplyControlAgentDaemonSetResources", RegKey: "ApplyNodeControlDaemonSetResources",
Name: "Apply daemonset for control agent", Name: "Apply daemonset for control agent",
Desc: "Control agent handles OSUpgrade resources", Desc: "Control agent handles OSUpgrade resources",
}, },

View File

@@ -1,9 +1,11 @@
package create package create
import ( import (
"bytes"
"fmt" "fmt"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/genericclioptions"
"os"
render "example.com/monok8s/pkg/render" render "example.com/monok8s/pkg/render"
) )
@@ -58,5 +60,52 @@ func NewCmdCreate(flags *genericclioptions.ConfigFlags) *cobra.Command {
}, },
}, },
) )
var authorizedKeysPath string
sshdcmd := cobra.Command{
Use: "sshd",
Short: "Print sshd deployment template",
RunE: func(cmd *cobra.Command, _ []string) error {
ns, _, err := flags.ToRawKubeConfigLoader().Namespace()
if err != nil {
return err
}
authorizedKeys, err := readAuthorizedKeysFile(authorizedKeysPath)
if err != nil {
return err
}
out, err := render.RenderSSHDDeployments(ns, authorizedKeys)
if err != nil {
return err
}
_, err = fmt.Fprint(cmd.OutOrStdout(), out)
return err
},
}
sshdcmd.Flags().StringVar(&authorizedKeysPath, "authkeys", "", "path to authorized_keys file")
cmd.AddCommand(&sshdcmd)
return cmd return cmd
} }
// readAuthorizedKeysFile loads the contents of an authorized_keys file.
// It fails when no path was supplied, when the file cannot be read, or
// when the file holds nothing but whitespace.
func readAuthorizedKeysFile(path string) (string, error) {
	if path == "" {
		return "", fmt.Errorf("--authkeys is required")
	}
	data, err := os.ReadFile(path)
	switch {
	case err != nil:
		return "", fmt.Errorf("read authorized_keys file %q: %w", path, err)
	case len(bytes.TrimSpace(data)) == 0:
		// A whitespace-only key file would silently disable all logins.
		return "", fmt.Errorf("authorized_keys file %q is empty", path)
	}
	return string(data), nil
}

View File

@@ -268,7 +268,7 @@ func listTargetNodeNames(
osu *monov1alpha1.OSUpgrade, osu *monov1alpha1.OSUpgrade,
) ([]string, error) { ) ([]string, error) {
selector := labels.SelectorFromSet(labels.Set{ selector := labels.SelectorFromSet(labels.Set{
monov1alpha1.ControlAgentKey: "true", monov1alpha1.NodeControlKey: "true",
}) })
if osu.Spec.NodeSelector != nil { if osu.Spec.NodeSelector != nil {

View File

@@ -21,15 +21,15 @@ import (
const ( const (
controlAgentNodeSelectorValue = "true" controlAgentNodeSelectorValue = "true"
controlAgentImage = "localhost/monok8s/control-agent:dev" controlAgentImage = "localhost/monok8s/node-control:dev"
kubeconfig = "/etc/kubernetes/admin.conf" kubeconfig = "/etc/kubernetes/admin.conf"
) )
func ApplyControlAgentDaemonSetResources(ctx context.Context, n *NodeContext) error { func ApplyNodeControlDaemonSetResources(ctx context.Context, n *NodeContext) error {
// Only the control-plane should bootstrap this DaemonSet definition. // Only the control-plane should bootstrap this DaemonSet definition.
// And only when the feature is enabled. // And only when the feature is enabled.
if strings.TrimSpace(n.Config.Spec.ClusterRole) != "control-plane" || !n.Config.Spec.EnableControlAgent { if strings.TrimSpace(n.Config.Spec.ClusterRole) != "control-plane" || !n.Config.Spec.EnableNodeControl {
klog.InfoS("skipped for", "clusterRole", n.Config.Spec.ClusterRole, "enableControlAgent", n.Config.Spec.EnableControlAgent) klog.InfoS("skipped for", "clusterRole", n.Config.Spec.ClusterRole, "enableNodeAgent", n.Config.Spec.EnableNodeControl)
return nil return nil
} }
@@ -49,10 +49,10 @@ func ApplyControlAgentDaemonSetResources(ctx context.Context, n *NodeContext) er
} }
labels := map[string]string{ labels := map[string]string{
"app.kubernetes.io/name": monov1alpha1.ControlAgentName, "app.kubernetes.io/name": monov1alpha1.NodeAgentName,
"app.kubernetes.io/component": "agent", "app.kubernetes.io/component": "agent",
"app.kubernetes.io/part-of": "monok8s", "app.kubernetes.io/part-of": "monok8s",
"app.kubernetes.io/managed-by": "ctl", "app.kubernetes.io/managed-by": monov1alpha1.NodeControlName,
} }
kubeClient := clients.Kubernetes kubeClient := clients.Kubernetes
@@ -60,16 +60,16 @@ func ApplyControlAgentDaemonSetResources(ctx context.Context, n *NodeContext) er
if err := ensureNamespace(ctx, kubeClient, namespace, labels); err != nil { if err := ensureNamespace(ctx, kubeClient, namespace, labels); err != nil {
return fmt.Errorf("ensure namespace %q: %w", namespace, err) return fmt.Errorf("ensure namespace %q: %w", namespace, err)
} }
if err := applyControlAgentServiceAccount(ctx, kubeClient, namespace, labels); err != nil { if err := applyNodeAgentServiceAccount(ctx, kubeClient, namespace, labels); err != nil {
return fmt.Errorf("apply serviceaccount: %w", err) return fmt.Errorf("apply serviceaccount: %w", err)
} }
if err := applyControlAgentClusterRole(ctx, kubeClient, labels); err != nil { if err := applyNodeAgentClusterRole(ctx, kubeClient, labels); err != nil {
return fmt.Errorf("apply clusterrole: %w", err) return fmt.Errorf("apply clusterrole: %w", err)
} }
if err := applyControlAgentClusterRoleBinding(ctx, kubeClient, namespace, labels); err != nil { if err := applyNodeAgentClusterRoleBinding(ctx, kubeClient, namespace, labels); err != nil {
return fmt.Errorf("apply clusterrolebinding: %w", err) return fmt.Errorf("apply clusterrolebinding: %w", err)
} }
if err := applyControlAgentDaemonSet(ctx, kubeClient, namespace, labels); err != nil { if err := applyNodeAgentDaemonSet(ctx, kubeClient, namespace, labels); err != nil {
return fmt.Errorf("apply daemonset: %w", err) return fmt.Errorf("apply daemonset: %w", err)
} }
@@ -116,16 +116,16 @@ func copyStringMap(in map[string]string) map[string]string {
return out return out
} }
func applyControlAgentServiceAccount(ctx context.Context, kubeClient kubernetes.Interface, namespace string, labels map[string]string) error { func applyNodeAgentServiceAccount(ctx context.Context, kubeClient kubernetes.Interface, namespace string, labels map[string]string) error {
want := &corev1.ServiceAccount{ want := &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: monov1alpha1.ControlAgentName, Name: monov1alpha1.NodeAgentName,
Namespace: namespace, Namespace: namespace,
Labels: labels, Labels: labels,
}, },
} }
existing, err := kubeClient.CoreV1().ServiceAccounts(namespace).Get(ctx, monov1alpha1.ControlAgentName, metav1.GetOptions{}) existing, err := kubeClient.CoreV1().ServiceAccounts(namespace).Get(ctx, monov1alpha1.NodeAgentName, metav1.GetOptions{})
if apierrors.IsNotFound(err) { if apierrors.IsNotFound(err) {
_, err = kubeClient.CoreV1().ServiceAccounts(namespace).Create(ctx, want, metav1.CreateOptions{}) _, err = kubeClient.CoreV1().ServiceAccounts(namespace).Create(ctx, want, metav1.CreateOptions{})
return err return err
@@ -148,7 +148,7 @@ func applyControlAgentServiceAccount(ctx context.Context, kubeClient kubernetes.
return err return err
} }
func applyControlAgentClusterRole(ctx context.Context, kubeClient kubernetes.Interface, labels map[string]string) error { func applyNodeAgentClusterRole(ctx context.Context, kubeClient kubernetes.Interface, labels map[string]string) error {
wantRules := []rbacv1.PolicyRule{ wantRules := []rbacv1.PolicyRule{
{ {
APIGroups: []string{monov1alpha1.Group}, APIGroups: []string{monov1alpha1.Group},
@@ -174,13 +174,13 @@ func applyControlAgentClusterRole(ctx context.Context, kubeClient kubernetes.Int
want := &rbacv1.ClusterRole{ want := &rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: monov1alpha1.ControlAgentName, Name: monov1alpha1.NodeAgentName,
Labels: labels, Labels: labels,
}, },
Rules: wantRules, Rules: wantRules,
} }
existing, err := kubeClient.RbacV1().ClusterRoles().Get(ctx, monov1alpha1.ControlAgentName, metav1.GetOptions{}) existing, err := kubeClient.RbacV1().ClusterRoles().Get(ctx, monov1alpha1.NodeAgentName, metav1.GetOptions{})
if apierrors.IsNotFound(err) { if apierrors.IsNotFound(err) {
_, err = kubeClient.RbacV1().ClusterRoles().Create(ctx, want, metav1.CreateOptions{}) _, err = kubeClient.RbacV1().ClusterRoles().Create(ctx, want, metav1.CreateOptions{})
return err return err
@@ -207,30 +207,30 @@ func applyControlAgentClusterRole(ctx context.Context, kubeClient kubernetes.Int
return err return err
} }
func applyControlAgentClusterRoleBinding(ctx context.Context, kubeClient kubernetes.Interface, namespace string, labels map[string]string) error { func applyNodeAgentClusterRoleBinding(ctx context.Context, kubeClient kubernetes.Interface, namespace string, labels map[string]string) error {
wantRoleRef := rbacv1.RoleRef{ wantRoleRef := rbacv1.RoleRef{
APIGroup: rbacv1.GroupName, APIGroup: rbacv1.GroupName,
Kind: "ClusterRole", Kind: "ClusterRole",
Name: monov1alpha1.ControlAgentName, Name: monov1alpha1.NodeAgentName,
} }
wantSubjects := []rbacv1.Subject{ wantSubjects := []rbacv1.Subject{
{ {
Kind: "ServiceAccount", Kind: "ServiceAccount",
Name: monov1alpha1.ControlAgentName, Name: monov1alpha1.NodeAgentName,
Namespace: namespace, Namespace: namespace,
}, },
} }
want := &rbacv1.ClusterRoleBinding{ want := &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: monov1alpha1.ControlAgentName, Name: monov1alpha1.NodeAgentName,
Labels: labels, Labels: labels,
}, },
RoleRef: wantRoleRef, RoleRef: wantRoleRef,
Subjects: wantSubjects, Subjects: wantSubjects,
} }
existing, err := kubeClient.RbacV1().ClusterRoleBindings().Get(ctx, monov1alpha1.ControlAgentName, metav1.GetOptions{}) existing, err := kubeClient.RbacV1().ClusterRoleBindings().Get(ctx, monov1alpha1.NodeAgentName, metav1.GetOptions{})
if apierrors.IsNotFound(err) { if apierrors.IsNotFound(err) {
_, err = kubeClient.RbacV1().ClusterRoleBindings().Create(ctx, want, metav1.CreateOptions{}) _, err = kubeClient.RbacV1().ClusterRoleBindings().Create(ctx, want, metav1.CreateOptions{})
return err return err
@@ -241,7 +241,7 @@ func applyControlAgentClusterRoleBinding(ctx context.Context, kubeClient kuberne
// roleRef is immutable. If it differs, fail loudly instead of pretending we can patch it. // roleRef is immutable. If it differs, fail loudly instead of pretending we can patch it.
if !reflect.DeepEqual(existing.RoleRef, want.RoleRef) { if !reflect.DeepEqual(existing.RoleRef, want.RoleRef) {
return fmt.Errorf("existing ClusterRoleBinding %q has different roleRef and must be recreated", monov1alpha1.ControlAgentName) return fmt.Errorf("existing ClusterRoleBinding %q has different roleRef and must be recreated", monov1alpha1.NodeAgentName)
} }
changed := false changed := false
@@ -262,26 +262,26 @@ func applyControlAgentClusterRoleBinding(ctx context.Context, kubeClient kuberne
return err return err
} }
func applyControlAgentDaemonSet(ctx context.Context, kubeClient kubernetes.Interface, namespace string, labels map[string]string) error { func applyNodeAgentDaemonSet(ctx context.Context, kubeClient kubernetes.Interface, namespace string, labels map[string]string) error {
privileged := true privileged := true
dsLabels := map[string]string{ dsLabels := map[string]string{
"app.kubernetes.io/name": monov1alpha1.ControlAgentName, "app.kubernetes.io/name": monov1alpha1.NodeAgentName,
"app.kubernetes.io/component": "agent", "app.kubernetes.io/component": "agent",
"app.kubernetes.io/part-of": "monok8s", "app.kubernetes.io/part-of": "monok8s",
"app.kubernetes.io/managed-by": "ctl", "app.kubernetes.io/managed-by": monov1alpha1.NodeControlName,
} }
want := &appsv1.DaemonSet{ want := &appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: monov1alpha1.ControlAgentName, Name: monov1alpha1.NodeAgentName,
Namespace: namespace, Namespace: namespace,
Labels: labels, Labels: labels,
}, },
Spec: appsv1.DaemonSetSpec{ Spec: appsv1.DaemonSetSpec{
Selector: &metav1.LabelSelector{ Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{ MatchLabels: map[string]string{
"app.kubernetes.io/name": monov1alpha1.ControlAgentName, "app.kubernetes.io/name": monov1alpha1.NodeAgentName,
}, },
}, },
Template: corev1.PodTemplateSpec{ Template: corev1.PodTemplateSpec{
@@ -289,12 +289,12 @@ func applyControlAgentDaemonSet(ctx context.Context, kubeClient kubernetes.Inter
Labels: dsLabels, Labels: dsLabels,
}, },
Spec: corev1.PodSpec{ Spec: corev1.PodSpec{
ServiceAccountName: monov1alpha1.ControlAgentName, ServiceAccountName: monov1alpha1.NodeAgentName,
HostNetwork: true, HostNetwork: true,
HostPID: true, HostPID: true,
DNSPolicy: corev1.DNSClusterFirstWithHostNet, DNSPolicy: corev1.DNSClusterFirstWithHostNet,
NodeSelector: map[string]string{ NodeSelector: map[string]string{
monov1alpha1.ControlAgentKey: controlAgentNodeSelectorValue, monov1alpha1.NodeControlKey: controlAgentNodeSelectorValue,
}, },
Tolerations: []corev1.Toleration{ Tolerations: []corev1.Toleration{
{Operator: corev1.TolerationOpExists}, {Operator: corev1.TolerationOpExists},
@@ -379,7 +379,7 @@ func applyControlAgentDaemonSet(ctx context.Context, kubeClient kubernetes.Inter
}, },
} }
existing, err := kubeClient.AppsV1().DaemonSets(namespace).Get(ctx, monov1alpha1.ControlAgentName, metav1.GetOptions{}) existing, err := kubeClient.AppsV1().DaemonSets(namespace).Get(ctx, monov1alpha1.NodeAgentName, metav1.GetOptions{})
if apierrors.IsNotFound(err) { if apierrors.IsNotFound(err) {
_, err = kubeClient.AppsV1().DaemonSets(namespace).Create(ctx, want, metav1.CreateOptions{}) _, err = kubeClient.AppsV1().DaemonSets(namespace).Create(ctx, want, metav1.CreateOptions{})
return err return err

View File

@@ -476,7 +476,7 @@ func buildNodeRegistration(spec monov1alpha1.MonoKSConfigSpec) NodeRegistrationO
} }
func effectiveNodeLabels(spec monov1alpha1.MonoKSConfigSpec) map[string]string { func effectiveNodeLabels(spec monov1alpha1.MonoKSConfigSpec) map[string]string {
if len(spec.NodeLabels) == 0 && !spec.EnableControlAgent { if len(spec.NodeLabels) == 0 && !spec.EnableNodeControl {
return nil return nil
} }
@@ -485,8 +485,8 @@ func effectiveNodeLabels(spec monov1alpha1.MonoKSConfigSpec) map[string]string {
labels[k] = v labels[k] = v
} }
if spec.EnableControlAgent { if spec.EnableNodeControl {
labels[monov1alpha1.ControlAgentKey] = "true" labels[monov1alpha1.NodeControlKey] = "true"
} }
return labels return labels

View File

@@ -14,6 +14,7 @@ import (
"k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2" "k8s.io/klog/v2"
monov1alpha1 "example.com/monok8s/pkg/apis/monok8s/v1alpha1"
"example.com/monok8s/pkg/kube" "example.com/monok8s/pkg/kube"
"example.com/monok8s/pkg/system" "example.com/monok8s/pkg/system"
) )
@@ -113,7 +114,7 @@ func runUpgradeSelfHealthCheck(ctx context.Context, kubeClient kubernetes.Interf
Namespace: healthCheckNamespace, Namespace: healthCheckNamespace,
Labels: map[string]string{ Labels: map[string]string{
"app.kubernetes.io/name": "preupgrade-health-check", "app.kubernetes.io/name": "preupgrade-health-check",
"app.kubernetes.io/managed-by": "monok8s", "app.kubernetes.io/managed-by": monov1alpha1.NodeControlName,
}, },
}, },
Spec: corev1.PodSpec{ Spec: corev1.PodSpec{

View File

@@ -60,8 +60,8 @@ func ApplyLocalNodeMetadataIfPossible(ctx context.Context, nctx *NodeContext) er
} }
// Additional Labels // Additional Labels
if spec.EnableControlAgent { if spec.EnableNodeControl {
node.Labels[monov1alpah1.ControlAgentKey] = controlAgentNodeSelectorValue node.Labels[monov1alpah1.NodeControlKey] = controlAgentNodeSelectorValue
} }
_, err = client.CoreV1().Nodes().Update(ctx, node, metav1.UpdateOptions{}) _, err = client.CoreV1().Nodes().Update(ctx, node, metav1.UpdateOptions{})

View File

@@ -13,6 +13,7 @@ import (
"k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/intstr"
monov1alpha1 "example.com/monok8s/pkg/apis/monok8s/v1alpha1" monov1alpha1 "example.com/monok8s/pkg/apis/monok8s/v1alpha1"
buildinfo "example.com/monok8s/pkg/buildinfo"
templates "example.com/monok8s/pkg/templates" templates "example.com/monok8s/pkg/templates"
) )
@@ -20,10 +21,10 @@ func RenderControllerDeployments(namespace string) (string, error) {
vals := templates.LoadTemplateValuesFromEnv() vals := templates.LoadTemplateValuesFromEnv()
labels := map[string]string{ labels := map[string]string{
"app.kubernetes.io/name": monov1alpha1.ControlAgentName, "app.kubernetes.io/name": monov1alpha1.ControllerName,
"app.kubernetes.io/component": "controller", "app.kubernetes.io/component": "controller",
"app.kubernetes.io/part-of": "monok8s", "app.kubernetes.io/part-of": "monok8s",
"app.kubernetes.io/managed-by": "ctl", "app.kubernetes.io/managed-by": monov1alpha1.NodeControlName,
} }
objs := []runtime.Object{ objs := []runtime.Object{
@@ -66,7 +67,7 @@ func buildControllerServiceAccount(namespace string, labels map[string]string) *
Kind: "ServiceAccount", Kind: "ServiceAccount",
}, },
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: monov1alpha1.ControlAgentName, Name: monov1alpha1.ControllerName,
Namespace: namespace, Namespace: namespace,
Labels: labels, Labels: labels,
}, },
@@ -109,7 +110,7 @@ func buildControllerClusterRole(labels map[string]string) *rbacv1.ClusterRole {
Kind: "ClusterRole", Kind: "ClusterRole",
}, },
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: monov1alpha1.ControlAgentName, Name: monov1alpha1.ControllerName,
Labels: labels, Labels: labels,
}, },
Rules: wantRules, Rules: wantRules,
@@ -121,7 +122,7 @@ func buildControllerClusterRoleBinding(namespace string, labels map[string]strin
wantSubjects := []rbacv1.Subject{ wantSubjects := []rbacv1.Subject{
{ {
Kind: "ServiceAccount", Kind: "ServiceAccount",
Name: monov1alpha1.ControlAgentName, Name: monov1alpha1.ControllerName,
Namespace: namespace, Namespace: namespace,
}, },
} }
@@ -129,7 +130,7 @@ func buildControllerClusterRoleBinding(namespace string, labels map[string]strin
wantRoleRef := rbacv1.RoleRef{ wantRoleRef := rbacv1.RoleRef{
APIGroup: rbacv1.GroupName, APIGroup: rbacv1.GroupName,
Kind: "ClusterRole", Kind: "ClusterRole",
Name: monov1alpha1.ControlAgentName, Name: monov1alpha1.ControllerName,
} }
return &rbacv1.ClusterRoleBinding{ return &rbacv1.ClusterRoleBinding{
@@ -138,7 +139,7 @@ func buildControllerClusterRoleBinding(namespace string, labels map[string]strin
Kind: "ClusterRoleBinding", Kind: "ClusterRoleBinding",
}, },
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: monov1alpha1.ControlAgentName, Name: monov1alpha1.ControllerName,
Labels: labels, Labels: labels,
}, },
Subjects: wantSubjects, Subjects: wantSubjects,
@@ -150,7 +151,7 @@ func buildControllerDeployment(tVals templates.TemplateValues, namespace string,
replicas := int32(1) replicas := int32(1)
selectorLabels := map[string]string{ selectorLabels := map[string]string{
"app.kubernetes.io/name": monov1alpha1.ControlAgentName, "app.kubernetes.io/name": monov1alpha1.ControllerName,
"app.kubernetes.io/component": "controller", "app.kubernetes.io/component": "controller",
} }
@@ -165,7 +166,7 @@ func buildControllerDeployment(tVals templates.TemplateValues, namespace string,
Kind: "Deployment", Kind: "Deployment",
}, },
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: monov1alpha1.ControlAgentName, Name: monov1alpha1.ControllerName,
Namespace: namespace, Namespace: namespace,
Labels: labels, Labels: labels,
}, },
@@ -179,11 +180,11 @@ func buildControllerDeployment(tVals templates.TemplateValues, namespace string,
Labels: podLabels, Labels: podLabels,
}, },
Spec: corev1.PodSpec{ Spec: corev1.PodSpec{
ServiceAccountName: monov1alpha1.ControlAgentName, ServiceAccountName: monov1alpha1.ControllerName,
Containers: []corev1.Container{ Containers: []corev1.Container{
{ {
Name: "controller", Name: "controller",
Image: fmt.Sprintf("registry.local/control-agent:%s", tVals.KubernetesVersion), Image: fmt.Sprintf("localhost/monok8s/node-control:%s", buildinfo.Version),
ImagePullPolicy: corev1.PullIfNotPresent, ImagePullPolicy: corev1.PullIfNotPresent,
Args: []string{ Args: []string{
"controller", "controller",

278
clitools/pkg/render/sshd.go Normal file
View File

@@ -0,0 +1,278 @@
package render
import (
"bytes"
"fmt"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer/json"
"k8s.io/apimachinery/pkg/util/intstr"
monov1alpha1 "example.com/monok8s/pkg/apis/monok8s/v1alpha1"
"example.com/monok8s/pkg/templates"
)
const (
	// sshdName is the shared name used for the sshd Deployment, Service
	// and the app.kubernetes.io/name label.
	sshdName = "sshd"
	// sshdConfigName names the ConfigMap that carries the authorized_keys
	// payload mounted into the sshd pod.
	sshdConfigName = "sshd-authorized-keys"
	// sshdNodePort is the fixed NodePort on which container port 22 is
	// exposed (must lie in the cluster's NodePort range, typically 30000-32767).
	sshdNodePort = int32(30022)
)
// RenderSSHDDeployments renders the YAML manifests (ConfigMap, Service,
// Deployment) for the node-pinned sshd host-access pod in the given
// namespace, embedding authKeys as the authorized_keys content. The
// documents are emitted in that order, separated by "---".
func RenderSSHDDeployments(namespace, authKeys string) (string, error) {
	vals := templates.LoadTemplateValuesFromEnv()
	labels := map[string]string{
		"app.kubernetes.io/name":       sshdName,
		"app.kubernetes.io/component":  "host-access",
		"app.kubernetes.io/part-of":    "monok8s",
		"app.kubernetes.io/managed-by": monov1alpha1.NodeControlName,
	}

	// Scheme registration is required so the serializer knows the GVKs.
	scheme := runtime.NewScheme()
	_ = corev1.AddToScheme(scheme)
	_ = rbacv1.AddToScheme(scheme)
	_ = appsv1.AddToScheme(scheme)
	enc := json.NewYAMLSerializer(json.DefaultMetaFactory, scheme, scheme)

	var out bytes.Buffer
	for i, obj := range []runtime.Object{
		buildSSHDConfigMap(authKeys, namespace, labels),
		buildSSHDService(vals, namespace, labels),
		buildSSHDDeployment(vals, namespace, labels),
	} {
		if i > 0 {
			if _, err := fmt.Fprintln(&out, "---"); err != nil {
				return "", err
			}
		}
		if err := enc.Encode(obj, &out); err != nil {
			return "", err
		}
	}
	return out.String(), nil
}
// buildSSHDConfigMap returns the ConfigMap holding the authorized_keys
// payload that the sshd pod mounts read-only.
func buildSSHDConfigMap(
	authorizedKeys string,
	namespace string,
	labels map[string]string,
) *corev1.ConfigMap {
	cm := &corev1.ConfigMap{
		TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "ConfigMap"},
		ObjectMeta: metav1.ObjectMeta{
			Name:      sshdConfigName,
			Namespace: namespace,
			Labels:    labels,
		},
	}
	cm.Data = map[string]string{"authorized_keys": authorizedKeys}
	return cm
}
// buildSSHDService returns a NodePort Service exposing container port 22
// of the sshd pod on sshdNodePort. The selector pins it to the pod
// scheduled on this node (via the kubernetes.io/hostname label).
func buildSSHDService(
	tVals templates.TemplateValues,
	namespace string,
	labels map[string]string,
) *corev1.Service {
	svc := &corev1.Service{
		TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "Service"},
		ObjectMeta: metav1.ObjectMeta{
			Name:      sshdName,
			Namespace: namespace,
			Labels:    labels,
		},
	}
	svc.Spec = corev1.ServiceSpec{
		Type: corev1.ServiceTypeNodePort,
		// Must match the pod labels set by buildSSHDDeployment's podLabels.
		Selector: map[string]string{
			monov1alpha1.NodeControlKey: "true",
			"kubernetes.io/hostname":    tVals.NodeName,
		},
		Ports: []corev1.ServicePort{{
			Name:       "ssh",
			Protocol:   corev1.ProtocolTCP,
			Port:       22,
			TargetPort: intstr.FromInt32(22),
			NodePort:   sshdNodePort,
		}},
	}
	return svc
}
// buildSSHDDeployment returns a single-replica sshd Deployment pinned to
// the node named in tVals.NodeName. The pod runs as root and privileged
// and mounts the host's /etc and /var, so the exposed ssh session can
// inspect and modify host state; access is restricted to the public keys
// rendered into the sshdConfigName ConfigMap.
func buildSSHDDeployment(
	tVals templates.TemplateValues,
	namespace string,
	labels map[string]string,
) *appsv1.Deployment {
	replicas := int32(1)
	// These labels both select the pod (Deployment selector / Service
	// selector) and constrain scheduling to the target node (NodeSelector).
	selectorLabels := map[string]string{
		monov1alpha1.NodeControlKey: "true",
		"kubernetes.io/hostname":    tVals.NodeName,
	}
	podLabels := mergeStringMaps(labels, selectorLabels)
	runAsUser := int64(0)
	runAsNonRoot := false
	privileged := true
	allowPrivilegeEscalation := true
	readOnlyRootFilesystem := false
	return &appsv1.Deployment{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "apps/v1",
			Kind:       "Deployment",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      sshdName,
			Namespace: namespace,
			Labels:    labels,
		},
		Spec: appsv1.DeploymentSpec{
			Replicas: &replicas,
			Selector: &metav1.LabelSelector{
				MatchLabels: selectorLabels,
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: podLabels,
				},
				Spec: corev1.PodSpec{
					NodeSelector: selectorLabels,
					Containers: []corev1.Container{
						{
							Name: sshdName,
							// NOTE(review): alpine:latest is unpinned; presumably
							// acceptable for this tooling — confirm, or pin a digest.
							Image: "alpine:latest",
							// Installs openssh at startup, seeds authorized_keys from
							// the mounted ConfigMap, then execs sshd in the foreground
							// with key-only root login.
							Command: []string{
								"/bin/sh",
								"-ceu",
								`
apk add --no-cache openssh-server
mkdir -p /run/sshd
mkdir -p /root/.ssh
cp /authorized-keys/authorized_keys /root/.ssh/authorized_keys
chmod 700 /root/.ssh
chmod 600 /root/.ssh/authorized_keys
ssh-keygen -A
exec /usr/sbin/sshd \
  -D \
  -e \
  -p 22 \
  -o PermitRootLogin=prohibit-password \
  -o PasswordAuthentication=no \
  -o KbdInteractiveAuthentication=no \
  -o PubkeyAuthentication=yes \
  -o AuthorizedKeysFile=/root/.ssh/authorized_keys
`,
							},
							Ports: []corev1.ContainerPort{
								{
									Name:          "ssh",
									ContainerPort: 22,
									Protocol:      corev1.ProtocolTCP,
								},
							},
							// Deliberately permissive: root + privileged is required
							// for meaningful host access through the mounts below.
							SecurityContext: &corev1.SecurityContext{
								RunAsUser:                &runAsUser,
								RunAsNonRoot:             &runAsNonRoot,
								Privileged:               &privileged,
								AllowPrivilegeEscalation: &allowPrivilegeEscalation,
								ReadOnlyRootFilesystem:   &readOnlyRootFilesystem,
							},
							Resources: corev1.ResourceRequirements{
								Requests: corev1.ResourceList{
									corev1.ResourceCPU:    resource.MustParse("10m"),
									corev1.ResourceMemory: resource.MustParse("32Mi"),
								},
								Limits: corev1.ResourceList{
									corev1.ResourceCPU:    resource.MustParse("200m"),
									corev1.ResourceMemory: resource.MustParse("128Mi"),
								},
							},
							VolumeMounts: []corev1.VolumeMount{
								{
									Name:      "authorized-keys",
									MountPath: "/authorized-keys",
									ReadOnly:  true,
								},
								{
									Name:      "host-etc",
									MountPath: "/host/etc",
								},
								{
									Name:      "host-var",
									MountPath: "/host/var",
								},
							},
						},
					},
					Volumes: []corev1.Volume{
						{
							Name: "authorized-keys",
							VolumeSource: corev1.VolumeSource{
								ConfigMap: &corev1.ConfigMapVolumeSource{
									LocalObjectReference: corev1.LocalObjectReference{
										Name: sshdConfigName,
									},
									// 0600: key material readable by root only.
									DefaultMode: ptrInt32(0600),
								},
							},
						},
						{
							Name: "host-etc",
							VolumeSource: corev1.VolumeSource{
								HostPath: &corev1.HostPathVolumeSource{
									Path: "/etc",
									Type: ptrHostPathType(corev1.HostPathDirectory),
								},
							},
						},
						{
							Name: "host-var",
							VolumeSource: corev1.VolumeSource{
								HostPath: &corev1.HostPathVolumeSource{
									Path: "/var",
									Type: ptrHostPathType(corev1.HostPathDirectory),
								},
							},
						},
					},
				},
			},
		},
	}
}
// ptrInt32 returns a pointer to a copy of v.
func ptrInt32(v int32) *int32 {
	out := v
	return &out
}
// ptrHostPathType returns a pointer to a copy of v.
func ptrHostPathType(v corev1.HostPathType) *corev1.HostPathType {
	out := v
	return &out
}

View File

@@ -25,7 +25,7 @@ func DefaultMonoKSConfig(v TemplateValues) monov1alpha1.MonoKSConfig {
ClusterRole: v.ClusterRole, ClusterRole: v.ClusterRole,
InitControlPlane: v.InitControlPlane, InitControlPlane: v.InitControlPlane,
EnableControlAgent: v.EnableControlAgent, EnableNodeControl: v.EnableNodeControl,
ClusterName: v.ClusterName, ClusterName: v.ClusterName,
ClusterDomain: v.ClusterDomain, ClusterDomain: v.ClusterDomain,

View File

@@ -26,7 +26,7 @@ type TemplateValues struct {
ClusterRole string // worker, control-plane ClusterRole string // worker, control-plane
InitControlPlane bool InitControlPlane bool
EnableControlAgent bool EnableNodeControl bool
AllowSchedulingOnControlPlane bool AllowSchedulingOnControlPlane bool
SkipImageCheck bool SkipImageCheck bool
@@ -60,7 +60,7 @@ func defaultTemplateValues() TemplateValues {
ClusterRole: "control-plane", ClusterRole: "control-plane",
InitControlPlane: true, InitControlPlane: true,
EnableControlAgent: true, EnableNodeControl: true,
AllowSchedulingOnControlPlane: true, AllowSchedulingOnControlPlane: true,
SkipImageCheck: false, SkipImageCheck: false,
@@ -104,7 +104,7 @@ func LoadTemplateValuesFromEnv() TemplateValues {
v.ClusterRole = getenvDefault("MKS_CLUSTER_ROLE", v.ClusterRole) v.ClusterRole = getenvDefault("MKS_CLUSTER_ROLE", v.ClusterRole)
v.InitControlPlane = getenvBoolDefault("MKS_INIT_CONTROL_PLANE", v.InitControlPlane) v.InitControlPlane = getenvBoolDefault("MKS_INIT_CONTROL_PLANE", v.InitControlPlane)
v.EnableControlAgent = getenvBoolDefault("MKS_ENABLE_CONTROL_AGENT", v.EnableControlAgent) v.EnableNodeControl = getenvBoolDefault("MKS_ENABLE_NODE_CONTROL", v.EnableNodeControl)
v.AllowSchedulingOnControlPlane = getenvBoolDefault("MKS_ALLOW_SCHEDULING_ON_CONTROL_PLANE", v.AllowSchedulingOnControlPlane) v.AllowSchedulingOnControlPlane = getenvBoolDefault("MKS_ALLOW_SCHEDULING_ON_CONTROL_PLANE", v.AllowSchedulingOnControlPlane)
v.SkipImageCheck = getenvBoolDefault("MKS_SKIP_IMAGE_CHECK", v.SkipImageCheck) v.SkipImageCheck = getenvBoolDefault("MKS_SKIP_IMAGE_CHECK", v.SkipImageCheck)

View File

@@ -30,8 +30,8 @@ MKS_CLUSTER_DOMAIN=cluster.local
MKS_CLUSTER_ROLE=control-plane MKS_CLUSTER_ROLE=control-plane
MKS_INIT_CONTROL_PLANE=yes MKS_INIT_CONTROL_PLANE=yes
# OSUpgrade agent # Enable if you want OTA OSUpgrade
MKS_ENABLE_CONTROL_AGENT=yes MKS_ENABLE_NODE_CONTROL=yes
# Boot configs # Boot configs
# usb, emmc # usb, emmc

View File

@@ -49,7 +49,7 @@ catalog:
``` ```
※ ConfigMap requires additional RBAC permissions which is not enabled by default. You can edit ※ ConfigMap requires additional RBAC permissions which is not enabled by default. You can edit
the control-agent's ClusterRole and add `configmaps: get` to allow this. the node-agent's ClusterRole and add `configmaps: get` to allow this.
Contents should look like this Contents should look like this
```yaml ```yaml

View File

@@ -20,11 +20,11 @@ run bootusb
## Run fw_printenv and fw_setenv from kubectl ## Run fw_printenv and fw_setenv from kubectl
``` ```
# Avoid using daemonset/control-agent if you have multiple nodes # Avoid using daemonset/node-agent if you have multiple nodes
kubectl exec -n kube-system control-agent-abcdef1 -- /ctl internal fw-setenv --key foo --value bar kubectl exec -n kube-system node-agent-abcdef1 -- /ctl internal fw-setenv --key foo --value bar
# fw_printenv # fw_printenv
kubectl exec -n kube-system ds/control-agent -- /ctl internal fw-printenv --key foo kubectl exec -n kube-system ds/node-agent -- /ctl internal fw-printenv --key foo
``` ```
## Original uboot env from mono ## Original uboot env from mono