From 145b0a4662ce314d3fe4d7122acb38f16f448de295104e169440a805a9d13463 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=96=9F=E9=85=8C=20=E9=B5=AC=E5=85=84?= Date: Wed, 1 Apr 2026 22:55:22 +0800 Subject: [PATCH] Added some initial daemonsets --- alpine/install-packages.sh | 2 +- clitools/pkg/apis/monok8s/v1alpha1/types.go | 1 + clitools/pkg/bootstrap/registry.go | 1 + clitools/pkg/bootstrap/runner.go | 5 + clitools/pkg/node/agent.go | 383 +++++++++++++++++++- clitools/pkg/node/metadata.go | 6 + clitools/pkg/templates/templates.go | 5 +- clitools/pkg/templates/values.go | 13 +- configs/cluster.env.default | 7 +- docker/alpine.Dockerfile | 1 - docker/initramfs.Dockerfile | 5 +- makefile | 2 +- 12 files changed, 415 insertions(+), 16 deletions(-) diff --git a/alpine/install-packages.sh b/alpine/install-packages.sh index 8c9ba6c..4a5af20 100755 --- a/alpine/install-packages.sh +++ b/alpine/install-packages.sh @@ -10,7 +10,7 @@ apk add alpine-base \ # For diagnotics apk add \ iproute2 iproute2-ss curl bind-tools procps strace tcpdump lsof jq binutils \ - openssl conntrack-tools ethtool findmnt kmod coreutils util-linux zstd + openssl conntrack-tools ethtool findmnt kmod coreutils util-linux zstd libcap-utils echo '[ -x /bin/bash ] && exec /bin/bash -l' >> "/root/.profile" # Compat layer for kubelet for now. Will look into building it myself later. 
If needed diff --git a/clitools/pkg/apis/monok8s/v1alpha1/types.go b/clitools/pkg/apis/monok8s/v1alpha1/types.go index 24e389f..9ee1a69 100644 --- a/clitools/pkg/apis/monok8s/v1alpha1/types.go +++ b/clitools/pkg/apis/monok8s/v1alpha1/types.go @@ -37,6 +37,7 @@ type MonoKSConfigSpec struct { ClusterDomain string `json:"clusterDomain,omitempty" yaml:"clusterDomain,omitempty"` ClusterRole string `json:"clusterRole,omitempty" yaml:"clusterRole,omitempty"` InitControlPlane bool `json:"initControlPlane,omitempty" yaml:"initControlPlane,omitempty"` + EnableControlAgent bool `json:"enableControlAgent,omitempty" yaml:"enableControlAgent,omitempty"` PodSubnet string `json:"podSubnet,omitempty" yaml:"podSubnet,omitempty"` ServiceSubnet string `json:"serviceSubnet,omitempty" yaml:"serviceSubnet,omitempty"` APIServerAdvertiseAddress string `json:"apiServerAdvertiseAddress,omitempty" yaml:"apiServerAdvertiseAddress,omitempty"` diff --git a/clitools/pkg/bootstrap/registry.go b/clitools/pkg/bootstrap/registry.go index 9a64cbe..310658b 100644 --- a/clitools/pkg/bootstrap/registry.go +++ b/clitools/pkg/bootstrap/registry.go @@ -22,6 +22,7 @@ func NewRegistry(ctx *node.NodeContext) *Registry { return &Registry{ steps: map[string]node.Step{ + "ApplyControlAgentDaemonSetResources": node.ApplyControlAgentDaemonSetResources, "ApplyLocalNodeMetadataIfPossible": node.ApplyLocalNodeMetadataIfPossible, "CheckForVersionSkew": node.CheckForVersionSkew, "ClassifyBootstrapAction": node.ClassifyBootstrapAction, diff --git a/clitools/pkg/bootstrap/runner.go b/clitools/pkg/bootstrap/runner.go index 4d4606f..9c42c66 100644 --- a/clitools/pkg/bootstrap/runner.go +++ b/clitools/pkg/bootstrap/runner.go @@ -132,6 +132,11 @@ func NewRunner(cfg *monov1alpha1.MonoKSConfig) *Runner { Name: "Apply node metadata", Desc: "Apply labels/annotations to the local node if API server is reachable", }, + { + RegKey: "ApplyControlAgentDaemonSetResources", + Name: "Apply daemonset for control agent", + Desc: "Control 
agent handles OSUpgrade resources", + }, }, } } diff --git a/clitools/pkg/node/agent.go b/clitools/pkg/node/agent.go index b2db5a9..0fd4706 100644 --- a/clitools/pkg/node/agent.go +++ b/clitools/pkg/node/agent.go @@ -3,22 +3,35 @@ package node import ( "context" "fmt" + "reflect" + "strings" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" "undecided.project/monok8s/pkg/crds" "undecided.project/monok8s/pkg/kube" ) +const ( + controlAgentName = "control-agent" + controlAgentDefaultNamespace = "kube-system" + controlAgentNodeSelectorKey = "monok8s.io/control-agent" + controlAgentNodeSelectorValue = "true" + controlAgentImage = "localhost/monok8s/control-agent:dev" + kubeconfig = "/etc/kubernetes/admin.conf" +) + func ApplyCRDs(ctx context.Context, n *NodeContext) error { if n.Config.Spec.ClusterRole != "control-plane" { return nil } - const kubeconfig = "/etc/kubernetes/admin.conf" - clients, err := kube.NewClientsFromKubeconfig(kubeconfig) if err != nil { return fmt.Errorf("build kube clients from %s: %w", kubeconfig, err) @@ -55,3 +68,369 @@ func ApplyCRDs(ctx context.Context, n *NodeContext) error { return nil } + +func ApplyControlAgentDaemonSetResources(ctx context.Context, n *NodeContext) error { + // Only the control-plane should bootstrap this DaemonSet definition. + // And only when the feature is enabled. 
+ if strings.TrimSpace(n.Config.Spec.ClusterRole) != "control-plane" || !n.Config.Spec.EnableControlAgent { + klog.InfoS("skipping control agent daemonset bootstrap", "clusterRole", n.Config.Spec.ClusterRole, "enableControlAgent", n.Config.Spec.EnableControlAgent) + return nil + } + + err := ApplyCRDs(ctx, n) + if err != nil { + return err + } + + namespace := strings.TrimSpace(n.Config.Namespace) + if namespace == "" { + namespace = controlAgentDefaultNamespace + } + + clients, err := kube.NewClientsFromKubeconfig(kubeconfig) + if err != nil { + return fmt.Errorf("build kube clients from %s: %w", kubeconfig, err) + } + + labels := map[string]string{ + "app.kubernetes.io/name": controlAgentName, + "app.kubernetes.io/component": "agent", + "app.kubernetes.io/part-of": "monok8s", + "app.kubernetes.io/managed-by": "ctl", + } + + kubeClient := clients.Kubernetes + + if err := applyControlAgentServiceAccount(ctx, kubeClient, namespace, labels); err != nil { + return fmt.Errorf("apply serviceaccount: %w", err) + } + if err := applyControlAgentClusterRole(ctx, kubeClient, labels); err != nil { + return fmt.Errorf("apply clusterrole: %w", err) + } + if err := applyControlAgentClusterRoleBinding(ctx, kubeClient, namespace, labels); err != nil { + return fmt.Errorf("apply clusterrolebinding: %w", err) + } + if err := applyControlAgentDaemonSet(ctx, kubeClient, namespace, labels); err != nil { + return fmt.Errorf("apply daemonset: %w", err) + } + + return nil +} + +func applyControlAgentServiceAccount(ctx context.Context, kubeClient kubernetes.Interface, namespace string, labels map[string]string) error { + want := &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: controlAgentName, + Namespace: namespace, + Labels: labels, + }, + } + + existing, err := kubeClient.CoreV1().ServiceAccounts(namespace).Get(ctx, controlAgentName, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + _, err = kubeClient.CoreV1().ServiceAccounts(namespace).Create(ctx, want, metav1.CreateOptions{}) + return err + 
} + if err != nil { + return err + } + + changed := false + if !reflect.DeepEqual(existing.Labels, want.Labels) { + existing.Labels = want.Labels + changed = true + } + + if !changed { + return nil + } + + _, err = kubeClient.CoreV1().ServiceAccounts(namespace).Update(ctx, existing, metav1.UpdateOptions{}) + return err +} + +func applyControlAgentClusterRole(ctx context.Context, kubeClient kubernetes.Interface, labels map[string]string) error { + wantRules := []rbacv1.PolicyRule{ + { + APIGroups: []string{"monok8s.io"}, + Resources: []string{"osupgrades"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{"monok8s.io"}, + Resources: []string{"osupgrades/status"}, + Verbs: []string{"get", "patch", "update"}, + }, + { + APIGroups: []string{""}, + Resources: []string{"nodes"}, + Verbs: []string{"get", "list", "watch"}, + }, + } + + want := &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: controlAgentName, + Labels: labels, + }, + Rules: wantRules, + } + + existing, err := kubeClient.RbacV1().ClusterRoles().Get(ctx, controlAgentName, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + _, err = kubeClient.RbacV1().ClusterRoles().Create(ctx, want, metav1.CreateOptions{}) + return err + } + if err != nil { + return err + } + + changed := false + if !reflect.DeepEqual(existing.Labels, want.Labels) { + existing.Labels = want.Labels + changed = true + } + if !reflect.DeepEqual(existing.Rules, want.Rules) { + existing.Rules = want.Rules + changed = true + } + + if !changed { + return nil + } + + _, err = kubeClient.RbacV1().ClusterRoles().Update(ctx, existing, metav1.UpdateOptions{}) + return err +} + +func applyControlAgentClusterRoleBinding(ctx context.Context, kubeClient kubernetes.Interface, namespace string, labels map[string]string) error { + wantRoleRef := rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, + Kind: "ClusterRole", + Name: controlAgentName, + } + wantSubjects := []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: 
controlAgentName, + Namespace: namespace, + }, + } + + want := &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: controlAgentName, + Labels: labels, + }, + RoleRef: wantRoleRef, + Subjects: wantSubjects, + } + + existing, err := kubeClient.RbacV1().ClusterRoleBindings().Get(ctx, controlAgentName, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + _, err = kubeClient.RbacV1().ClusterRoleBindings().Create(ctx, want, metav1.CreateOptions{}) + return err + } + if err != nil { + return err + } + + // roleRef is immutable. If it differs, fail loudly instead of pretending we can patch it. + if !reflect.DeepEqual(existing.RoleRef, want.RoleRef) { + return fmt.Errorf("existing ClusterRoleBinding %q has different roleRef and must be recreated", controlAgentName) + } + + changed := false + if !reflect.DeepEqual(existing.Labels, want.Labels) { + existing.Labels = want.Labels + changed = true + } + if !reflect.DeepEqual(existing.Subjects, want.Subjects) { + existing.Subjects = want.Subjects + changed = true + } + + if !changed { + return nil + } + + _, err = kubeClient.RbacV1().ClusterRoleBindings().Update(ctx, existing, metav1.UpdateOptions{}) + return err +} + +func applyControlAgentDaemonSet(ctx context.Context, kubeClient kubernetes.Interface, namespace string, labels map[string]string) error { + privileged := true + + dsLabels := map[string]string{ + "app.kubernetes.io/name": controlAgentName, + "app.kubernetes.io/component": "agent", + "app.kubernetes.io/part-of": "monok8s", + "app.kubernetes.io/managed-by": "ctl", + } + + want := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: controlAgentName, + Namespace: namespace, + Labels: labels, + }, + Spec: appsv1.DaemonSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app.kubernetes.io/name": controlAgentName, + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: dsLabels, + }, + Spec: corev1.PodSpec{ + ServiceAccountName: 
controlAgentName, + HostNetwork: true, + HostPID: true, + DNSPolicy: corev1.DNSClusterFirstWithHostNet, + NodeSelector: map[string]string{ + controlAgentNodeSelectorKey: controlAgentNodeSelectorValue, + }, + Tolerations: []corev1.Toleration{ + {Operator: corev1.TolerationOpExists}, + }, + Containers: []corev1.Container{ + { + Name: "agent", + Image: controlAgentImage, + ImagePullPolicy: corev1.PullNever, + Args: []string{"agent"}, + Env: []corev1.EnvVar{ + { + Name: "NODE_NAME", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "spec.nodeName", + }, + }, + }, + { + Name: "CLUSTER_ENV_FILE", + Value: "/host/opt/monok8s/config/cluster.env", + }, + { + Name: "HOST_MOUNT_ROOT", + Value: "/host/mnt/control-agent", + }, + { + Name: "HOST_DEV_DIR", + Value: "/host/dev", + }, + { + Name: "HOST_PROC_DIR", + Value: "/host/proc", + }, + { + Name: "HOST_RUN_DIR", + Value: "/host/run", + }, + }, + SecurityContext: &corev1.SecurityContext{ + Privileged: &privileged, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "host-dev", + MountPath: "/host/dev", + }, + { + Name: "host-config", + MountPath: "/host/opt/monok8s/config", + ReadOnly: true, + }, + { + Name: "host-run", + MountPath: "/host/run", + }, + { + Name: "host-proc", + MountPath: "/host/proc", + ReadOnly: true, + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "host-dev", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/dev", + Type: hostPathType(corev1.HostPathDirectory), + }, + }, + }, + { + Name: "host-config", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/opt/monok8s/config", + Type: hostPathType(corev1.HostPathDirectory), + }, + }, + }, + { + Name: "host-run", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/run", + Type: hostPathType(corev1.HostPathDirectory), + }, + }, + }, + { + Name: "host-proc", + VolumeSource: 
corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/proc", + Type: hostPathType(corev1.HostPathDirectory), + }, + }, + }, + }, + }, + }, + }, + } + + existing, err := kubeClient.AppsV1().DaemonSets(namespace).Get(ctx, controlAgentName, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + _, err = kubeClient.AppsV1().DaemonSets(namespace).Create(ctx, want, metav1.CreateOptions{}) + return err + } + if err != nil { + return err + } + + changed := false + if !reflect.DeepEqual(existing.Labels, want.Labels) { + existing.Labels = want.Labels + changed = true + } + if !reflect.DeepEqual(existing.Spec, want.Spec) { + existing.Spec = want.Spec + changed = true + } + + if !changed { + return nil + } + + _, err = kubeClient.AppsV1().DaemonSets(namespace).Update(ctx, existing, metav1.UpdateOptions{}) + return err +} + +func hostPathType(t corev1.HostPathType) *corev1.HostPathType { + return &t +} + +func mountPropagationMode(m corev1.MountPropagationMode) *corev1.MountPropagationMode { + return &m +} diff --git a/clitools/pkg/node/metadata.go b/clitools/pkg/node/metadata.go index 988f9b8..718927f 100644 --- a/clitools/pkg/node/metadata.go +++ b/clitools/pkg/node/metadata.go @@ -17,6 +17,7 @@ func ApplyLocalNodeMetadataIfPossible(ctx context.Context, nctx *NodeContext) er spec := nctx.Config.Spec if len(spec.NodeAnnotations) == 0 && len(spec.NodeLabels) == 0 { + klog.V(4).Infof("No annotations or labels were defined") return nil // nothing to do } @@ -60,6 +61,11 @@ func ApplyLocalNodeMetadataIfPossible(ctx context.Context, nctx *NodeContext) er node.Labels[k] = v } + // Additional Labels + if spec.EnableControlAgent { + node.Labels[controlAgentNodeSelectorKey] = controlAgentNodeSelectorValue + } + // Apply annotations for k, v := range spec.NodeAnnotations { node.Annotations[k] = v diff --git a/clitools/pkg/templates/templates.go b/clitools/pkg/templates/templates.go index 755d378..934a4da 100644 --- a/clitools/pkg/templates/templates.go +++ 
b/clitools/pkg/templates/templates.go @@ -21,8 +21,9 @@ func DefaultMonoKSConfig(v TemplateValues) types.MonoKSConfig { KubernetesVersion: v.KubernetesVersion, NodeName: firstNonEmpty(v.NodeName, v.Hostname), - ClusterRole: v.ClusterRole, - InitControlPlane: v.InitControlPlane, + ClusterRole: v.ClusterRole, + InitControlPlane: v.InitControlPlane, + EnableControlAgent: v.EnableControlAgent, ClusterName: v.ClusterName, ClusterDomain: v.ClusterDomain, diff --git a/clitools/pkg/templates/values.go b/clitools/pkg/templates/values.go index 2c2ad2c..573e0dc 100644 --- a/clitools/pkg/templates/values.go +++ b/clitools/pkg/templates/values.go @@ -23,8 +23,9 @@ type TemplateValues struct { ContainerRuntimeEndpoint string CNIPlugin string - ClusterRole string // worker, control-plane - InitControlPlane bool + ClusterRole string // worker, control-plane + InitControlPlane bool + EnableControlAgent bool AllowSchedulingOnControlPlane bool SkipImageCheck bool @@ -57,8 +58,9 @@ func defaultTemplateValues() TemplateValues { ContainerRuntimeEndpoint: "unix:///var/run/crio/crio.sock", CNIPlugin: "default", - InitControlPlane: true, - ClusterRole: "control-plane", + ClusterRole: "control-plane", + InitControlPlane: true, + EnableControlAgent: true, AllowSchedulingOnControlPlane: true, SkipImageCheck: false, @@ -103,8 +105,9 @@ func LoadTemplateValuesFromEnv() TemplateValues { v.ContainerRuntimeEndpoint = getenvDefault("MKS_CONTAINER_RUNTIME_ENDPOINT", v.ContainerRuntimeEndpoint) v.CNIPlugin = getenvDefault("MKS_CNI_PLUGIN", v.CNIPlugin) - v.InitControlPlane = getenvBoolDefault("MKS_INIT_CONTROL_PLANE", v.InitControlPlane) v.ClusterRole = getenvDefault("MKS_CLUSTER_ROLE", v.ClusterRole) + v.InitControlPlane = getenvBoolDefault("MKS_INIT_CONTROL_PLANE", v.InitControlPlane) + v.EnableControlAgent = getenvBoolDefault("MKS_ENABLE_CONTROL_AGENT", v.EnableControlAgent) v.AllowSchedulingOnControlPlane = getenvBoolDefault("MKS_ALLOW_SCHEDULING_ON_CONTROL_PLANE", 
v.AllowSchedulingOnControlPlane) v.SkipImageCheck = getenvBoolDefault("MKS_SKIP_IMAGE_CHECK", v.SkipImageCheck) diff --git a/configs/cluster.env.default b/configs/cluster.env.default index 5cb2009..b03c5a8 100644 --- a/configs/cluster.env.default +++ b/configs/cluster.env.default @@ -30,6 +30,9 @@ MKS_CLUSTER_DOMAIN=cluster.local MKS_CLUSTER_ROLE=control-plane MKS_INIT_CONTROL_PLANE=yes +# OSUpgrade agent +MKS_ENABLE_CONTROL_AGENT=yes + MKS_API_SERVER_ENDPOINT= MKS_BOOTSTRAP_TOKEN= MKS_DISCOVERY_TOKEN_CA_CERT_HASH= @@ -37,12 +40,12 @@ MKS_CONTROL_PLANE_CERT_KEY= # none: install manually # default|bridge: CRI-O default bridge CNI -MKS_CNI_PLUGIN=none +MKS_CNI_PLUGIN=default # Node registration metadata # Comma-separated key=value pairs MKS_NODE_LABELS=topology.kubernetes.io/zone=lab,node.kubernetes.io/instance-type=mono-gateway -MKS_NODE_ANNOTATIONS=mono.si/board=ls1046a,mono.si/image-version=dev +MKS_NODE_ANNOTATIONS=mono.si/board=ls1046a,monok8s.io/image-version=dev # Optional # Extra API server SANs, comma-separated diff --git a/docker/alpine.Dockerfile b/docker/alpine.Dockerfile index 858e3b5..e707def 100644 --- a/docker/alpine.Dockerfile +++ b/docker/alpine.Dockerfile @@ -26,7 +26,6 @@ RUN mkdir -p /out/rootfs/usr/local/bin/ COPY packages/kubernetes/kubelet-${KUBE_VERSION} /out/rootfs/usr/local/bin/kubelet COPY packages/kubernetes/kubeadm-${KUBE_VERSION} /out/rootfs/usr/local/bin/kubeadm COPY packages/kubernetes/kubectl-${KUBE_VERSION} /out/rootfs/usr/local/bin/kubectl -# COPY clitools/bin/ctl-linux-${ALPINE_ARCH}-${TAG} /out/rootfs/usr/local/bin/ctl RUN chmod +x /out/rootfs/usr/local/bin/* COPY alpine/rootfs-extra ./rootfs-extra diff --git a/docker/initramfs.Dockerfile b/docker/initramfs.Dockerfile index 709519b..7eca452 100644 --- a/docker/initramfs.Dockerfile +++ b/docker/initramfs.Dockerfile @@ -48,12 +48,13 @@ ARG BUILD_TAG COPY alpine/*.sh ./ COPY initramfs/*.sh ./ -COPY out/rootfs /out/rootfs COPY initramfs/rootfs-extra ./rootfs-extra +COPY 
out/rootfs.tar.gz ./ COPY out/build-info ./rootfs-extra/etc/build-info WORKDIR /out/initramfs -RUN /build/build-rootfs.sh +RUN tar zxf /build/rootfs.tar.gz -C "/out" \ + && /build/build-rootfs.sh FROM scratch COPY --from=build /out/initramfs.cpio.gz /initramfs.cpio.gz diff --git a/makefile b/makefile index 50539e4..48a64ae 100644 --- a/makefile +++ b/makefile @@ -179,7 +179,7 @@ $(INITRAMFS): $(INITRAMFS_DEPS) | $(OUT_DIR) test -f $@ $(CLITOOLS_BIN): $(CLITOOLS_SRCS) - $(MAKE) -C clitools + $(MAKE) -C clitools build-agent $(BOARD_ITB): $(ITB_DEPS) | $(OUT_DIR) docker build \