package node

import (
	"context"
	"fmt"
	"reflect"
	"sort"
	"strings"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/client-go/kubernetes"
	"k8s.io/klog/v2"

	monov1alpha1 "example.com/monok8s/pkg/apis/monok8s/v1alpha1"
	"example.com/monok8s/pkg/kube"
	templates "example.com/monok8s/pkg/templates"
)

// applyAdmissionControllerDeploymentResources installs the control-agent
// admission controller and all of its supporting objects (namespace,
// ServiceAccount, ClusterRole, ClusterRoleBinding, Deployment).
//
// It is a no-op (returning nil) unless the node is a control-plane node
// with the control agent enabled. CRDs are applied first so the RBAC
// rules below reference resources that exist.
func applyAdmissionControllerDeploymentResources(ctx context.Context, n *NodeContext) error {
	if strings.TrimSpace(n.Config.Spec.ClusterRole) != "control-plane" || !n.Config.Spec.EnableControlAgent {
		klog.InfoS("skipped admission controller deployment",
			"clusterRole", n.Config.Spec.ClusterRole,
			"enableControlAgent", n.Config.Spec.EnableControlAgent,
		)
		return nil
	}
	if err := ApplyCRDs(ctx, n); err != nil {
		return err
	}

	// Fall back to the template default when no namespace is configured.
	namespace := strings.TrimSpace(n.Config.Namespace)
	if namespace == "" {
		namespace = templates.DefaultNamespace
	}

	clients, err := kube.NewClientsFromKubeconfig(adminKubeconfigPath)
	if err != nil {
		return fmt.Errorf("build kube clients from %s: %w", adminKubeconfigPath, err)
	}

	// Common labels stamped on every object we own.
	labels := map[string]string{
		"app.kubernetes.io/name":       controlAgentName,
		"app.kubernetes.io/component":  "controller",
		"app.kubernetes.io/part-of":    "monok8s",
		"app.kubernetes.io/managed-by": "ctl",
	}

	kubeClient := clients.Kubernetes
	if err := ensureNamespace(ctx, kubeClient, namespace, labels); err != nil {
		return fmt.Errorf("ensure namespace %q: %w", namespace, err)
	}
	if err := applyAdmissionControllerServiceAccount(ctx, kubeClient, namespace, labels); err != nil {
		return fmt.Errorf("apply serviceaccount: %w", err)
	}
	if err := applyAdmissionControllerClusterRole(ctx, kubeClient, labels); err != nil {
		return fmt.Errorf("apply clusterrole: %w", err)
	}
	if err := applyAdmissionControllerClusterRoleBinding(ctx, kubeClient, namespace, labels); err != nil {
		return fmt.Errorf("apply clusterrolebinding: %w", err)
	}
	if err := applyAdmissionControllerDeployment(ctx, kubeClient, namespace, labels); err != nil {
		return fmt.Errorf("apply deployment: %w", err)
	}
	return nil
}

// applyAdmissionControllerServiceAccount creates or updates the control
// agent's ServiceAccount, reconciling only the labels and the
// automountServiceAccountToken flag.
func applyAdmissionControllerServiceAccount(ctx context.Context, kubeClient kubernetes.Interface, namespace string, labels map[string]string) error {
	automount := true
	want := &corev1.ServiceAccount{
		ObjectMeta: metav1.ObjectMeta{
			Name:      controlAgentName,
			Namespace: namespace,
			Labels:    labels,
		},
		AutomountServiceAccountToken: &automount,
	}

	existing, err := kubeClient.CoreV1().ServiceAccounts(namespace).Get(ctx, controlAgentName, metav1.GetOptions{})
	if apierrors.IsNotFound(err) {
		_, err = kubeClient.CoreV1().ServiceAccounts(namespace).Create(ctx, want, metav1.CreateOptions{})
		return err
	}
	if err != nil {
		return err
	}

	// Only issue an Update when one of the managed fields drifted.
	changed := false
	if !reflect.DeepEqual(existing.Labels, want.Labels) {
		existing.Labels = want.Labels
		changed = true
	}
	if !reflect.DeepEqual(existing.AutomountServiceAccountToken, want.AutomountServiceAccountToken) {
		existing.AutomountServiceAccountToken = want.AutomountServiceAccountToken
		changed = true
	}
	if !changed {
		return nil
	}
	_, err = kubeClient.CoreV1().ServiceAccounts(namespace).Update(ctx, existing, metav1.UpdateOptions{})
	return err
}

// applyAdmissionControllerClusterRole creates or updates the ClusterRole
// granting the control agent read access to nodes and osupgrades plus
// write access to osupgrade status and progress resources.
func applyAdmissionControllerClusterRole(ctx context.Context, kubeClient kubernetes.Interface, labels map[string]string) error {
	wantRules := []rbacv1.PolicyRule{
		{
			APIGroups: []string{monov1alpha1.Group},
			Resources: []string{"osupgrades"},
			Verbs:     []string{"get", "list", "watch"},
		},
		{
			APIGroups: []string{monov1alpha1.Group},
			Resources: []string{"osupgrades/status"},
			Verbs:     []string{"get", "patch", "update"},
		},
		{
			APIGroups: []string{monov1alpha1.Group},
			Resources: []string{"osupgradeprogresses"},
			Verbs:     []string{"get", "list", "watch", "create", "patch", "update"},
		},
		{
			// "" is the core API group (nodes live there).
			APIGroups: []string{""},
			Resources: []string{"nodes"},
			Verbs:     []string{"get", "list", "watch"},
		},
	}
	want := &rbacv1.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{
			Name:   controlAgentName,
			Labels: labels,
		},
		Rules: wantRules,
	}

	existing, err := kubeClient.RbacV1().ClusterRoles().Get(ctx, controlAgentName, metav1.GetOptions{})
	if apierrors.IsNotFound(err) {
		_, err = kubeClient.RbacV1().ClusterRoles().Create(ctx, want, metav1.CreateOptions{})
		return err
	}
	if err != nil {
		return err
	}

	changed := false
	if !reflect.DeepEqual(existing.Labels, want.Labels) {
		existing.Labels = want.Labels
		changed = true
	}
	if !reflect.DeepEqual(existing.Rules, want.Rules) {
		existing.Rules = want.Rules
		changed = true
	}
	if !changed {
		return nil
	}
	_, err = kubeClient.RbacV1().ClusterRoles().Update(ctx, existing, metav1.UpdateOptions{})
	return err
}

// applyAdmissionControllerClusterRoleBinding creates or updates the
// ClusterRoleBinding that binds the control agent's ServiceAccount to
// its ClusterRole.
func applyAdmissionControllerClusterRoleBinding(ctx context.Context, kubeClient kubernetes.Interface, namespace string, labels map[string]string) error {
	wantSubjects := []rbacv1.Subject{
		{
			Kind:      "ServiceAccount",
			Name:      controlAgentName,
			Namespace: namespace,
		},
	}
	wantRoleRef := rbacv1.RoleRef{
		APIGroup: rbacv1.GroupName,
		Kind:     "ClusterRole",
		Name:     controlAgentName,
	}
	want := &rbacv1.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name:   controlAgentName,
			Labels: labels,
		},
		Subjects: wantSubjects,
		RoleRef:  wantRoleRef,
	}

	existing, err := kubeClient.RbacV1().ClusterRoleBindings().Get(ctx, controlAgentName, metav1.GetOptions{})
	if apierrors.IsNotFound(err) {
		_, err = kubeClient.RbacV1().ClusterRoleBindings().Create(ctx, want, metav1.CreateOptions{})
		return err
	}
	if err != nil {
		return err
	}

	// NOTE: roleRef is immutable server-side; if it ever drifts, the
	// Update below will be rejected by the API server rather than
	// silently fixed. That matches the original behavior.
	changed := false
	if !reflect.DeepEqual(existing.Labels, want.Labels) {
		existing.Labels = want.Labels
		changed = true
	}
	if !reflect.DeepEqual(existing.Subjects, want.Subjects) {
		existing.Subjects = want.Subjects
		changed = true
	}
	if !reflect.DeepEqual(existing.RoleRef, want.RoleRef) {
		existing.RoleRef = want.RoleRef
		changed = true
	}
	if !changed {
		return nil
	}
	_, err = kubeClient.RbacV1().ClusterRoleBindings().Update(ctx, existing, metav1.UpdateOptions{})
	return err
}

// applyAdmissionControllerDeployment creates or updates the single-replica
// controller Deployment, reconciling labels, replicas, selector, pod
// template labels, service account, and the container list.
func applyAdmissionControllerDeployment(ctx context.Context, kubeClient kubernetes.Interface, namespace string, labels map[string]string) error {
	replicas := int32(1)
	selectorLabels := map[string]string{
		"app.kubernetes.io/name":      controlAgentName,
		"app.kubernetes.io/component": "controller",
	}
	podLabels := mergeStringMaps(labels, selectorLabels)
	runAsNonRoot := true
	allowPrivilegeEscalation := false

	want := &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:      controlAgentName,
			Namespace: namespace,
			Labels:    labels,
		},
		Spec: appsv1.DeploymentSpec{
			Replicas: &replicas,
			Selector: &metav1.LabelSelector{
				MatchLabels: selectorLabels,
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: podLabels,
				},
				Spec: corev1.PodSpec{
					ServiceAccountName: controlAgentName,
					Containers: []corev1.Container{
						{
							Name:            "controller",
							Image:           controlAgentImage,
							ImagePullPolicy: corev1.PullIfNotPresent,
							Args: []string{
								"controller",
							},
							// Downward-API env so the controller knows its own identity.
							Env: []corev1.EnvVar{
								{
									Name: "POD_NAME",
									ValueFrom: &corev1.EnvVarSource{
										FieldRef: &corev1.ObjectFieldSelector{
											APIVersion: "v1",
											FieldPath:  "metadata.name",
										},
									},
								},
								{
									Name: "POD_NAMESPACE",
									ValueFrom: &corev1.EnvVarSource{
										FieldRef: &corev1.ObjectFieldSelector{
											APIVersion: "v1",
											FieldPath:  "metadata.namespace",
										},
									},
								},
								{
									Name: "NODE_NAME",
									ValueFrom: &corev1.EnvVarSource{
										FieldRef: &corev1.ObjectFieldSelector{
											APIVersion: "v1",
											FieldPath:  "spec.nodeName",
										},
									},
								},
							},
							Ports: []corev1.ContainerPort{
								{
									Name:          "http",
									ContainerPort: 8080,
									Protocol:      corev1.ProtocolTCP,
								},
							},
							LivenessProbe: &corev1.Probe{
								ProbeHandler: corev1.ProbeHandler{
									HTTPGet: &corev1.HTTPGetAction{
										Path: "/healthz",
										Port: intstr.FromString("http"),
									},
								},
							},
							ReadinessProbe: &corev1.Probe{
								ProbeHandler: corev1.ProbeHandler{
									HTTPGet: &corev1.HTTPGetAction{
										Path: "/readyz",
										Port: intstr.FromString("http"),
									},
								},
							},
							SecurityContext: &corev1.SecurityContext{
								RunAsNonRoot:             &runAsNonRoot,
								AllowPrivilegeEscalation: &allowPrivilegeEscalation,
							},
						},
					},
				},
			},
		},
	}

	existing, err := kubeClient.AppsV1().Deployments(namespace).Get(ctx, controlAgentName, metav1.GetOptions{})
	if apierrors.IsNotFound(err) {
		_, err = kubeClient.AppsV1().Deployments(namespace).Create(ctx, want, metav1.CreateOptions{})
		return err
	}
	if err != nil {
		return err
	}

	// NOTE(review): comparing Containers with DeepEqual against a
	// server-defaulted object (e.g. terminationMessagePath, resources)
	// will see drift on every reconcile and trigger a no-op Update each
	// time — confirm whether that churn is acceptable here.
	changed := false
	if !reflect.DeepEqual(existing.Labels, want.Labels) {
		existing.Labels = want.Labels
		changed = true
	}
	if !reflect.DeepEqual(existing.Spec.Replicas, want.Spec.Replicas) {
		existing.Spec.Replicas = want.Spec.Replicas
		changed = true
	}
	if !reflect.DeepEqual(existing.Spec.Selector, want.Spec.Selector) {
		existing.Spec.Selector = want.Spec.Selector
		changed = true
	}
	if !reflect.DeepEqual(existing.Spec.Template.Labels, want.Spec.Template.Labels) {
		existing.Spec.Template.Labels = want.Spec.Template.Labels
		changed = true
	}
	// Plain string comparison; reflect.DeepEqual was overkill here.
	if existing.Spec.Template.Spec.ServiceAccountName != want.Spec.Template.Spec.ServiceAccountName {
		existing.Spec.Template.Spec.ServiceAccountName = want.Spec.Template.Spec.ServiceAccountName
		changed = true
	}
	if !reflect.DeepEqual(existing.Spec.Template.Spec.Containers, want.Spec.Template.Spec.Containers) {
		existing.Spec.Template.Spec.Containers = want.Spec.Template.Spec.Containers
		changed = true
	}
	if !changed {
		return nil
	}
	_, err = kubeClient.AppsV1().Deployments(namespace).Update(ctx, existing, metav1.UpdateOptions{})
	return err
}

// mergeStringMaps returns a new map containing the union of all input
// maps; later maps win on key collisions. Returns nil when every input
// is empty.
func mergeStringMaps(maps ...map[string]string) map[string]string {
	var total int
	for _, m := range maps {
		total += len(m)
	}
	if total == 0 {
		return nil
	}
	out := make(map[string]string, total)
	for _, m := range maps {
		for k, v := range m {
			out[k] = v
		}
	}
	return out
}

// sortedKeys returns m's keys in ascending order, or nil for an empty map.
func sortedKeys(m map[string]string) []string {
	if len(m) == 0 {
		return nil
	}
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	return keys
}