Added some initial daemonsets
@@ -37,6 +37,7 @@ type MonoKSConfigSpec struct {
	ClusterDomain             string `json:"clusterDomain,omitempty" yaml:"clusterDomain,omitempty"`
	ClusterRole               string `json:"clusterRole,omitempty" yaml:"clusterRole,omitempty"`
	InitControlPlane          bool   `json:"initControlPlane,omitempty" yaml:"initControlPlane,omitempty"`
	EnableControlAgent        bool   `json:"enableControlAgent,omitempty" yaml:"enableControlAgent,omitempty"`
	PodSubnet                 string `json:"podSubnet,omitempty" yaml:"podSubnet,omitempty"`
	ServiceSubnet             string `json:"serviceSubnet,omitempty" yaml:"serviceSubnet,omitempty"`
	APIServerAdvertiseAddress string `json:"apiServerAdvertiseAddress,omitempty" yaml:"apiServerAdvertiseAddress,omitempty"`
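Note: the new EnableControlAgent flag only has an effect on control-plane nodes; the bootstrap step added later in this diff runs when clusterRole is "control-plane" and enableControlAgent is true. A minimal sketch of that gate, using a stand-in struct rather than the real MonoKSConfigSpec type:

package main

import "fmt"

// specSketch mirrors only the two MonoKSConfigSpec fields involved in the gate;
// it is an illustration, not the project's type.
type specSketch struct {
	ClusterRole        string
	EnableControlAgent bool
}

func main() {
	s := specSketch{ClusterRole: "control-plane", EnableControlAgent: true}
	// Same condition ApplyControlAgentDaemonSetResources checks before doing any work.
	run := s.ClusterRole == "control-plane" && s.EnableControlAgent
	fmt.Println("would bootstrap control-agent DaemonSet:", run)
}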
@@ -22,6 +22,7 @@ func NewRegistry(ctx *node.NodeContext) *Registry {
	return &Registry{
		steps: map[string]node.Step{
			"ApplyControlAgentDaemonSetResources": node.ApplyControlAgentDaemonSetResources,
			"ApplyLocalNodeMetadataIfPossible":    node.ApplyLocalNodeMetadataIfPossible,
			"CheckForVersionSkew":                 node.CheckForVersionSkew,
			"ClassifyBootstrapAction":             node.ClassifyBootstrapAction,
@@ -132,6 +132,11 @@ func NewRunner(cfg *monov1alpha1.MonoKSConfig) *Runner {
				Name:   "Apply node metadata",
				Desc:   "Apply labels/annotations to the local node if the API server is reachable",
			},
			{
				RegKey: "ApplyControlAgentDaemonSetResources",
				Name:   "Apply DaemonSet for control agent",
				Desc:   "Control agent handles OSUpgrade resources",
			},
		},
	}
}
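The Registry maps a RegKey string to a step function, and each Runner entry references a step by that key. A small self-contained sketch of how such a lookup could work; the Step signature matches the node package in this diff, while the Get method and the main function are illustrative assumptions, not the project's actual API:

package main

import (
	"context"
	"fmt"
)

type NodeContext struct{}

// Step mirrors the signature used by the node steps in this diff.
type Step func(ctx context.Context, n *NodeContext) error

type Registry struct{ steps map[string]Step }

func (r *Registry) Get(key string) (Step, bool) {
	s, ok := r.steps[key]
	return s, ok
}

func main() {
	reg := &Registry{steps: map[string]Step{
		"ApplyControlAgentDaemonSetResources": func(ctx context.Context, n *NodeContext) error {
			fmt.Println("would apply control-agent DaemonSet resources")
			return nil
		},
	}}

	// A Runner entry's RegKey is resolved against the registry before execution.
	if step, ok := reg.Get("ApplyControlAgentDaemonSetResources"); ok {
		_ = step(context.Background(), &NodeContext{})
	}
}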
@@ -3,22 +3,35 @@ package node
import (
	"context"
	"fmt"
	"reflect"
	"strings"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/klog/v2"

	"undecided.project/monok8s/pkg/crds"
	"undecided.project/monok8s/pkg/kube"
)

const (
	controlAgentName              = "control-agent"
	controlAgentDefaultNamespace  = "kube-system"
	controlAgentNodeSelectorKey   = "monok8s.io/control-agent"
	controlAgentNodeSelectorValue = "true"
	controlAgentImage             = "localhost/monok8s/control-agent:dev"
	kubeconfig                    = "/etc/kubernetes/admin.conf"
)

func ApplyCRDs(ctx context.Context, n *NodeContext) error {
	if n.Config.Spec.ClusterRole != "control-plane" {
		return nil
	}

	const kubeconfig = "/etc/kubernetes/admin.conf"

	clients, err := kube.NewClientsFromKubeconfig(kubeconfig)
	if err != nil {
		return fmt.Errorf("build kube clients from %s: %w", kubeconfig, err)
@@ -55,3 +68,369 @@ func ApplyCRDs(ctx context.Context, n *NodeContext) error {
	return nil
}

func ApplyControlAgentDaemonSetResources(ctx context.Context, n *NodeContext) error {
	// Only the control plane should bootstrap this DaemonSet definition, and only
	// when the feature is enabled.
	if strings.TrimSpace(n.Config.Spec.ClusterRole) != "control-plane" || !n.Config.Spec.EnableControlAgent {
		klog.InfoS("skipping control agent daemonset bootstrap", "clusterRole", n.Config.Spec.ClusterRole, "enableControlAgent", n.Config.Spec.EnableControlAgent)
		return nil
	}

	err := ApplyCRDs(ctx, n)
	if err != nil {
		return err
	}

	namespace := strings.TrimSpace(n.Config.Namespace)
	if namespace == "" {
		namespace = controlAgentDefaultNamespace
	}

	clients, err := kube.NewClientsFromKubeconfig(kubeconfig)
	if err != nil {
		return fmt.Errorf("build kube clients from %s: %w", kubeconfig, err)
	}

	labels := map[string]string{
		"app.kubernetes.io/name":       controlAgentName,
		"app.kubernetes.io/component":  "agent",
		"app.kubernetes.io/part-of":    "monok8s",
		"app.kubernetes.io/managed-by": "ctl",
	}

	kubeClient := clients.Kubernetes

	if err := applyControlAgentServiceAccount(ctx, kubeClient, namespace, labels); err != nil {
		return fmt.Errorf("apply serviceaccount: %w", err)
	}
	if err := applyControlAgentClusterRole(ctx, kubeClient, labels); err != nil {
		return fmt.Errorf("apply clusterrole: %w", err)
	}
	if err := applyControlAgentClusterRoleBinding(ctx, kubeClient, namespace, labels); err != nil {
		return fmt.Errorf("apply clusterrolebinding: %w", err)
	}
	if err := applyControlAgentDaemonSet(ctx, kubeClient, namespace, labels); err != nil {
		return fmt.Errorf("apply daemonset: %w", err)
	}

	return nil
}
func applyControlAgentServiceAccount(ctx context.Context, kubeClient kubernetes.Interface, namespace string, labels map[string]string) error {
	want := &corev1.ServiceAccount{
		ObjectMeta: metav1.ObjectMeta{
			Name:      controlAgentName,
			Namespace: namespace,
			Labels:    labels,
		},
	}

	existing, err := kubeClient.CoreV1().ServiceAccounts(namespace).Get(ctx, controlAgentName, metav1.GetOptions{})
	if apierrors.IsNotFound(err) {
		_, err = kubeClient.CoreV1().ServiceAccounts(namespace).Create(ctx, want, metav1.CreateOptions{})
		return err
	}
	if err != nil {
		return err
	}

	changed := false
	if !reflect.DeepEqual(existing.Labels, want.Labels) {
		existing.Labels = want.Labels
		changed = true
	}

	if !changed {
		return nil
	}

	_, err = kubeClient.CoreV1().ServiceAccounts(namespace).Update(ctx, existing, metav1.UpdateOptions{})
	return err
}
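The helper above follows a get-then-create-or-update pattern. The same flow can be exercised in isolation against client-go's in-memory fake clientset; this is a standalone sketch for illustration, not the project's test code, and applyServiceAccountSketch is a stripped-down stand-in for the real helper:

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// applyServiceAccountSketch repeats the get-then-create flow in a minimal form.
func applyServiceAccountSketch(ctx context.Context, c kubernetes.Interface, ns, name string) error {
	want := &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: ns}}
	_, err := c.CoreV1().ServiceAccounts(ns).Get(ctx, name, metav1.GetOptions{})
	if apierrors.IsNotFound(err) {
		_, err = c.CoreV1().ServiceAccounts(ns).Create(ctx, want, metav1.CreateOptions{})
		return err
	}
	return err // already present; nothing to reconcile in this sketch
}

func main() {
	ctx := context.Background()
	client := fake.NewSimpleClientset()

	// Running the apply twice shows the pattern is idempotent: the second
	// call finds the object and does nothing.
	for i := 0; i < 2; i++ {
		if err := applyServiceAccountSketch(ctx, client, "kube-system", "control-agent"); err != nil {
			panic(err)
		}
	}

	sa, err := client.CoreV1().ServiceAccounts("kube-system").Get(ctx, "control-agent", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("serviceaccount present:", sa.Name)
}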
func applyControlAgentClusterRole(ctx context.Context, kubeClient kubernetes.Interface, labels map[string]string) error {
	wantRules := []rbacv1.PolicyRule{
		{
			APIGroups: []string{"monok8s.io"},
			Resources: []string{"osupgrades"},
			Verbs:     []string{"get", "list", "watch"},
		},
		{
			APIGroups: []string{"monok8s.io"},
			Resources: []string{"osupgrades/status"},
			Verbs:     []string{"get", "patch", "update"},
		},
		{
			APIGroups: []string{""},
			Resources: []string{"nodes"},
			Verbs:     []string{"get", "list", "watch"},
		},
	}

	want := &rbacv1.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{
			Name:   controlAgentName,
			Labels: labels,
		},
		Rules: wantRules,
	}

	existing, err := kubeClient.RbacV1().ClusterRoles().Get(ctx, controlAgentName, metav1.GetOptions{})
	if apierrors.IsNotFound(err) {
		_, err = kubeClient.RbacV1().ClusterRoles().Create(ctx, want, metav1.CreateOptions{})
		return err
	}
	if err != nil {
		return err
	}

	changed := false
	if !reflect.DeepEqual(existing.Labels, want.Labels) {
		existing.Labels = want.Labels
		changed = true
	}
	if !reflect.DeepEqual(existing.Rules, want.Rules) {
		existing.Rules = want.Rules
		changed = true
	}

	if !changed {
		return nil
	}

	_, err = kubeClient.RbacV1().ClusterRoles().Update(ctx, existing, metav1.UpdateOptions{})
	return err
}
func applyControlAgentClusterRoleBinding(ctx context.Context, kubeClient kubernetes.Interface, namespace string, labels map[string]string) error {
	wantRoleRef := rbacv1.RoleRef{
		APIGroup: rbacv1.GroupName,
		Kind:     "ClusterRole",
		Name:     controlAgentName,
	}
	wantSubjects := []rbacv1.Subject{
		{
			Kind:      "ServiceAccount",
			Name:      controlAgentName,
			Namespace: namespace,
		},
	}

	want := &rbacv1.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name:   controlAgentName,
			Labels: labels,
		},
		RoleRef:  wantRoleRef,
		Subjects: wantSubjects,
	}

	existing, err := kubeClient.RbacV1().ClusterRoleBindings().Get(ctx, controlAgentName, metav1.GetOptions{})
	if apierrors.IsNotFound(err) {
		_, err = kubeClient.RbacV1().ClusterRoleBindings().Create(ctx, want, metav1.CreateOptions{})
		return err
	}
	if err != nil {
		return err
	}

	// roleRef is immutable. If it differs, fail loudly instead of pretending we can patch it.
	if !reflect.DeepEqual(existing.RoleRef, want.RoleRef) {
		return fmt.Errorf("existing ClusterRoleBinding %q has a different roleRef and must be recreated", controlAgentName)
	}

	changed := false
	if !reflect.DeepEqual(existing.Labels, want.Labels) {
		existing.Labels = want.Labels
		changed = true
	}
	if !reflect.DeepEqual(existing.Subjects, want.Subjects) {
		existing.Subjects = want.Subjects
		changed = true
	}

	if !changed {
		return nil
	}

	_, err = kubeClient.RbacV1().ClusterRoleBindings().Update(ctx, existing, metav1.UpdateOptions{})
	return err
}
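Because a ClusterRoleBinding's roleRef is immutable, the helper above deliberately returns an error on a mismatch. If recreation were ever preferred over failing, a hedged alternative (not what the code above does) would be to delete and recreate the binding. The function below is a sketch only, relying on the same imports as this file; recreateClusterRoleBinding is an illustrative name:

// Sketch only: delete-and-recreate for an immutable roleRef mismatch.
func recreateClusterRoleBinding(ctx context.Context, c kubernetes.Interface, want *rbacv1.ClusterRoleBinding) error {
	if err := c.RbacV1().ClusterRoleBindings().Delete(ctx, want.Name, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) {
		return fmt.Errorf("delete clusterrolebinding %q: %w", want.Name, err)
	}
	_, err := c.RbacV1().ClusterRoleBindings().Create(ctx, want, metav1.CreateOptions{})
	return err
}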
func applyControlAgentDaemonSet(ctx context.Context, kubeClient kubernetes.Interface, namespace string, labels map[string]string) error {
	privileged := true

	dsLabels := map[string]string{
		"app.kubernetes.io/name":       controlAgentName,
		"app.kubernetes.io/component":  "agent",
		"app.kubernetes.io/part-of":    "monok8s",
		"app.kubernetes.io/managed-by": "ctl",
	}

	want := &appsv1.DaemonSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:      controlAgentName,
			Namespace: namespace,
			Labels:    labels,
		},
		Spec: appsv1.DaemonSetSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{
					"app.kubernetes.io/name": controlAgentName,
				},
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: dsLabels,
				},
				Spec: corev1.PodSpec{
					ServiceAccountName: controlAgentName,
					HostNetwork:        true,
					HostPID:            true,
					DNSPolicy:          corev1.DNSClusterFirstWithHostNet,
					NodeSelector: map[string]string{
						controlAgentNodeSelectorKey: controlAgentNodeSelectorValue,
					},
					Tolerations: []corev1.Toleration{
						{Operator: corev1.TolerationOpExists},
					},
					Containers: []corev1.Container{
						{
							Name:            "agent",
							Image:           controlAgentImage,
							ImagePullPolicy: corev1.PullNever,
							Args:            []string{"agent"},
							Env: []corev1.EnvVar{
								{
									Name: "NODE_NAME",
									ValueFrom: &corev1.EnvVarSource{
										FieldRef: &corev1.ObjectFieldSelector{
											APIVersion: "v1",
											FieldPath:  "spec.nodeName",
										},
									},
								},
								{
									Name:  "CLUSTER_ENV_FILE",
									Value: "/host/opt/monok8s/config/cluster.env",
								},
								{
									Name:  "HOST_MOUNT_ROOT",
									Value: "/host/mnt/control-agent",
								},
								{
									Name:  "HOST_DEV_DIR",
									Value: "/host/dev",
								},
								{
									Name:  "HOST_PROC_DIR",
									Value: "/host/proc",
								},
								{
									Name:  "HOST_RUN_DIR",
									Value: "/host/run",
								},
							},
							SecurityContext: &corev1.SecurityContext{
								Privileged: &privileged,
							},
							VolumeMounts: []corev1.VolumeMount{
								{
									Name:      "host-dev",
									MountPath: "/host/dev",
								},
								{
									Name:      "host-config",
									MountPath: "/host/opt/monok8s/config",
									ReadOnly:  true,
								},
								{
									Name:      "host-run",
									MountPath: "/host/run",
								},
								{
									Name:      "host-proc",
									MountPath: "/host/proc",
									ReadOnly:  true,
								},
							},
						},
					},
					Volumes: []corev1.Volume{
						{
							Name: "host-dev",
							VolumeSource: corev1.VolumeSource{
								HostPath: &corev1.HostPathVolumeSource{
									Path: "/dev",
									Type: hostPathType(corev1.HostPathDirectory),
								},
							},
						},
						{
							Name: "host-config",
							VolumeSource: corev1.VolumeSource{
								HostPath: &corev1.HostPathVolumeSource{
									Path: "/opt/monok8s/config",
									Type: hostPathType(corev1.HostPathDirectory),
								},
							},
						},
						{
							Name: "host-run",
							VolumeSource: corev1.VolumeSource{
								HostPath: &corev1.HostPathVolumeSource{
									Path: "/run",
									Type: hostPathType(corev1.HostPathDirectory),
								},
							},
						},
						{
							Name: "host-proc",
							VolumeSource: corev1.VolumeSource{
								HostPath: &corev1.HostPathVolumeSource{
									Path: "/proc",
									Type: hostPathType(corev1.HostPathDirectory),
								},
							},
						},
					},
				},
			},
		},
	}

	existing, err := kubeClient.AppsV1().DaemonSets(namespace).Get(ctx, controlAgentName, metav1.GetOptions{})
	if apierrors.IsNotFound(err) {
		_, err = kubeClient.AppsV1().DaemonSets(namespace).Create(ctx, want, metav1.CreateOptions{})
		return err
	}
	if err != nil {
		return err
	}

	changed := false
	if !reflect.DeepEqual(existing.Labels, want.Labels) {
		existing.Labels = want.Labels
		changed = true
	}
	if !reflect.DeepEqual(existing.Spec, want.Spec) {
		existing.Spec = want.Spec
		changed = true
	}

	if !changed {
		return nil
	}

	_, err = kubeClient.AppsV1().DaemonSets(namespace).Update(ctx, existing, metav1.UpdateOptions{})
	return err
}

func hostPathType(t corev1.HostPathType) *corev1.HostPathType {
	return &t
}

func mountPropagationMode(m corev1.MountPropagationMode) *corev1.MountPropagationMode {
	return &m
}
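The four helpers above all hand-roll a get/compare/update loop. An alternative worth noting is server-side apply through client-go's generated apply configurations, which lets the API server own the merge. A minimal, hedged sketch for the ServiceAccount only; corev1apply refers to k8s.io/client-go/applyconfigurations/core/v1, and the field manager name is illustrative, not something this project defines:

// Sketch only: the same ServiceAccount expressed with server-side apply.
func applyControlAgentServiceAccountSSA(ctx context.Context, kubeClient kubernetes.Interface, namespace string, labels map[string]string) error {
	sa := corev1apply.ServiceAccount(controlAgentName, namespace).WithLabels(labels)
	_, err := kubeClient.CoreV1().ServiceAccounts(namespace).Apply(ctx, sa, metav1.ApplyOptions{
		FieldManager: "ctl",
		Force:        true,
	})
	return err
}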
@@ -17,6 +17,7 @@ func ApplyLocalNodeMetadataIfPossible(ctx context.Context, nctx *NodeContext) er
	spec := nctx.Config.Spec

	if len(spec.NodeAnnotations) == 0 && len(spec.NodeLabels) == 0 {
		klog.V(4).Infof("no node annotations or labels were defined")
		return nil // nothing to do
	}
@@ -60,6 +61,11 @@ func ApplyLocalNodeMetadataIfPossible(ctx context.Context, nctx *NodeContext) er
		node.Labels[k] = v
	}

	// Additional labels: make the node match the control agent DaemonSet's node selector.
	if spec.EnableControlAgent {
		node.Labels[controlAgentNodeSelectorKey] = controlAgentNodeSelectorValue
	}

	// Apply annotations
	for k, v := range spec.NodeAnnotations {
		node.Annotations[k] = v
@@ -21,8 +21,9 @@ func DefaultMonoKSConfig(v TemplateValues) types.MonoKSConfig {
		KubernetesVersion: v.KubernetesVersion,
		NodeName:          firstNonEmpty(v.NodeName, v.Hostname),

		ClusterRole:        v.ClusterRole,
		InitControlPlane:   v.InitControlPlane,
		EnableControlAgent: v.EnableControlAgent,

		ClusterName:   v.ClusterName,
		ClusterDomain: v.ClusterDomain,
@@ -23,8 +23,9 @@ type TemplateValues struct {
	ContainerRuntimeEndpoint string
	CNIPlugin                string

	ClusterRole        string // worker, control-plane
	InitControlPlane   bool
	EnableControlAgent bool

	AllowSchedulingOnControlPlane bool
	SkipImageCheck                bool
@@ -57,8 +58,9 @@ func defaultTemplateValues() TemplateValues {
		ContainerRuntimeEndpoint: "unix:///var/run/crio/crio.sock",
		CNIPlugin:                "default",

		ClusterRole:        "control-plane",
		InitControlPlane:   true,
		EnableControlAgent: true,

		AllowSchedulingOnControlPlane: true,
		SkipImageCheck:                false,
@@ -103,8 +105,9 @@ func LoadTemplateValuesFromEnv() TemplateValues {
	v.ContainerRuntimeEndpoint = getenvDefault("MKS_CONTAINER_RUNTIME_ENDPOINT", v.ContainerRuntimeEndpoint)
	v.CNIPlugin = getenvDefault("MKS_CNI_PLUGIN", v.CNIPlugin)

	v.ClusterRole = getenvDefault("MKS_CLUSTER_ROLE", v.ClusterRole)
	v.InitControlPlane = getenvBoolDefault("MKS_INIT_CONTROL_PLANE", v.InitControlPlane)
	v.EnableControlAgent = getenvBoolDefault("MKS_ENABLE_CONTROL_AGENT", v.EnableControlAgent)

	v.AllowSchedulingOnControlPlane = getenvBoolDefault("MKS_ALLOW_SCHEDULING_ON_CONTROL_PLANE", v.AllowSchedulingOnControlPlane)
	v.SkipImageCheck = getenvBoolDefault("MKS_SKIP_IMAGE_CHECK", v.SkipImageCheck)
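With the template values wired to the environment, setting MKS_CLUSTER_ROLE=control-plane and MKS_ENABLE_CONTROL_AGENT=true (both already the defaults above) is enough to turn the feature on. getenvBoolDefault is the project's own helper and is not shown in this diff; a plausible sketch of such a helper, for illustration only, using the standard library os, strconv, and strings packages:

// Sketch only: a possible shape for getenvBoolDefault; the real helper may differ.
func getenvBoolDefault(key string, def bool) bool {
	v, ok := os.LookupEnv(key)
	if !ok || strings.TrimSpace(v) == "" {
		return def
	}
	b, err := strconv.ParseBool(strings.TrimSpace(v))
	if err != nil {
		return def
	}
	return b
}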