diff --git a/alpine/build-rootfs.sh b/alpine/build-rootfs.sh index f9d4a3f..db78031 100755 --- a/alpine/build-rootfs.sh +++ b/alpine/build-rootfs.sh @@ -4,7 +4,7 @@ set -euo pipefail /preload-k8s-images.sh || exit 1 -export CTL_BIN_LAYER=$( skopeo inspect docker-daemon:localhost/monok8s/control-agent:dev | jq -r '.Layers[0] | sub("^sha256:"; "")' ) +export CTL_BIN_LAYER=$( skopeo inspect docker-daemon:localhost/monok8s/node-control:dev | jq -r '.Layers[0] | sub("^sha256:"; "")' ) mkdir -p \ "$ROOTFS/dev" \ diff --git a/alpine/preload-k8s-images.sh b/alpine/preload-k8s-images.sh index 0185f1b..d963384 100755 --- a/alpine/preload-k8s-images.sh +++ b/alpine/preload-k8s-images.sh @@ -26,7 +26,7 @@ FUSE_OVERLAYFS="${FUSE_OVERLAYFS:-/usr/bin/fuse-overlayfs}" # ) EXTRA_IMAGES=( "${EXTRA_IMAGES[@]:-}" - "docker-daemon:localhost/monok8s/control-agent:$TAG" + "docker-daemon:localhost/monok8s/node-control:$TAG" ) # Keep archive cache version/arch scoped so downloads do not get mixed. diff --git a/clitools/makefile b/clitools/makefile index ae616bf..69f7950 100644 --- a/clitools/makefile +++ b/clitools/makefile @@ -125,7 +125,7 @@ build-agent: build uboot-tools -f docker/ctl-agent.Dockerfile \ --build-arg VERSION=$(VERSION) \ --load \ - -t localhost/monok8s/control-agent:$(VERSION) . + -t localhost/monok8s/node-control:$(VERSION) . build-local: .buildinfo | $(BIN_DIR) docker buildx build \ @@ -138,13 +138,13 @@ build-local: .buildinfo | $(BIN_DIR) run-agent: docker run --rm \ -v "$$(pwd)/out:/work/out" \ - localhost/monok8s/control-agent:$(VERSION) \ + localhost/monok8s/node-control:$(VERSION) \ agent --env-file /work/out/cluster.env build: build-bin build-crds clean: - -docker image rm localhost/monok8s/control-agent:$(VERSION) >/dev/null 2>&1 || true + -docker image rm localhost/monok8s/node-control:$(VERSION) >/dev/null 2>&1 || true rm -rf \ $(BIN_DIR) \ $(OUT_DIR)/crds \ @@ -157,7 +157,7 @@ dockerclean: @echo "Removing tagged images..." 
- docker rmi \ localhost/monok8s/ctl-build-base:$(VERSION) \ - localhost/monok8s/control-agent:$(VERSION) \ + localhost/monok8s/node-control:$(VERSION) \ localhost/monok8s/ctl-builder:$(VERSION) \ localhost/monok8s/crdgen:$(VERSION) \ 2>/dev/null || true diff --git a/clitools/pkg/apis/monok8s/v1alpha1/groupversion_info.go b/clitools/pkg/apis/monok8s/v1alpha1/groupversion_info.go index 66f091f..ca03799 100644 --- a/clitools/pkg/apis/monok8s/v1alpha1/groupversion_info.go +++ b/clitools/pkg/apis/monok8s/v1alpha1/groupversion_info.go @@ -16,8 +16,10 @@ var ( AltPartDeviceLink = "/dev/mksaltpart" BootStateFile = "/run/monok8s/boot-state.env" CatalogURL = "https://example.com/monok8s.io/v1alpha1/catalog.yaml" - ControlAgentName = "control-agent" - ControlAgentKey = "monok8s.io/control-agent" + NodeControlKey = "monok8s.io/node-control" + NodeControlName = "node-control" + ControllerName = "node-controller" + NodeAgentName = "node-agent" EnvConfigDir = "/opt/monok8s/config" Label = "monok8s.io/label" MonoKSConfigCRD = "monoksconfigs.monok8s.io" diff --git a/clitools/pkg/apis/monok8s/v1alpha1/monoksconfig.go b/clitools/pkg/apis/monok8s/v1alpha1/monoksconfig.go index e9d330d..15d7c56 100644 --- a/clitools/pkg/apis/monok8s/v1alpha1/monoksconfig.go +++ b/clitools/pkg/apis/monok8s/v1alpha1/monoksconfig.go @@ -25,7 +25,7 @@ type MonoKSConfigSpec struct { ClusterDomain string `json:"clusterDomain,omitempty" yaml:"clusterDomain,omitempty"` ClusterRole string `json:"clusterRole,omitempty" yaml:"clusterRole,omitempty"` InitControlPlane bool `json:"initControlPlane,omitempty" yaml:"initControlPlane,omitempty"` - EnableControlAgent bool `json:"enableControlAgent,omitempty" yaml:"enableControlAgent,omitempty"` + EnableNodeControl bool `json:"enableNodeControl,omitempty" yaml:"enableNodeControl,omitempty"` PodSubnet string `json:"podSubnet,omitempty" yaml:"podSubnet,omitempty"` ServiceSubnet string `json:"serviceSubnet,omitempty" yaml:"serviceSubnet,omitempty"` 
APIServerAdvertiseAddress string `json:"apiServerAdvertiseAddress,omitempty" yaml:"apiServerAdvertiseAddress,omitempty"` diff --git a/clitools/pkg/bootstrap/registry.go b/clitools/pkg/bootstrap/registry.go index 51097e1..1dc2812 100644 --- a/clitools/pkg/bootstrap/registry.go +++ b/clitools/pkg/bootstrap/registry.go @@ -23,7 +23,7 @@ func NewRegistry(ctx *node.NodeContext) *Registry { return &Registry{ steps: map[string]node.Step{ - "ApplyControlAgentDaemonSetResources": node.ApplyControlAgentDaemonSetResources, + "ApplyNodeControlDaemonSetResources": node.ApplyNodeControlDaemonSetResources, "ApplyLocalNodeMetadataIfPossible": node.ApplyLocalNodeMetadataIfPossible, "CheckForVersionSkew": node.CheckForVersionSkew, "ClassifyBootstrapAction": node.ClassifyBootstrapAction, diff --git a/clitools/pkg/bootstrap/runner.go b/clitools/pkg/bootstrap/runner.go index c780596..ab2af02 100644 --- a/clitools/pkg/bootstrap/runner.go +++ b/clitools/pkg/bootstrap/runner.go @@ -158,7 +158,7 @@ func NewRunner(cfg *monov1alpha1.MonoKSConfig) *Runner { Desc: "Make A/B booting possible", }, { - RegKey: "ApplyControlAgentDaemonSetResources", + RegKey: "ApplyNodeControlDaemonSetResources", Name: "Apply daemonset for control agent", Desc: "Control agent handles OSUpgrade resources", }, diff --git a/clitools/pkg/cmd/create/create.go b/clitools/pkg/cmd/create/create.go index 018c0ab..3cb9ed9 100644 --- a/clitools/pkg/cmd/create/create.go +++ b/clitools/pkg/cmd/create/create.go @@ -1,9 +1,11 @@ package create import ( + "bytes" "fmt" "github.com/spf13/cobra" "k8s.io/cli-runtime/pkg/genericclioptions" + "os" render "example.com/monok8s/pkg/render" ) @@ -58,5 +60,52 @@ func NewCmdCreate(flags *genericclioptions.ConfigFlags) *cobra.Command { }, }, ) + + var authorizedKeysPath string + + sshdcmd := cobra.Command{ + Use: "sshd", + Short: "Print sshd deployment template", + RunE: func(cmd *cobra.Command, _ []string) error { + ns, _, err := flags.ToRawKubeConfigLoader().Namespace() + if err != nil { + 
return err + } + + authorizedKeys, err := readAuthorizedKeysFile(authorizedKeysPath) + if err != nil { + return err + } + + out, err := render.RenderSSHDDeployments(ns, authorizedKeys) + if err != nil { + return err + } + + _, err = fmt.Fprint(cmd.OutOrStdout(), out) + return err + }, + } + + sshdcmd.Flags().StringVar(&authorizedKeysPath, "authkeys", "", "path to authorized_keys file") + + cmd.AddCommand(&sshdcmd) return cmd } + +func readAuthorizedKeysFile(path string) (string, error) { + if path == "" { + return "", fmt.Errorf("--authkeys is required") + } + + b, err := os.ReadFile(path) + if err != nil { + return "", fmt.Errorf("read authorized_keys file %q: %w", path, err) + } + + if len(bytes.TrimSpace(b)) == 0 { + return "", fmt.Errorf("authorized_keys file %q is empty", path) + } + + return string(b), nil +} diff --git a/clitools/pkg/controller/osupgrade/watch.go b/clitools/pkg/controller/osupgrade/watch.go index a4885c7..6f93b39 100644 --- a/clitools/pkg/controller/osupgrade/watch.go +++ b/clitools/pkg/controller/osupgrade/watch.go @@ -268,7 +268,7 @@ func listTargetNodeNames( osu *monov1alpha1.OSUpgrade, ) ([]string, error) { selector := labels.SelectorFromSet(labels.Set{ - monov1alpha1.ControlAgentKey: "true", + monov1alpha1.NodeControlKey: "true", }) if osu.Spec.NodeSelector != nil { diff --git a/clitools/pkg/node/agent.go b/clitools/pkg/node/agent.go index aec8b75..2eea1fb 100644 --- a/clitools/pkg/node/agent.go +++ b/clitools/pkg/node/agent.go @@ -21,15 +21,15 @@ import ( const ( controlAgentNodeSelectorValue = "true" - controlAgentImage = "localhost/monok8s/control-agent:dev" + controlAgentImage = "localhost/monok8s/node-control:dev" kubeconfig = "/etc/kubernetes/admin.conf" ) -func ApplyControlAgentDaemonSetResources(ctx context.Context, n *NodeContext) error { +func ApplyNodeControlDaemonSetResources(ctx context.Context, n *NodeContext) error { // Only the control-plane should bootstrap this DaemonSet definition. 
// And only when the feature is enabled. - if strings.TrimSpace(n.Config.Spec.ClusterRole) != "control-plane" || !n.Config.Spec.EnableControlAgent { - klog.InfoS("skipped for", "clusterRole", n.Config.Spec.ClusterRole, "enableControlAgent", n.Config.Spec.EnableControlAgent) + if strings.TrimSpace(n.Config.Spec.ClusterRole) != "control-plane" || !n.Config.Spec.EnableNodeControl { + klog.InfoS("skipped for", "clusterRole", n.Config.Spec.ClusterRole, "enableNodeControl", n.Config.Spec.EnableNodeControl) return nil } @@ -49,10 +49,10 @@ func ApplyControlAgentDaemonSetResources(ctx context.Context, n *NodeContext) er } labels := map[string]string{ - "app.kubernetes.io/name": monov1alpha1.ControlAgentName, + "app.kubernetes.io/name": monov1alpha1.NodeAgentName, "app.kubernetes.io/component": "agent", "app.kubernetes.io/part-of": "monok8s", - "app.kubernetes.io/managed-by": "ctl", + "app.kubernetes.io/managed-by": monov1alpha1.NodeControlName, } kubeClient := clients.Kubernetes @@ -60,16 +60,16 @@ func ApplyControlAgentDaemonSetResources(ctx context.Context, n *NodeContext) er if err := ensureNamespace(ctx, kubeClient, namespace, labels); err != nil { return fmt.Errorf("ensure namespace %q: %w", namespace, err) } - if err := applyControlAgentServiceAccount(ctx, kubeClient, namespace, labels); err != nil { + if err := applyNodeAgentServiceAccount(ctx, kubeClient, namespace, labels); err != nil { return fmt.Errorf("apply serviceaccount: %w", err) } - if err := applyControlAgentClusterRole(ctx, kubeClient, labels); err != nil { + if err := applyNodeAgentClusterRole(ctx, kubeClient, labels); err != nil { return fmt.Errorf("apply clusterrole: %w", err) } - if err := applyControlAgentClusterRoleBinding(ctx, kubeClient, namespace, labels); err != nil { + if err := applyNodeAgentClusterRoleBinding(ctx, kubeClient, namespace, labels); err != nil { return fmt.Errorf("apply clusterrolebinding: %w", err) } - if err := applyControlAgentDaemonSet(ctx, kubeClient, namespace, labels); err
!= nil { + if err := applyNodeAgentDaemonSet(ctx, kubeClient, namespace, labels); err != nil { return fmt.Errorf("apply daemonset: %w", err) } @@ -116,16 +116,16 @@ func copyStringMap(in map[string]string) map[string]string { return out } -func applyControlAgentServiceAccount(ctx context.Context, kubeClient kubernetes.Interface, namespace string, labels map[string]string) error { +func applyNodeAgentServiceAccount(ctx context.Context, kubeClient kubernetes.Interface, namespace string, labels map[string]string) error { want := &corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ - Name: monov1alpha1.ControlAgentName, + Name: monov1alpha1.NodeAgentName, Namespace: namespace, Labels: labels, }, } - existing, err := kubeClient.CoreV1().ServiceAccounts(namespace).Get(ctx, monov1alpha1.ControlAgentName, metav1.GetOptions{}) + existing, err := kubeClient.CoreV1().ServiceAccounts(namespace).Get(ctx, monov1alpha1.NodeAgentName, metav1.GetOptions{}) if apierrors.IsNotFound(err) { _, err = kubeClient.CoreV1().ServiceAccounts(namespace).Create(ctx, want, metav1.CreateOptions{}) return err @@ -148,7 +148,7 @@ func applyControlAgentServiceAccount(ctx context.Context, kubeClient kubernetes. 
return err } -func applyControlAgentClusterRole(ctx context.Context, kubeClient kubernetes.Interface, labels map[string]string) error { +func applyNodeAgentClusterRole(ctx context.Context, kubeClient kubernetes.Interface, labels map[string]string) error { wantRules := []rbacv1.PolicyRule{ { APIGroups: []string{monov1alpha1.Group}, @@ -174,13 +174,13 @@ func applyControlAgentClusterRole(ctx context.Context, kubeClient kubernetes.Int want := &rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{ - Name: monov1alpha1.ControlAgentName, + Name: monov1alpha1.NodeAgentName, Labels: labels, }, Rules: wantRules, } - existing, err := kubeClient.RbacV1().ClusterRoles().Get(ctx, monov1alpha1.ControlAgentName, metav1.GetOptions{}) + existing, err := kubeClient.RbacV1().ClusterRoles().Get(ctx, monov1alpha1.NodeAgentName, metav1.GetOptions{}) if apierrors.IsNotFound(err) { _, err = kubeClient.RbacV1().ClusterRoles().Create(ctx, want, metav1.CreateOptions{}) return err @@ -207,30 +207,30 @@ func applyControlAgentClusterRole(ctx context.Context, kubeClient kubernetes.Int return err } -func applyControlAgentClusterRoleBinding(ctx context.Context, kubeClient kubernetes.Interface, namespace string, labels map[string]string) error { +func applyNodeAgentClusterRoleBinding(ctx context.Context, kubeClient kubernetes.Interface, namespace string, labels map[string]string) error { wantRoleRef := rbacv1.RoleRef{ APIGroup: rbacv1.GroupName, Kind: "ClusterRole", - Name: monov1alpha1.ControlAgentName, + Name: monov1alpha1.NodeAgentName, } wantSubjects := []rbacv1.Subject{ { Kind: "ServiceAccount", - Name: monov1alpha1.ControlAgentName, + Name: monov1alpha1.NodeAgentName, Namespace: namespace, }, } want := &rbacv1.ClusterRoleBinding{ ObjectMeta: metav1.ObjectMeta{ - Name: monov1alpha1.ControlAgentName, + Name: monov1alpha1.NodeAgentName, Labels: labels, }, RoleRef: wantRoleRef, Subjects: wantSubjects, } - existing, err := kubeClient.RbacV1().ClusterRoleBindings().Get(ctx, 
monov1alpha1.ControlAgentName, metav1.GetOptions{}) + existing, err := kubeClient.RbacV1().ClusterRoleBindings().Get(ctx, monov1alpha1.NodeAgentName, metav1.GetOptions{}) if apierrors.IsNotFound(err) { _, err = kubeClient.RbacV1().ClusterRoleBindings().Create(ctx, want, metav1.CreateOptions{}) return err @@ -241,7 +241,7 @@ func applyControlAgentClusterRoleBinding(ctx context.Context, kubeClient kuberne // roleRef is immutable. If it differs, fail loudly instead of pretending we can patch it. if !reflect.DeepEqual(existing.RoleRef, want.RoleRef) { - return fmt.Errorf("existing ClusterRoleBinding %q has different roleRef and must be recreated", monov1alpha1.ControlAgentName) + return fmt.Errorf("existing ClusterRoleBinding %q has different roleRef and must be recreated", monov1alpha1.NodeAgentName) } changed := false @@ -262,26 +262,26 @@ func applyControlAgentClusterRoleBinding(ctx context.Context, kubeClient kuberne return err } -func applyControlAgentDaemonSet(ctx context.Context, kubeClient kubernetes.Interface, namespace string, labels map[string]string) error { +func applyNodeAgentDaemonSet(ctx context.Context, kubeClient kubernetes.Interface, namespace string, labels map[string]string) error { privileged := true dsLabels := map[string]string{ - "app.kubernetes.io/name": monov1alpha1.ControlAgentName, + "app.kubernetes.io/name": monov1alpha1.NodeAgentName, "app.kubernetes.io/component": "agent", "app.kubernetes.io/part-of": "monok8s", - "app.kubernetes.io/managed-by": "ctl", + "app.kubernetes.io/managed-by": monov1alpha1.NodeControlName, } want := &appsv1.DaemonSet{ ObjectMeta: metav1.ObjectMeta{ - Name: monov1alpha1.ControlAgentName, + Name: monov1alpha1.NodeAgentName, Namespace: namespace, Labels: labels, }, Spec: appsv1.DaemonSetSpec{ Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ - "app.kubernetes.io/name": monov1alpha1.ControlAgentName, + "app.kubernetes.io/name": monov1alpha1.NodeAgentName, }, }, Template: corev1.PodTemplateSpec{ @@ 
-289,12 +289,12 @@ func applyControlAgentDaemonSet(ctx context.Context, kubeClient kubernetes.Inter Labels: dsLabels, }, Spec: corev1.PodSpec{ - ServiceAccountName: monov1alpha1.ControlAgentName, + ServiceAccountName: monov1alpha1.NodeAgentName, HostNetwork: true, HostPID: true, DNSPolicy: corev1.DNSClusterFirstWithHostNet, NodeSelector: map[string]string{ - monov1alpha1.ControlAgentKey: controlAgentNodeSelectorValue, + monov1alpha1.NodeControlKey: controlAgentNodeSelectorValue, }, Tolerations: []corev1.Toleration{ {Operator: corev1.TolerationOpExists}, @@ -379,7 +379,7 @@ func applyControlAgentDaemonSet(ctx context.Context, kubeClient kubernetes.Inter }, } - existing, err := kubeClient.AppsV1().DaemonSets(namespace).Get(ctx, monov1alpha1.ControlAgentName, metav1.GetOptions{}) + existing, err := kubeClient.AppsV1().DaemonSets(namespace).Get(ctx, monov1alpha1.NodeAgentName, metav1.GetOptions{}) if apierrors.IsNotFound(err) { _, err = kubeClient.AppsV1().DaemonSets(namespace).Create(ctx, want, metav1.CreateOptions{}) return err diff --git a/clitools/pkg/node/kubeadm.go b/clitools/pkg/node/kubeadm.go index 5a741a7..07d04e3 100644 --- a/clitools/pkg/node/kubeadm.go +++ b/clitools/pkg/node/kubeadm.go @@ -476,7 +476,7 @@ func buildNodeRegistration(spec monov1alpha1.MonoKSConfigSpec) NodeRegistrationO } func effectiveNodeLabels(spec monov1alpha1.MonoKSConfigSpec) map[string]string { - if len(spec.NodeLabels) == 0 && !spec.EnableControlAgent { + if len(spec.NodeLabels) == 0 && !spec.EnableNodeControl { return nil } @@ -485,8 +485,8 @@ func effectiveNodeLabels(spec monov1alpha1.MonoKSConfigSpec) map[string]string { labels[k] = v } - if spec.EnableControlAgent { - labels[monov1alpha1.ControlAgentKey] = "true" + if spec.EnableNodeControl { + labels[monov1alpha1.NodeControlKey] = "true" } return labels diff --git a/clitools/pkg/node/kubeadm_upgrade.go b/clitools/pkg/node/kubeadm_upgrade.go index 2276c83..ade55e1 100644 --- a/clitools/pkg/node/kubeadm_upgrade.go +++ 
b/clitools/pkg/node/kubeadm_upgrade.go @@ -14,6 +14,7 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" + monov1alpha1 "example.com/monok8s/pkg/apis/monok8s/v1alpha1" "example.com/monok8s/pkg/kube" "example.com/monok8s/pkg/system" ) @@ -113,7 +114,7 @@ func runUpgradeSelfHealthCheck(ctx context.Context, kubeClient kubernetes.Interf Namespace: healthCheckNamespace, Labels: map[string]string{ "app.kubernetes.io/name": "preupgrade-health-check", - "app.kubernetes.io/managed-by": "monok8s", + "app.kubernetes.io/managed-by": monov1alpha1.NodeControlName, }, }, Spec: corev1.PodSpec{ diff --git a/clitools/pkg/node/metadata.go b/clitools/pkg/node/metadata.go index e403505..5d3c64c 100644 --- a/clitools/pkg/node/metadata.go +++ b/clitools/pkg/node/metadata.go @@ -60,8 +60,8 @@ func ApplyLocalNodeMetadataIfPossible(ctx context.Context, nctx *NodeContext) er } // Additional Labels - if spec.EnableControlAgent { - node.Labels[monov1alpah1.ControlAgentKey] = controlAgentNodeSelectorValue + if spec.EnableNodeControl { + node.Labels[monov1alpah1.NodeControlKey] = controlAgentNodeSelectorValue } _, err = client.CoreV1().Nodes().Update(ctx, node, metav1.UpdateOptions{}) diff --git a/clitools/pkg/render/controller.go b/clitools/pkg/render/controller.go index c353e3f..6ada0b1 100644 --- a/clitools/pkg/render/controller.go +++ b/clitools/pkg/render/controller.go @@ -13,6 +13,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" monov1alpha1 "example.com/monok8s/pkg/apis/monok8s/v1alpha1" + buildinfo "example.com/monok8s/pkg/buildinfo" templates "example.com/monok8s/pkg/templates" ) @@ -20,10 +21,10 @@ func RenderControllerDeployments(namespace string) (string, error) { vals := templates.LoadTemplateValuesFromEnv() labels := map[string]string{ - "app.kubernetes.io/name": monov1alpha1.ControlAgentName, + "app.kubernetes.io/name": monov1alpha1.ControllerName, "app.kubernetes.io/component": "controller", "app.kubernetes.io/part-of": "monok8s", - "app.kubernetes.io/managed-by": 
"ctl", + "app.kubernetes.io/managed-by": monov1alpha1.NodeControlName, } objs := []runtime.Object{ @@ -66,7 +67,7 @@ func buildControllerServiceAccount(namespace string, labels map[string]string) * Kind: "ServiceAccount", }, ObjectMeta: metav1.ObjectMeta{ - Name: monov1alpha1.ControlAgentName, + Name: monov1alpha1.ControllerName, Namespace: namespace, Labels: labels, }, @@ -109,7 +110,7 @@ func buildControllerClusterRole(labels map[string]string) *rbacv1.ClusterRole { Kind: "ClusterRole", }, ObjectMeta: metav1.ObjectMeta{ - Name: monov1alpha1.ControlAgentName, + Name: monov1alpha1.ControllerName, Labels: labels, }, Rules: wantRules, @@ -121,7 +122,7 @@ func buildControllerClusterRoleBinding(namespace string, labels map[string]strin wantSubjects := []rbacv1.Subject{ { Kind: "ServiceAccount", - Name: monov1alpha1.ControlAgentName, + Name: monov1alpha1.ControllerName, Namespace: namespace, }, } @@ -129,7 +130,7 @@ func buildControllerClusterRoleBinding(namespace string, labels map[string]strin wantRoleRef := rbacv1.RoleRef{ APIGroup: rbacv1.GroupName, Kind: "ClusterRole", - Name: monov1alpha1.ControlAgentName, + Name: monov1alpha1.ControllerName, } return &rbacv1.ClusterRoleBinding{ @@ -138,7 +139,7 @@ func buildControllerClusterRoleBinding(namespace string, labels map[string]strin Kind: "ClusterRoleBinding", }, ObjectMeta: metav1.ObjectMeta{ - Name: monov1alpha1.ControlAgentName, + Name: monov1alpha1.ControllerName, Labels: labels, }, Subjects: wantSubjects, @@ -150,7 +151,7 @@ func buildControllerDeployment(tVals templates.TemplateValues, namespace string, replicas := int32(1) selectorLabels := map[string]string{ - "app.kubernetes.io/name": monov1alpha1.ControlAgentName, + "app.kubernetes.io/name": monov1alpha1.ControllerName, "app.kubernetes.io/component": "controller", } @@ -165,7 +166,7 @@ func buildControllerDeployment(tVals templates.TemplateValues, namespace string, Kind: "Deployment", }, ObjectMeta: metav1.ObjectMeta{ - Name: monov1alpha1.ControlAgentName, + 
Name: monov1alpha1.ControllerName, Namespace: namespace, Labels: labels, }, @@ -179,11 +180,11 @@ func buildControllerDeployment(tVals templates.TemplateValues, namespace string, Labels: podLabels, }, Spec: corev1.PodSpec{ - ServiceAccountName: monov1alpha1.ControlAgentName, + ServiceAccountName: monov1alpha1.ControllerName, Containers: []corev1.Container{ { Name: "controller", - Image: fmt.Sprintf("registry.local/control-agent:%s", tVals.KubernetesVersion), + Image: fmt.Sprintf("localhost/monok8s/node-control:%s", buildinfo.Version), ImagePullPolicy: corev1.PullIfNotPresent, Args: []string{ "controller", diff --git a/clitools/pkg/render/sshd.go b/clitools/pkg/render/sshd.go new file mode 100644 index 0000000..58722dc --- /dev/null +++ b/clitools/pkg/render/sshd.go @@ -0,0 +1,278 @@ +package render + +import ( + "bytes" + "fmt" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer/json" + "k8s.io/apimachinery/pkg/util/intstr" + + monov1alpha1 "example.com/monok8s/pkg/apis/monok8s/v1alpha1" + "example.com/monok8s/pkg/templates" +) + +const ( + sshdName = "sshd" + sshdConfigName = "sshd-authorized-keys" + sshdNodePort = int32(30022) +) + +func RenderSSHDDeployments(namespace, authKeys string) (string, error) { + vals := templates.LoadTemplateValuesFromEnv() + + labels := map[string]string{ + "app.kubernetes.io/name": sshdName, + "app.kubernetes.io/component": "host-access", + "app.kubernetes.io/part-of": "monok8s", + "app.kubernetes.io/managed-by": monov1alpha1.NodeControlName, + } + + objs := []runtime.Object{ + buildSSHDConfigMap(authKeys, namespace, labels), + buildSSHDService(vals, namespace, labels), + buildSSHDDeployment(vals, namespace, labels), + } + + s := runtime.NewScheme() + _ = corev1.AddToScheme(s) + _ = rbacv1.AddToScheme(s) + _ = 
appsv1.AddToScheme(s) + + serializer := json.NewYAMLSerializer(json.DefaultMetaFactory, s, s) + + var buf bytes.Buffer + + for i, obj := range objs { + if i > 0 { + if _, err := fmt.Fprintln(&buf, "---"); err != nil { + return "", err + } + } + if err := serializer.Encode(obj, &buf); err != nil { + return "", err + } + } + + return buf.String(), nil +} + +func buildSSHDConfigMap( + authorizedKeys string, + namespace string, + labels map[string]string, +) *corev1.ConfigMap { + + return &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ConfigMap", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: sshdConfigName, + Namespace: namespace, + Labels: labels, + }, + Data: map[string]string{ + "authorized_keys": authorizedKeys, + }, + } +} + +func buildSSHDService( + tVals templates.TemplateValues, + namespace string, + labels map[string]string, +) *corev1.Service { + selectorLabels := map[string]string{ + monov1alpha1.NodeControlKey: "true", + "kubernetes.io/hostname": tVals.NodeName, + } + + return &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Service", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: sshdName, + Namespace: namespace, + Labels: labels, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeNodePort, + Selector: selectorLabels, + Ports: []corev1.ServicePort{ + { + Name: "ssh", + Protocol: corev1.ProtocolTCP, + Port: 22, + TargetPort: intstr.FromInt32(22), + NodePort: sshdNodePort, + }, + }, + }, + } +} + +func buildSSHDDeployment( + tVals templates.TemplateValues, + namespace string, + labels map[string]string, +) *appsv1.Deployment { + replicas := int32(1) + + selectorLabels := map[string]string{ + monov1alpha1.NodeControlKey: "true", + "kubernetes.io/hostname": tVals.NodeName, + } + + podLabels := mergeStringMaps(labels, selectorLabels) + + runAsUser := int64(0) + runAsNonRoot := false + privileged := true + allowPrivilegeEscalation := true + readOnlyRootFilesystem := false + + return 
&appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apps/v1", + Kind: "Deployment", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: sshdName, + Namespace: namespace, + Labels: labels, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicas, + Selector: &metav1.LabelSelector{ + MatchLabels: selectorLabels, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: podLabels, + }, + Spec: corev1.PodSpec{ + NodeSelector: selectorLabels, + Containers: []corev1.Container{ + { + Name: sshdName, + Image: "alpine:latest", + Command: []string{ + "/bin/sh", + "-ceu", + ` +apk add --no-cache openssh-server + +mkdir -p /run/sshd +mkdir -p /root/.ssh + +cp /authorized-keys/authorized_keys /root/.ssh/authorized_keys +chmod 700 /root/.ssh +chmod 600 /root/.ssh/authorized_keys + +ssh-keygen -A + +exec /usr/sbin/sshd \ + -D \ + -e \ + -p 22 \ + -o PermitRootLogin=prohibit-password \ + -o PasswordAuthentication=no \ + -o KbdInteractiveAuthentication=no \ + -o PubkeyAuthentication=yes \ + -o AuthorizedKeysFile=/root/.ssh/authorized_keys +`, + }, + Ports: []corev1.ContainerPort{ + { + Name: "ssh", + ContainerPort: 22, + Protocol: corev1.ProtocolTCP, + }, + }, + SecurityContext: &corev1.SecurityContext{ + RunAsUser: &runAsUser, + RunAsNonRoot: &runAsNonRoot, + Privileged: &privileged, + AllowPrivilegeEscalation: &allowPrivilegeEscalation, + ReadOnlyRootFilesystem: &readOnlyRootFilesystem, + }, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("10m"), + corev1.ResourceMemory: resource.MustParse("32Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("200m"), + corev1.ResourceMemory: resource.MustParse("128Mi"), + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "authorized-keys", + MountPath: "/authorized-keys", + ReadOnly: true, + }, + { + Name: "host-etc", + MountPath: "/host/etc", + }, + { + Name: "host-var", + MountPath: "/host/var", + }, + }, 
+ }, + }, + Volumes: []corev1.Volume{ + { + Name: "authorized-keys", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: sshdConfigName, + }, + DefaultMode: ptrInt32(0600), + }, + }, + }, + { + Name: "host-etc", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/etc", + Type: ptrHostPathType(corev1.HostPathDirectory), + }, + }, + }, + { + Name: "host-var", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/var", + Type: ptrHostPathType(corev1.HostPathDirectory), + }, + }, + }, + }, + }, + }, + }, + } +} + +func ptrInt32(v int32) *int32 { + return &v +} + +func ptrHostPathType(v corev1.HostPathType) *corev1.HostPathType { + return &v +} diff --git a/clitools/pkg/templates/templates.go b/clitools/pkg/templates/templates.go index 09d6797..1daf14d 100644 --- a/clitools/pkg/templates/templates.go +++ b/clitools/pkg/templates/templates.go @@ -25,7 +25,7 @@ func DefaultMonoKSConfig(v TemplateValues) monov1alpha1.MonoKSConfig { ClusterRole: v.ClusterRole, InitControlPlane: v.InitControlPlane, - EnableControlAgent: v.EnableControlAgent, + EnableNodeControl: v.EnableNodeControl, ClusterName: v.ClusterName, ClusterDomain: v.ClusterDomain, diff --git a/clitools/pkg/templates/values.go b/clitools/pkg/templates/values.go index 2bda201..6e69313 100644 --- a/clitools/pkg/templates/values.go +++ b/clitools/pkg/templates/values.go @@ -24,9 +24,9 @@ type TemplateValues struct { ContainerRuntimeEndpoint string CNIPlugin string - ClusterRole string // worker, control-plane - InitControlPlane bool - EnableControlAgent bool + ClusterRole string // worker, control-plane + InitControlPlane bool + EnableNodeControl bool AllowSchedulingOnControlPlane bool SkipImageCheck bool @@ -58,9 +58,9 @@ func defaultTemplateValues() TemplateValues { ContainerRuntimeEndpoint: "unix:///var/run/crio/crio.sock", CNIPlugin: "default", - 
ClusterRole: "control-plane", - InitControlPlane: true, - EnableControlAgent: true, + ClusterRole: "control-plane", + InitControlPlane: true, + EnableNodeControl: true, AllowSchedulingOnControlPlane: true, SkipImageCheck: false, @@ -104,7 +104,7 @@ func LoadTemplateValuesFromEnv() TemplateValues { v.ClusterRole = getenvDefault("MKS_CLUSTER_ROLE", v.ClusterRole) v.InitControlPlane = getenvBoolDefault("MKS_INIT_CONTROL_PLANE", v.InitControlPlane) - v.EnableControlAgent = getenvBoolDefault("MKS_ENABLE_CONTROL_AGENT", v.EnableControlAgent) + v.EnableNodeControl = getenvBoolDefault("MKS_ENABLE_NODE_CONTROL", v.EnableNodeControl) v.AllowSchedulingOnControlPlane = getenvBoolDefault("MKS_ALLOW_SCHEDULING_ON_CONTROL_PLANE", v.AllowSchedulingOnControlPlane) v.SkipImageCheck = getenvBoolDefault("MKS_SKIP_IMAGE_CHECK", v.SkipImageCheck) diff --git a/configs/cluster.env.default b/configs/cluster.env.default index 7e8517c..043970e 100644 --- a/configs/cluster.env.default +++ b/configs/cluster.env.default @@ -30,8 +30,8 @@ MKS_CLUSTER_DOMAIN=cluster.local MKS_CLUSTER_ROLE=control-plane MKS_INIT_CONTROL_PLANE=yes -# OSUpgrade agent -MKS_ENABLE_CONTROL_AGENT=yes +# Enable if you want OTA OSUpgrade +MKS_ENABLE_NODE_CONTROL=yes # Boot configs # usb, emmc diff --git a/docs/ota.md b/docs/ota.md index 9de4041..bd7beff 100644 --- a/docs/ota.md +++ b/docs/ota.md @@ -49,7 +49,7 @@ catalog: ``` ※ ConfigMap requires additional RBAC permissions which is not enabled by default. You can edit -the control-agent's ClusterRole and add `configmaps: get` to allow this. +the node-agent's ClusterRole and add `configmaps: get` to allow this. 
Contents should look like this ```yaml diff --git a/docs/uboot.md b/docs/uboot.md index e393d43..2e5520d 100644 --- a/docs/uboot.md +++ b/docs/uboot.md @@ -20,11 +20,11 @@ run bootusb ## Run fw_printenv and fw_setenv from kubectl ``` -# Avoid using daemonset/control-agent if you have multiple nodes -kubectl exec -n kube-system control-agent-abcdef1 -- /ctl internal fw-setenv --key foo --value bar +# Avoid using daemonset/node-agent if you have multiple nodes +kubectl exec -n kube-system node-agent-abcdef1 -- /ctl internal fw-setenv --key foo --value bar # fw_printenv -kubectl exec -n kube-system ds/control-agent -- /ctl internal fw-printenv --key foo +kubectl exec -n kube-system ds/node-agent -- /ctl internal fw-printenv --key foo ``` ## Original uboot env from mono