Added: ctl create controller

2026-04-25 00:46:43 +08:00
parent e4a19e5926
commit 1354e83813
13 changed files with 372 additions and 450 deletions

View File

@@ -16,6 +16,7 @@ var (
AltPartDeviceLink = "/dev/mksaltpart"
BootStateFile = "/run/monok8s/boot-state.env"
CatalogURL = "https://example.com/monok8s.io/v1alpha1/catalog.yaml"
ControlAgentName = "control-agent"
ControlAgentKey = "monok8s.io/control-agent"
EnvConfigDir = "/opt/monok8s/config"
Label = "monok8s.io/label"

View File

@@ -53,6 +53,10 @@ type OSUpgradeSpec struct {
// +kubebuilder:validation:Enum=fast;balanced;safe
// +kubebuilder:default=balanced
// Profiles (TODO)
// safe - the api-server stays responsive most of the time
// balanced - the api-server may occasionally become unresponsive
// fast - disables throttling; suitable for worker nodes
FlashProfile string `json:"flashProfile,omitempty" yaml:"flashProfile,omitempty"`
Catalog *VersionCatalogSource `json:"catalog,omitempty" yaml:"catalog,omitempty"`
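A purely illustrative sketch of how an agent might turn the three profiles above into a flash write-rate limit; the function name and byte rates are invented here, and only the profile names and the balanced default come from the API:

// Hypothetical mapping from flashProfile to a write-rate limit. The
// numbers are placeholders, not values from this commit.
func throttleFor(profile string) (bytesPerSec int64) {
	switch profile {
	case "safe":
		return 16 << 20 // throttle hard so the api-server stays responsive
	case "fast":
		return 0 // 0 = unlimited: throttling disabled, acceptable on worker nodes
	default: // "balanced" is the kubebuilder default
		return 64 << 20
	}
}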
@@ -100,7 +104,13 @@ type OSUpgradeProgressList struct {
type OSUpgradeProgressSpec struct {
SourceRef OSUpgradeSourceRef `json:"sourceRef,omitempty" yaml:"sourceRef,omitempty"`
NodeName string `json:"nodeName,omitempty" yaml:"nodeName,omitempty"`
// RetryNonce triggers a retry when its value changes.
// Users can update this field (for example, set it to the current time)
// to request a retry of a failed OS upgrade.
RetryNonce string `json:"retryNonce,omitempty" yaml:"retryNonce,omitempty"`
NodeName string `json:"nodeName,omitempty" yaml:"nodeName,omitempty"`
}
type OSUpgradeSourceRef struct {

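A hedged sketch of how a client could request a retry by bumping spec.retryNonce, assuming the monok8s.io/v1alpha1 group string and an already-constructed client-go dynamic client (neither is shown in this commit):

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/dynamic"
)

// requestRetry bumps spec.retryNonce with a JSON merge patch. Any change
// to the value requests a retry; the current time is always a fresh value.
func requestRetry(ctx context.Context, dyn dynamic.Interface, namespace, name string) error {
	patch := []byte(fmt.Sprintf(`{"spec":{"retryNonce":%q}}`, time.Now().Format(time.RFC3339)))
	gvr := schema.GroupVersionResource{Group: "monok8s.io", Version: "v1alpha1", Resource: "osupgradeprogresses"}
	_, err := dyn.Resource(gvr).Namespace(namespace).Patch(ctx, name, types.MergePatchType, patch, metav1.PatchOptions{})
	return err
}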
View File

@@ -21,13 +21,17 @@ import (
)
func NewCmdAgent(flags *genericclioptions.ConfigFlags) *cobra.Command {
var namespace string
var envFile string
cmd := &cobra.Command{
Use: "agent --env-file path",
Short: "Watch OSUpgradeProgress resources for this node and process upgrades",
RunE: func(cmd *cobra.Command, _ []string) error {
ns, _, err := flags.ToRawKubeConfigLoader().Namespace()
if err != nil {
return err
}
if envFile == "" {
return fmt.Errorf("--env-file is required")
}
@@ -51,7 +55,7 @@ func NewCmdAgent(flags *genericclioptions.ConfigFlags) *cobra.Command {
klog.InfoS("starting agent",
"node", cfg.Spec.NodeName,
"namespace", namespace,
"namespace", ns,
"envFile", envFile,
)
@@ -60,11 +64,10 @@ func NewCmdAgent(flags *genericclioptions.ConfigFlags) *cobra.Command {
return fmt.Errorf("create kube clients: %w", err)
}
return runWatchLoop(ctx, clients, namespace, cfg.Spec.NodeName)
return runWatchLoop(ctx, clients, ns, cfg.Spec.NodeName)
},
}
cmd.Flags().StringVar(&namespace, "namespace", templates.DefaultNamespace, "namespace to watch")
cmd.Flags().StringVar(&envFile, "env-file", "", "path to env file containing MKS_* variables")
return cmd
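For reference, the namespace resolution the agent now relies on, sketched with client-go's clientcmd semantics (the overridden value is part of that API, not this commit):

// ConfigFlags registers the standard -n/--namespace flag, so Namespace()
// resolves: explicit flag, then kubeconfig context, then "default".
ns, overridden, err := flags.ToRawKubeConfigLoader().Namespace()
if err != nil {
	return err
}
_ = overridden // true when -n/--namespace was set explicitly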

View File

@@ -15,7 +15,6 @@ import (
mkscontroller "example.com/monok8s/pkg/controller"
osupgradectrl "example.com/monok8s/pkg/controller/osupgrade"
"example.com/monok8s/pkg/kube"
"example.com/monok8s/pkg/templates"
)
type ServerConfig struct {
@@ -31,6 +30,12 @@ func NewCmdController(flags *genericclioptions.ConfigFlags) *cobra.Command {
Use: "controller",
Short: "Start a controller that handles OSUpgrade resources",
RunE: func(cmd *cobra.Command, _ []string) error {
ns, _, err := flags.ToRawKubeConfigLoader().Namespace()
if err != nil {
return err
}
conf.Namespace = ns
ctx := cmd.Context()
klog.InfoS("starting controller", "namespace", conf.Namespace)
@@ -79,7 +84,6 @@ func NewCmdController(flags *genericclioptions.ConfigFlags) *cobra.Command {
},
}
cmd.Flags().StringVar(&conf.Namespace, "namespace", templates.DefaultNamespace, "namespace to watch")
cmd.Flags().StringVar(&conf.TLSCertFile, "tls-cert-file", conf.TLSCertFile,
"File containing x509 Certificate used for serving HTTPS (with intermediate certs, if any, concatenated after server cert).")
cmd.Flags().StringVar(&conf.TLSPrivateKeyFile, "tls-private-key-file", conf.TLSPrivateKeyFile,

View File

@@ -3,11 +3,12 @@ package create
import (
"fmt"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
render "example.com/monok8s/pkg/render"
)
func NewCmdCreate() *cobra.Command {
func NewCmdCreate(flags *genericclioptions.ConfigFlags) *cobra.Command {
cmd := &cobra.Command{Use: "create", Short: "Create starter resources"}
cmd.AddCommand(
&cobra.Command{
@@ -26,7 +27,29 @@ func NewCmdCreate() *cobra.Command {
Use: "osupgrade",
Short: "Print an OSUpgrade template",
RunE: func(cmd *cobra.Command, _ []string) error {
out, err := render.RenderOSUpgrade()
ns, _, err := flags.ToRawKubeConfigLoader().Namespace()
if err != nil {
return err
}
out, err := render.RenderOSUpgrade(ns)
if err != nil {
return err
}
_, err = fmt.Fprint(cmd.OutOrStdout(), out)
return err
},
},
&cobra.Command{
Use: "controller",
Short: "Print controller deployment template",
RunE: func(cmd *cobra.Command, _ []string) error {
ns, _, err := flags.ToRawKubeConfigLoader().Namespace()
if err != nil {
return err
}
out, err := render.RenderControllerDeployments(ns)
if err != nil {
return err
}

View File

@@ -44,7 +44,7 @@ func NewRootCmd() *cobra.Command {
versioncmd.NewCmdVersion(),
initcmd.NewCmdInit(flags),
checkconfigcmd.NewCmdCheckConfig(),
createcmd.NewCmdCreate(),
createcmd.NewCmdCreate(flags),
agentcmd.NewCmdAgent(flags),
controllercmd.NewCmdController(flags),
internalcmd.NewCmdInternal(),

View File

@@ -76,6 +76,10 @@ func (s *Server) Initialize() {
ws.Route(ws.GET("/healthz").To(s.queryHealthz).
Doc("Return basic controller status"))
// Stub for now
ws.Route(ws.GET("/readyz").To(s.queryHealthz).
Doc("Stub for now"))
s.restfulCont.Add(ws)
}
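Since /readyz reuses queryHealthz for now, here is a hedged sketch of what a dedicated handler could look like, assuming the go-restful handler signature already used above and a hypothetical hasSynced field on Server:

import "net/http"

// queryReadyz is a hypothetical replacement for the stub; s.hasSynced is
// an invented func() bool that would report informer cache sync.
func (s *Server) queryReadyz(req *restful.Request, resp *restful.Response) {
	if s.hasSynced == nil || !s.hasSynced() {
		resp.WriteErrorString(http.StatusServiceUnavailable, "informer caches not yet synced")
		return
	}
	resp.WriteHeader(http.StatusOK)
}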

View File

@@ -20,7 +20,6 @@ import (
)
const (
controlAgentName = "control-agent"
controlAgentNodeSelectorValue = "true"
controlAgentImage = "localhost/monok8s/control-agent:dev"
kubeconfig = "/etc/kubernetes/admin.conf"
@@ -50,7 +49,7 @@ func ApplyControlAgentDaemonSetResources(ctx context.Context, n *NodeContext) er
}
labels := map[string]string{
"app.kubernetes.io/name": controlAgentName,
"app.kubernetes.io/name": monov1alpha1.ControlAgentName,
"app.kubernetes.io/component": "agent",
"app.kubernetes.io/part-of": "monok8s",
"app.kubernetes.io/managed-by": "ctl",
@@ -120,13 +119,13 @@ func copyStringMap(in map[string]string) map[string]string {
func applyControlAgentServiceAccount(ctx context.Context, kubeClient kubernetes.Interface, namespace string, labels map[string]string) error {
want := &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: controlAgentName,
Name: monov1alpha1.ControlAgentName,
Namespace: namespace,
Labels: labels,
},
}
existing, err := kubeClient.CoreV1().ServiceAccounts(namespace).Get(ctx, controlAgentName, metav1.GetOptions{})
existing, err := kubeClient.CoreV1().ServiceAccounts(namespace).Get(ctx, monov1alpha1.ControlAgentName, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
_, err = kubeClient.CoreV1().ServiceAccounts(namespace).Create(ctx, want, metav1.CreateOptions{})
return err
@@ -175,13 +174,13 @@ func applyControlAgentClusterRole(ctx context.Context, kubeClient kubernetes.Int
want := &rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{
Name: controlAgentName,
Name: monov1alpha1.ControlAgentName,
Labels: labels,
},
Rules: wantRules,
}
existing, err := kubeClient.RbacV1().ClusterRoles().Get(ctx, controlAgentName, metav1.GetOptions{})
existing, err := kubeClient.RbacV1().ClusterRoles().Get(ctx, monov1alpha1.ControlAgentName, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
_, err = kubeClient.RbacV1().ClusterRoles().Create(ctx, want, metav1.CreateOptions{})
return err
@@ -212,26 +211,26 @@ func applyControlAgentClusterRoleBinding(ctx context.Context, kubeClient kuberne
wantRoleRef := rbacv1.RoleRef{
APIGroup: rbacv1.GroupName,
Kind: "ClusterRole",
Name: controlAgentName,
Name: monov1alpha1.ControlAgentName,
}
wantSubjects := []rbacv1.Subject{
{
Kind: "ServiceAccount",
Name: controlAgentName,
Name: monov1alpha1.ControlAgentName,
Namespace: namespace,
},
}
want := &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: controlAgentName,
Name: monov1alpha1.ControlAgentName,
Labels: labels,
},
RoleRef: wantRoleRef,
Subjects: wantSubjects,
}
existing, err := kubeClient.RbacV1().ClusterRoleBindings().Get(ctx, controlAgentName, metav1.GetOptions{})
existing, err := kubeClient.RbacV1().ClusterRoleBindings().Get(ctx, monov1alpha1.ControlAgentName, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
_, err = kubeClient.RbacV1().ClusterRoleBindings().Create(ctx, want, metav1.CreateOptions{})
return err
@@ -242,7 +241,7 @@ func applyControlAgentClusterRoleBinding(ctx context.Context, kubeClient kuberne
// roleRef is immutable. If it differs, fail loudly instead of pretending we can patch it.
if !reflect.DeepEqual(existing.RoleRef, want.RoleRef) {
return fmt.Errorf("existing ClusterRoleBinding %q has different roleRef and must be recreated", controlAgentName)
return fmt.Errorf("existing ClusterRoleBinding %q has different roleRef and must be recreated", monov1alpha1.ControlAgentName)
}
changed := false
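For completeness, a sketch of the recreate path that comment alludes to, in case automatic handling were ever wanted; this is not part of the commit, and deleting a ClusterRoleBinding briefly revokes the subject's permissions:

// Hypothetical alternative: recreate the binding when its immutable
// roleRef has diverged, instead of returning an error.
if err := kubeClient.RbacV1().ClusterRoleBindings().Delete(ctx, want.Name, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) {
	return err
}
_, err = kubeClient.RbacV1().ClusterRoleBindings().Create(ctx, want, metav1.CreateOptions{})
return err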
@@ -267,7 +266,7 @@ func applyControlAgentDaemonSet(ctx context.Context, kubeClient kubernetes.Inter
privileged := true
dsLabels := map[string]string{
"app.kubernetes.io/name": controlAgentName,
"app.kubernetes.io/name": monov1alpha1.ControlAgentName,
"app.kubernetes.io/component": "agent",
"app.kubernetes.io/part-of": "monok8s",
"app.kubernetes.io/managed-by": "ctl",
@@ -275,14 +274,14 @@ func applyControlAgentDaemonSet(ctx context.Context, kubeClient kubernetes.Inter
want := &appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: controlAgentName,
Name: monov1alpha1.ControlAgentName,
Namespace: namespace,
Labels: labels,
},
Spec: appsv1.DaemonSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"app.kubernetes.io/name": controlAgentName,
"app.kubernetes.io/name": monov1alpha1.ControlAgentName,
},
},
Template: corev1.PodTemplateSpec{
@@ -290,7 +289,7 @@ func applyControlAgentDaemonSet(ctx context.Context, kubeClient kubernetes.Inter
Labels: dsLabels,
},
Spec: corev1.PodSpec{
ServiceAccountName: controlAgentName,
ServiceAccountName: monov1alpha1.ControlAgentName,
HostNetwork: true,
HostPID: true,
DNSPolicy: corev1.DNSClusterFirstWithHostNet,
@@ -380,7 +379,7 @@ func applyControlAgentDaemonSet(ctx context.Context, kubeClient kubernetes.Inter
},
}
existing, err := kubeClient.AppsV1().DaemonSets(namespace).Get(ctx, controlAgentName, metav1.GetOptions{})
existing, err := kubeClient.AppsV1().DaemonSets(namespace).Get(ctx, monov1alpha1.ControlAgentName, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
_, err = kubeClient.AppsV1().DaemonSets(namespace).Create(ctx, want, metav1.CreateOptions{})
return err

View File

@@ -1,405 +0,0 @@
package node
import (
"context"
"fmt"
"reflect"
"sort"
"strings"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
monov1alpha1 "example.com/monok8s/pkg/apis/monok8s/v1alpha1"
"example.com/monok8s/pkg/kube"
templates "example.com/monok8s/pkg/templates"
)
func applyControllerDeploymentResources(ctx context.Context, n *NodeContext) error {
if strings.TrimSpace(n.Config.Spec.ClusterRole) != "control-plane" || !n.Config.Spec.EnableControlAgent {
klog.InfoS("skipped controller deployment",
"clusterRole", n.Config.Spec.ClusterRole,
"enableControlAgent", n.Config.Spec.EnableControlAgent,
)
return nil
}
if err := ApplyCRDs(ctx, n); err != nil {
return err
}
namespace := strings.TrimSpace(n.Config.Namespace)
if namespace == "" {
namespace = templates.DefaultNamespace
}
clients, err := kube.NewClientsFromKubeconfig(adminKubeconfigPath)
if err != nil {
return fmt.Errorf("build kube clients from %s: %w", adminKubeconfigPath, err)
}
labels := map[string]string{
"app.kubernetes.io/name": controlAgentName,
"app.kubernetes.io/component": "controller",
"app.kubernetes.io/part-of": "monok8s",
"app.kubernetes.io/managed-by": "ctl",
}
kubeClient := clients.Kubernetes
if err := ensureNamespace(ctx, kubeClient, namespace, labels); err != nil {
return fmt.Errorf("ensure namespace %q: %w", namespace, err)
}
if err := applyControllerServiceAccount(ctx, kubeClient, namespace, labels); err != nil {
return fmt.Errorf("apply serviceaccount: %w", err)
}
if err := applyControllerClusterRole(ctx, kubeClient, labels); err != nil {
return fmt.Errorf("apply clusterrole: %w", err)
}
if err := applyControllerClusterRoleBinding(ctx, kubeClient, namespace, labels); err != nil {
return fmt.Errorf("apply clusterrolebinding: %w", err)
}
if err := applyControllerDeployment(ctx, kubeClient, namespace, labels); err != nil {
return fmt.Errorf("apply deployment: %w", err)
}
return nil
}
func applyControllerServiceAccount(ctx context.Context, kubeClient kubernetes.Interface, namespace string, labels map[string]string) error {
automount := true
want := &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: controlAgentName,
Namespace: namespace,
Labels: labels,
},
AutomountServiceAccountToken: &automount,
}
existing, err := kubeClient.CoreV1().ServiceAccounts(namespace).Get(ctx, controlAgentName, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
_, err = kubeClient.CoreV1().ServiceAccounts(namespace).Create(ctx, want, metav1.CreateOptions{})
return err
}
if err != nil {
return err
}
changed := false
if !reflect.DeepEqual(existing.Labels, want.Labels) {
existing.Labels = want.Labels
changed = true
}
if !reflect.DeepEqual(existing.AutomountServiceAccountToken, want.AutomountServiceAccountToken) {
existing.AutomountServiceAccountToken = want.AutomountServiceAccountToken
changed = true
}
if !changed {
return nil
}
_, err = kubeClient.CoreV1().ServiceAccounts(namespace).Update(ctx, existing, metav1.UpdateOptions{})
return err
}
func applyControllerClusterRole(ctx context.Context, kubeClient kubernetes.Interface, labels map[string]string) error {
wantRules := []rbacv1.PolicyRule{
{
APIGroups: []string{monov1alpha1.Group},
Resources: []string{"osupgrades"},
Verbs: []string{"get", "list", "watch"},
},
{
APIGroups: []string{monov1alpha1.Group},
Resources: []string{"osupgrades/status"},
Verbs: []string{"get", "patch", "update"},
},
{
APIGroups: []string{monov1alpha1.Group},
Resources: []string{"osupgradeprogresses"},
Verbs: []string{"get", "list", "create"},
},
{
APIGroups: []string{monov1alpha1.Group},
Resources: []string{"osupgradeprogresses/status"},
Verbs: []string{"create"},
},
{
APIGroups: []string{""},
Resources: []string{"nodes"},
Verbs: []string{"get", "list", "watch"},
},
}
want := &rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{
Name: controlAgentName,
Labels: labels,
},
Rules: wantRules,
}
existing, err := kubeClient.RbacV1().ClusterRoles().Get(ctx, controlAgentName, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
_, err = kubeClient.RbacV1().ClusterRoles().Create(ctx, want, metav1.CreateOptions{})
return err
}
if err != nil {
return err
}
changed := false
if !reflect.DeepEqual(existing.Labels, want.Labels) {
existing.Labels = want.Labels
changed = true
}
if !reflect.DeepEqual(existing.Rules, want.Rules) {
existing.Rules = want.Rules
changed = true
}
if !changed {
return nil
}
_, err = kubeClient.RbacV1().ClusterRoles().Update(ctx, existing, metav1.UpdateOptions{})
return err
}
func applyControllerClusterRoleBinding(ctx context.Context, kubeClient kubernetes.Interface, namespace string, labels map[string]string) error {
wantSubjects := []rbacv1.Subject{
{
Kind: "ServiceAccount",
Name: controlAgentName,
Namespace: namespace,
},
}
wantRoleRef := rbacv1.RoleRef{
APIGroup: rbacv1.GroupName,
Kind: "ClusterRole",
Name: controlAgentName,
}
want := &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: controlAgentName,
Labels: labels,
},
Subjects: wantSubjects,
RoleRef: wantRoleRef,
}
existing, err := kubeClient.RbacV1().ClusterRoleBindings().Get(ctx, controlAgentName, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
_, err = kubeClient.RbacV1().ClusterRoleBindings().Create(ctx, want, metav1.CreateOptions{})
return err
}
if err != nil {
return err
}
changed := false
if !reflect.DeepEqual(existing.Labels, want.Labels) {
existing.Labels = want.Labels
changed = true
}
if !reflect.DeepEqual(existing.Subjects, want.Subjects) {
existing.Subjects = want.Subjects
changed = true
}
if !reflect.DeepEqual(existing.RoleRef, want.RoleRef) {
existing.RoleRef = want.RoleRef
changed = true
}
if !changed {
return nil
}
_, err = kubeClient.RbacV1().ClusterRoleBindings().Update(ctx, existing, metav1.UpdateOptions{})
return err
}
func applyControllerDeployment(ctx context.Context, kubeClient kubernetes.Interface, namespace string, labels map[string]string) error {
replicas := int32(1)
selectorLabels := map[string]string{
"app.kubernetes.io/name": controlAgentName,
"app.kubernetes.io/component": "controller",
}
podLabels := mergeStringMaps(labels, selectorLabels)
runAsNonRoot := true
allowPrivilegeEscalation := false
want := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: controlAgentName,
Namespace: namespace,
Labels: labels,
},
Spec: appsv1.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{
MatchLabels: selectorLabels,
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: podLabels,
},
Spec: corev1.PodSpec{
ServiceAccountName: controlAgentName,
Containers: []corev1.Container{
{
Name: "controller",
Image: controlAgentImage,
ImagePullPolicy: corev1.PullIfNotPresent,
Args: []string{
"controller",
},
Env: []corev1.EnvVar{
{
Name: "POD_NAME",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "metadata.name",
},
},
},
{
Name: "POD_NAMESPACE",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "metadata.namespace",
},
},
},
{
Name: "NODE_NAME",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "spec.nodeName",
},
},
},
},
Ports: []corev1.ContainerPort{
{
Name: "http",
ContainerPort: 8080,
Protocol: corev1.ProtocolTCP,
},
},
LivenessProbe: &corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/healthz",
Port: intstr.FromString("http"),
},
},
},
ReadinessProbe: &corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/readyz",
Port: intstr.FromString("http"),
},
},
},
SecurityContext: &corev1.SecurityContext{
RunAsNonRoot: &runAsNonRoot,
AllowPrivilegeEscalation: &allowPrivilegeEscalation,
},
},
},
},
},
},
}
existing, err := kubeClient.AppsV1().Deployments(namespace).Get(ctx, controlAgentName, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
_, err = kubeClient.AppsV1().Deployments(namespace).Create(ctx, want, metav1.CreateOptions{})
return err
}
if err != nil {
return err
}
changed := false
if !reflect.DeepEqual(existing.Labels, want.Labels) {
existing.Labels = want.Labels
changed = true
}
if !reflect.DeepEqual(existing.Spec.Replicas, want.Spec.Replicas) {
existing.Spec.Replicas = want.Spec.Replicas
changed = true
}
if !reflect.DeepEqual(existing.Spec.Selector, want.Spec.Selector) {
existing.Spec.Selector = want.Spec.Selector
changed = true
}
if !reflect.DeepEqual(existing.Spec.Template.Labels, want.Spec.Template.Labels) {
existing.Spec.Template.Labels = want.Spec.Template.Labels
changed = true
}
if !reflect.DeepEqual(existing.Spec.Template.Spec.ServiceAccountName, want.Spec.Template.Spec.ServiceAccountName) {
existing.Spec.Template.Spec.ServiceAccountName = want.Spec.Template.Spec.ServiceAccountName
changed = true
}
if !reflect.DeepEqual(existing.Spec.Template.Spec.Containers, want.Spec.Template.Spec.Containers) {
existing.Spec.Template.Spec.Containers = want.Spec.Template.Spec.Containers
changed = true
}
if !changed {
return nil
}
_, err = kubeClient.AppsV1().Deployments(namespace).Update(ctx, existing, metav1.UpdateOptions{})
return err
}
func mergeStringMaps(maps ...map[string]string) map[string]string {
var total int
for _, m := range maps {
total += len(m)
}
if total == 0 {
return nil
}
out := make(map[string]string, total)
for _, m := range maps {
for k, v := range m {
out[k] = v
}
}
return out
}
func sortedKeys(m map[string]string) []string {
if len(m) == 0 {
return nil
}
keys := make([]string, 0, len(m))
for k := range m {
keys = append(keys, k)
}
sort.Strings(keys)
return keys
}

View File

@@ -0,0 +1,278 @@
package render
import (
"bytes"
"fmt"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer/json"
"k8s.io/apimachinery/pkg/util/intstr"
monov1alpha1 "example.com/monok8s/pkg/apis/monok8s/v1alpha1"
templates "example.com/monok8s/pkg/templates"
)
func RenderControllerDeployments(namespace string) (string, error) {
vals := templates.LoadTemplateValuesFromEnv()
labels := map[string]string{
"app.kubernetes.io/name": monov1alpha1.ControlAgentName,
"app.kubernetes.io/component": "controller",
"app.kubernetes.io/part-of": "monok8s",
"app.kubernetes.io/managed-by": "ctl",
}
objs := []runtime.Object{
buildControllerServiceAccount(namespace, labels),
buildControllerClusterRole(labels),
buildControllerClusterRoleBinding(namespace, labels),
buildControllerDeployment(vals, namespace, labels),
}
s := runtime.NewScheme()
_ = corev1.AddToScheme(s)
_ = rbacv1.AddToScheme(s)
_ = appsv1.AddToScheme(s)
serializer := json.NewYAMLSerializer(json.DefaultMetaFactory, s, s)
var buf bytes.Buffer
for i, obj := range objs {
if i > 0 {
if _, err := fmt.Fprintln(&buf, "---"); err != nil {
return "", err
}
}
if err := serializer.Encode(obj, &buf); err != nil {
return "", err
}
}
return buf.String(), nil
}
func buildControllerServiceAccount(namespace string, labels map[string]string) *corev1.ServiceAccount {
automount := true
return &corev1.ServiceAccount{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "ServiceAccount",
},
ObjectMeta: metav1.ObjectMeta{
Name: monov1alpha1.ControlAgentName,
Namespace: namespace,
Labels: labels,
},
AutomountServiceAccountToken: &automount,
}
}
func buildControllerClusterRole(labels map[string]string) *rbacv1.ClusterRole {
wantRules := []rbacv1.PolicyRule{
{
APIGroups: []string{monov1alpha1.Group},
Resources: []string{"osupgrades"},
Verbs: []string{"get", "list", "watch"},
},
{
APIGroups: []string{monov1alpha1.Group},
Resources: []string{"osupgrades/status"},
Verbs: []string{"get", "patch", "update"},
},
{
APIGroups: []string{monov1alpha1.Group},
Resources: []string{"osupgradeprogresses"},
Verbs: []string{"get", "create"},
},
{
APIGroups: []string{monov1alpha1.Group},
Resources: []string{"osupgradeprogresses/status"},
Verbs: []string{"update"},
},
{
APIGroups: []string{""},
Resources: []string{"nodes"},
Verbs: []string{"get", "list"},
},
}
return &rbacv1.ClusterRole{
TypeMeta: metav1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRole",
},
ObjectMeta: metav1.ObjectMeta{
Name: monov1alpha1.ControlAgentName,
Labels: labels,
},
Rules: wantRules,
}
}
func buildControllerClusterRoleBinding(namespace string, labels map[string]string) *rbacv1.ClusterRoleBinding {
wantSubjects := []rbacv1.Subject{
{
Kind: "ServiceAccount",
Name: monov1alpha1.ControlAgentName,
Namespace: namespace,
},
}
wantRoleRef := rbacv1.RoleRef{
APIGroup: rbacv1.GroupName,
Kind: "ClusterRole",
Name: monov1alpha1.ControlAgentName,
}
return &rbacv1.ClusterRoleBinding{
TypeMeta: metav1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRoleBinding",
},
ObjectMeta: metav1.ObjectMeta{
Name: monov1alpha1.ControlAgentName,
Labels: labels,
},
Subjects: wantSubjects,
RoleRef: wantRoleRef,
}
}
func buildControllerDeployment(tVals templates.TemplateValues, namespace string, labels map[string]string) *appsv1.Deployment {
replicas := int32(1)
selectorLabels := map[string]string{
"app.kubernetes.io/name": monov1alpha1.ControlAgentName,
"app.kubernetes.io/component": "controller",
}
podLabels := mergeStringMaps(labels, selectorLabels)
runAsNonRoot := true
allowPrivilegeEscalation := false
return &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
APIVersion: "apps/v1",
Kind: "Deployment",
},
ObjectMeta: metav1.ObjectMeta{
Name: monov1alpha1.ControlAgentName,
Namespace: namespace,
Labels: labels,
},
Spec: appsv1.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{
MatchLabels: selectorLabels,
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: podLabels,
},
Spec: corev1.PodSpec{
ServiceAccountName: monov1alpha1.ControlAgentName,
Containers: []corev1.Container{
{
Name: "controller",
Image: fmt.Sprintf("registry.local/control-agent:%s", tVals.KubernetesVersion),
ImagePullPolicy: corev1.PullIfNotPresent,
Args: []string{
"controller",
"--namespace",
namespace,
},
Env: []corev1.EnvVar{
{
Name: "POD_NAME",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "metadata.name",
},
},
},
{
Name: "POD_NAMESPACE",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "metadata.namespace",
},
},
},
{
Name: "NODE_NAME",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "spec.nodeName",
},
},
},
},
Ports: []corev1.ContainerPort{
{
Name: "http",
ContainerPort: 8080,
Protocol: corev1.ProtocolTCP,
},
{
Name: "https",
ContainerPort: 8443,
Protocol: corev1.ProtocolTCP,
},
},
LivenessProbe: &corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/healthz",
Port: intstr.FromString("http"),
},
},
},
ReadinessProbe: &corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/readyz",
Port: intstr.FromString("http"),
},
},
},
SecurityContext: &corev1.SecurityContext{
RunAsNonRoot: &runAsNonRoot,
AllowPrivilegeEscalation: &allowPrivilegeEscalation,
},
},
},
},
},
},
}
}
func mergeStringMaps(maps ...map[string]string) map[string]string {
var total int
for _, m := range maps {
total += len(m)
}
if total == 0 {
return nil
}
out := make(map[string]string, total)
for _, m := range maps {
for k, v := range m {
out[k] = v
}
}
return out
}

View File

@@ -1,4 +1,4 @@
package templates
package render
import (
"bytes"
@@ -31,10 +31,12 @@ func RenderMonoKSConfig() (string, error) {
return buf.String(), nil
}
func RenderOSUpgrade() (string, error) {
func RenderOSUpgrade(namespace string) (string, error) {
vals := templates.LoadTemplateValuesFromEnv()
cfg := templates.DefaultOSUpgrade(vals)
cfg.Namespace = namespace
s := runtime.NewScheme()
if err := monov1alpha1.AddToScheme(s); err != nil {
return "", err