Refine controller template and probe listeners
@@ -44,3 +44,12 @@ func addKnownTypes(scheme *runtime.Scheme) error {
 	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
 	return nil
 }
+
+func NodeAgentLabels() map[string]string {
+	return map[string]string{
+		"app.kubernetes.io/name":       NodeAgentName,
+		"app.kubernetes.io/component":  "agent",
+		"app.kubernetes.io/part-of":    "monok8s",
+		"app.kubernetes.io/managed-by": NodeControlName,
+	}
+}
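Note: centralizing these labels in NodeAgentLabels() matters because the same map has to appear in several places — as a workload selector, as pod-template labels, and (later in this commit) as anti-affinity match labels — and copies that drift apart make the workload invalid. A minimal stand-alone sketch of that constraint, with stand-in label values rather than the project's constants:

    package main

    import (
    	appsv1 "k8s.io/api/apps/v1"
    	corev1 "k8s.io/api/core/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // nodeAgentLabels stands in for the NodeAgentLabels helper above.
    func nodeAgentLabels() map[string]string {
    	return map[string]string{
    		"app.kubernetes.io/name":      "node-agent",
    		"app.kubernetes.io/component": "agent",
    	}
    }

    func newDaemonSetSkeleton() *appsv1.DaemonSet {
    	labels := nodeAgentLabels()
    	return &appsv1.DaemonSet{
    		Spec: appsv1.DaemonSetSpec{
    			// Selector and template labels must come from one source:
    			// a selector that does not match the template is rejected.
    			Selector: &metav1.LabelSelector{MatchLabels: labels},
    			Template: corev1.PodTemplateSpec{
    				ObjectMeta: metav1.ObjectMeta{Labels: labels},
    			},
    		},
    	}
    }

    func main() { _ = newDaemonSetSkeleton() }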
@@ -133,6 +133,12 @@ type OSUpgradeProgressStatus struct {
 	CurrentStep int32  `json:"currentStep,omitempty" yaml:"currentStep,omitempty"`
 	CurrentFrom string `json:"currentFrom,omitempty" yaml:"currentFrom,omitempty"`
 	CurrentTo   string `json:"currentTo,omitempty" yaml:"currentTo,omitempty"`
+
+	// ObservedRetryNonce records the last retryNonce value the agent accepted.
+	// When spec.retryNonce is changed by the user and differs from this value,
+	// the agent may retry a failed upgrade.
+	// +optional
+	ObservedRetryNonce string `json:"observedRetryNonce,omitempty"`
 }

 func (osu OSUpgrade) StatusPhase() string {
@@ -142,3 +148,11 @@ func (osu OSUpgrade) StatusPhase() string {
 	}
 	return phase
 }
+
+func (osup OSUpgradeProgress) StatusPhase() string {
+	phase := ""
+	if osup.Status != nil {
+		phase = string(osup.Status.Phase)
+	}
+	return phase
+}
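Note: Status is a pointer and stays nil until the agent first writes it, so call sites log through this accessor instead of dereferencing. A tiny stand-alone illustration with stand-in types (not the real API structs):

    package main

    import "fmt"

    type Status struct{ Phase string }
    type Progress struct{ Status *Status }

    func (p Progress) StatusPhase() string {
    	if p.Status == nil {
    		return "" // freshly created objects have no status yet
    	}
    	return p.Status.Phase
    }

    func main() {
    	fmt.Printf("%q\n", Progress{}.StatusPhase()) // "" rather than a nil-pointer panic
    }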
@@ -152,14 +152,11 @@ func watchOnce(
 		if !targetsNode(item, nodeName) {
 			continue
 		}
-		if !shouldHandle(item) {
-			continue
-		}

 		klog.InfoS("found existing osupgradeprogress",
 			"name", item.Name,
 			"node", nodeName,
-			"phase", progressPhase(item.Status),
+			"phase", item.StatusPhase(),
 			"resourceVersion", item.ResourceVersion,
 		)

@@ -227,20 +224,11 @@ func watchOnce(
 			if !targetsNode(osup, nodeName) {
 				continue
 			}
-			if !shouldHandle(osup) {
-				klog.V(2).InfoS("skipping osupgradeprogress due to phase",
-					"name", osup.Name,
-					"node", nodeName,
-					"phase", progressPhase(osup.Status),
-					"eventType", evt.Type,
-				)
-				continue
-			}

-			klog.InfoS("received osupgradeprogress event",
+			klog.V(4).InfoS("received osupgradeprogress event",
 				"name", osup.Name,
 				"node", nodeName,
-				"phase", progressPhase(osup.Status),
+				"phase", osup.StatusPhase(),
 				"eventType", evt.Type,
 				"resourceVersion", osup.ResourceVersion,
 			)
@@ -262,28 +250,3 @@ func targetsNode(osup *monov1alpha1.OSUpgradeProgress, nodeName string) bool {
 	}
 	return osup.Spec.NodeName == nodeName
 }
-
-func shouldHandle(osup *monov1alpha1.OSUpgradeProgress) bool {
-	if osup == nil {
-		return false
-	}
-
-	if osup.Status == nil {
-		return false
-	}
-
-	switch osup.Status.Phase {
-	case "",
-		monov1alpha1.OSUpgradeProgressPhasePending:
-		return true
-	default:
-		return false
-	}
-}
-
-func progressPhase(st *monov1alpha1.OSUpgradeProgressStatus) string {
-	if st == nil {
-		return ""
-	}
-	return string(st.Phase)
-}
@@ -3,6 +3,7 @@ package controller
 import (
 	"context"
 	"errors"
+	"fmt"
 	"net"
 	"net/http"
 	"os"
@@ -57,7 +58,7 @@ func NewCmdController(flags *genericclioptions.ConfigFlags) *cobra.Command {
 	}()

 	go func() {
-		httpErrCh <- httpListen(ctx, clients, conf)
+		httpErrCh <- listenAndServe(ctx, clients, conf)
 	}()

 	select {
@@ -92,63 +93,125 @@ func NewCmdController(flags *genericclioptions.ConfigFlags) *cobra.Command {
 	return cmd
 }

-func httpListen(ctx context.Context, clients *kube.Clients, conf ServerConfig) error {
-	address, port := "", "8443"
-	addr := net.JoinHostPort(address, port)
-
+func listenAndServe(ctx context.Context, clients *kube.Clients, conf ServerConfig) error {
 	nodeName := os.Getenv("NODE_NAME")

-	server := mkscontroller.NewServer(ctx, clients, conf.Namespace, nodeName)
+	controllerServer := mkscontroller.NewServer(ctx, clients, conf.Namespace, nodeName)

-	s := &http.Server{
-		Addr:    addr,
-		Handler: server,
+	healthMux := http.NewServeMux()
+	healthMux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
+		w.WriteHeader(http.StatusOK)
+		_, _ = w.Write([]byte("ok\n"))
+	})
+	healthMux.HandleFunc("/readyz", func(w http.ResponseWriter, r *http.Request) {
+		w.WriteHeader(http.StatusOK)
+		_, _ = w.Write([]byte("ok\n"))
+	})
+
+	healthAddr := net.JoinHostPort("", "8080")
+	controllerAddr := net.JoinHostPort("", "8443")
+
+	healthHTTPServer := &http.Server{
+		Addr:           healthAddr,
+		Handler:        healthMux,
+		IdleTimeout:    90 * time.Second,
+		ReadTimeout:    10 * time.Second,
+		WriteTimeout:   10 * time.Second,
+		MaxHeaderBytes: 1 << 20,
+	}
+
+	controllerHTTPServer := &http.Server{
+		Addr:           controllerAddr,
+		Handler:        controllerServer,
 		IdleTimeout:    90 * time.Second,
 		ReadTimeout:    4 * time.Minute,
 		WriteTimeout:   4 * time.Minute,
 		MaxHeaderBytes: 1 << 20,
 	}

-	serverErrCh := make(chan error, 1)
+	serverErrCh := make(chan error, 2)

 	go func() {
-		if conf.TLSCertFile != "" {
-			klog.InfoS("starting HTTPS server",
-				"addr", addr,
-				"certFile", conf.TLSCertFile,
-				"keyFile", conf.TLSPrivateKeyFile,
-			)
-			serverErrCh <- s.ListenAndServeTLS(conf.TLSCertFile, conf.TLSPrivateKeyFile)
+		klog.InfoS("starting health HTTP server", "addr", healthAddr)
+
+		err := healthHTTPServer.ListenAndServe()
+		if err != nil && !errors.Is(err, http.ErrServerClosed) {
+			serverErrCh <- fmt.Errorf("health HTTP server: %w", err)
+			return
 		}

-		klog.InfoS("starting HTTP server", "addr", addr)
-		serverErrCh <- s.ListenAndServe()
+		serverErrCh <- nil
 	}()

+	go func() {
+		if conf.TLSCertFile != "" {
+			klog.InfoS("starting controller HTTPS server",
+				"addr", controllerAddr,
+				"certFile", conf.TLSCertFile,
+				"keyFile", conf.TLSPrivateKeyFile,
+			)
+
+			err := controllerHTTPServer.ListenAndServeTLS(conf.TLSCertFile, conf.TLSPrivateKeyFile)
+			if err != nil && !errors.Is(err, http.ErrServerClosed) {
+				serverErrCh <- fmt.Errorf("controller HTTPS server: %w", err)
+				return
+			}
+
+			serverErrCh <- nil
+			return
+		}
+
+		klog.InfoS("starting controller HTTP server", "addr", controllerAddr)
+
+		err := controllerHTTPServer.ListenAndServe()
+		if err != nil && !errors.Is(err, http.ErrServerClosed) {
+			serverErrCh <- fmt.Errorf("controller HTTP server: %w", err)
+			return
+		}
+
+		serverErrCh <- nil
+	}()
+
 	select {
 	case <-ctx.Done():
-		klog.InfoS("shutting down HTTP server", "addr", addr)
+		klog.InfoS("shutting down HTTP servers",
+			"healthAddr", healthAddr,
+			"controllerAddr", controllerAddr,
+		)

 		shutdownCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
 		defer cancel()

-		err := s.Shutdown(shutdownCtx)
-		if err != nil {
-			return err
+		var errs []error
+
+		if err := healthHTTPServer.Shutdown(shutdownCtx); err != nil {
+			errs = append(errs, fmt.Errorf("shutdown health HTTP server: %w", err))
 		}

-		err = <-serverErrCh
-		if err != nil && !errors.Is(err, http.ErrServerClosed) {
-			return err
+		if err := controllerHTTPServer.Shutdown(shutdownCtx); err != nil {
+			errs = append(errs, fmt.Errorf("shutdown controller HTTP server: %w", err))
 		}
+
+		for i := 0; i < 2; i++ {
+			if err := <-serverErrCh; err != nil {
+				errs = append(errs, err)
+			}
+		}
+
+		if len(errs) > 0 {
+			return errors.Join(errs...)
+		}
+
 		return context.Canceled

 	case err := <-serverErrCh:
-		if err != nil && !errors.Is(err, http.ErrServerClosed) {
+		if err != nil {
 			klog.ErrorS(err, "HTTP server failed")
 			return err
 		}
-		return nil
+
+		// One server exited cleanly unexpectedly. Treat that as failure because
+		// the process should keep both servers alive until ctx is canceled.
+		return fmt.Errorf("HTTP server exited unexpectedly")
 	}
 }
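Note: the change above splits one listener into two — a plaintext health listener on :8080 that kubelet probes can reach without TLS configuration, and the (optionally TLS) controller API on :8443 — with a buffered error channel so either listener failing is observed. A minimal sketch of the same two-listener pattern, as a stand-alone program rather than the project's code:

    package main

    import (
    	"errors"
    	"log"
    	"net/http"
    )

    func main() {
    	health := http.NewServeMux()
    	health.HandleFunc("/healthz", func(w http.ResponseWriter, _ *http.Request) {
    		w.WriteHeader(http.StatusOK)
    		_, _ = w.Write([]byte("ok\n"))
    	})

    	errCh := make(chan error, 2) // buffered so neither goroutine blocks on send

    	go func() { errCh <- http.ListenAndServe(":8080", health) }()
    	go func() { errCh <- http.ListenAndServe(":8443", http.NotFoundHandler()) }()

    	// Either listener exiting is fatal; both should run for the process lifetime.
    	if err := <-errCh; err != nil && !errors.Is(err, http.ErrServerClosed) {
    		log.Fatal(err)
    	}
    }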
@@ -42,23 +42,6 @@ func NewCmdCreate(flags *genericclioptions.ConfigFlags) *cobra.Command {
 				return err
 			},
 		},
-		&cobra.Command{
-			Use:   "controller",
-			Short: "Print controller deployment template",
-			RunE: func(cmd *cobra.Command, _ []string) error {
-				ns, _, err := flags.ToRawKubeConfigLoader().Namespace()
-				if err != nil {
-					return err
-				}
-
-				out, err := render.RenderControllerDeployments(ns)
-				if err != nil {
-					return err
-				}
-				_, err = fmt.Fprint(cmd.OutOrStdout(), out)
-				return err
-			},
-		},
 	)

 	var authorizedKeysPath string
@@ -90,6 +73,38 @@ func NewCmdCreate(flags *genericclioptions.ConfigFlags) *cobra.Command {
 	sshdcmd.Flags().StringVar(&authorizedKeysPath, "authkeys", "", "path to authorized_keys file")

 	cmd.AddCommand(&sshdcmd)

+	cconf := render.ControllerConf{}
+	controllercmd := cobra.Command{
+		Use:   "controller",
+		Short: "Print controller deployment template",
+		RunE: func(cmd *cobra.Command, _ []string) error {
+			ns, _, err := flags.ToRawKubeConfigLoader().Namespace()
+			if err != nil {
+				return err
+			}
+
+			cconf.Namespace = ns
+
+			out, err := render.RenderControllerDeployments(cconf)
+			if err != nil {
+				return err
+			}
+
+			_, err = fmt.Fprint(cmd.OutOrStdout(), out)
+			return err
+		},
+	}
+
+	controllercmd.Flags().StringVar(
+		&cconf.Image,
+		"image",
+		"",
+		"Controller image, including optional registry and tag",
+	)
+
+	cmd.AddCommand(&controllercmd)
+
 	return cmd
 }
@@ -19,8 +19,10 @@ import (
 func init() {
 	klog.InitFlags(nil)
+
 	_ = flag.Set("logtostderr", "true")
+
 	if os.Getenv("DEBUG") != "" {
-		_ = flag.Set("v", "4") // debug level
+		_ = flag.Set("v", "4")
 	} else {
 		_ = flag.Set("v", "0")
 	}
@@ -39,7 +41,11 @@ func NewRootCmd() *cobra.Command {
 		},
 	}

+	// Expose klog stdlib flags through Cobra/pflag.
+	cmd.PersistentFlags().AddGoFlagSet(flag.CommandLine)
+
 	flags.AddFlags(cmd.PersistentFlags())
+
 	cmd.AddCommand(
 		versioncmd.NewCmdVersion(),
 		initcmd.NewCmdInit(flags),
@@ -49,5 +55,6 @@ func NewRootCmd() *cobra.Command {
 		controllercmd.NewCmdController(flags),
+		internalcmd.NewCmdInternal(),
 	)

 	return cmd
 }
@@ -65,13 +65,20 @@ func handleOSUpgradeProgressLocked(
 	}

 	if osup.Spec.NodeName != nodeName {
+		klog.V(4).InfoS("skipping osupgradeprogress due to nodeName mismatch",
+			"name", osup.Name,
+			"node", nodeName,
+			"target", osup.Spec.NodeName,
+		)
 		return nil
 	}

-	if osup.Status.Phase != "" &&
-		osup.Status.Phase != monov1alpha1.OSUpgradeProgressPhasePending &&
-		osup.Status.Phase != monov1alpha1.OSUpgradeProgressPhaseDownloading {
-		// tune this logic however you want
+	if !shouldProcessProgress(osup) {
+		klog.V(2).InfoS("skipping osupgradeprogress due to phase",
+			"name", osup.Name,
+			"node", nodeName,
+			"phase", osup.StatusPhase(),
+		)
 		return nil
 	}

@@ -124,7 +131,9 @@ func handleOSUpgradeProgressLocked(
 		now := metav1.Now()
 		cur.Status.CurrentVersion = buildinfo.KubeVersion
 		cur.Status.TargetVersion = plan.ResolvedTarget
+		cur.Status.PlannedPath = plannedPath(plan)
 		cur.Status.Phase = monov1alpha1.OSUpgradeProgressPhaseDownloading
+		cur.Status.ObservedRetryNonce = cur.Spec.RetryNonce
 		cur.Status.Message = fmt.Sprintf("downloading image: %s", first.URL)
 		cur.Status.LastUpdatedAt = &now
 	})
@@ -238,6 +247,26 @@ func handleOSUpgradeProgressLocked(
 	select {}
 }

+func shouldProcessProgress(osup *monov1alpha1.OSUpgradeProgress) bool {
+	if osup == nil {
+		return false
+	}
+
+	if osup.Status == nil {
+		return false
+	}
+
+	switch osup.Status.Phase {
+	case "",
+		monov1alpha1.OSUpgradeProgressPhasePending:
+		return true
+	case monov1alpha1.OSUpgradeProgressPhaseFailed:
+		return osup.Spec.RetryNonce != osup.Status.ObservedRetryNonce
+	default:
+		return false
+	}
+}
+
 func triggerReboot() error {
 	_ = os.WriteFile("/proc/sysrq-trigger", []byte("s\n"), 0)
 	_ = os.WriteFile("/proc/sysrq-trigger", []byte("u\n"), 0)
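Note: together with the ObservedRetryNonce field added earlier, this gate gives each nonce change exactly one retry: the agent copies spec.retryNonce into status.observedRetryNonce when it starts (and again on failure/completion), so a Failed object is reprocessed only while the two values differ. A hedged stand-alone sketch of that contract — the local structs and phase strings here are stand-ins for the monov1alpha1 types:

    package main

    import "fmt"

    type spec struct{ RetryNonce string }
    type status struct{ Phase, ObservedRetryNonce string }
    type progress struct {
    	Spec   spec
    	Status *status
    }

    func shouldProcess(p *progress) bool {
    	if p == nil || p.Status == nil {
    		return false
    	}
    	switch p.Status.Phase {
    	case "", "Pending":
    		return true
    	case "Failed":
    		// One retry per nonce change: the nonce is recorded as observed
    		// as soon as the agent accepts the object.
    		return p.Spec.RetryNonce != p.Status.ObservedRetryNonce
    	default:
    		return false
    	}
    }

    func main() {
    	p := &progress{Spec: spec{RetryNonce: "1"}, Status: &status{Phase: "Failed"}}
    	fmt.Println(shouldProcess(p)) // true: nonce "1" not yet observed
    	p.Status.ObservedRetryNonce = "1"
    	fmt.Println(shouldProcess(p)) // false: this retry was already consumed
    }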
@@ -335,6 +335,14 @@ func lowestPatchInMinor(versions []Version, major, minor int) (Version, bool) {
 	return Version{}, false
 }

+func plannedPath(plan *Plan) []string {
+	ppath := []string{}
+	for _, img := range plan.Path {
+		ppath = append(ppath, img.Version)
+	}
+	return ppath
+}
+
 func versionsToStrings(vs []Version) []string {
 	out := make([]string, 0, len(vs))
 	for _, v := range vs {
@@ -209,6 +209,7 @@ func failProgress(
 		cur.Status = &monov1alpha1.OSUpgradeProgressStatus{}
 	}

+	cur.Status.ObservedRetryNonce = cur.Spec.RetryNonce
 	cur.Status.LastUpdatedAt = &now
 	cur.Status.Message = fmt.Sprintf("%s: %v", action, cause)
 	cur.Status.Phase = monov1alpha1.OSUpgradeProgressPhaseFailed
@@ -237,6 +238,7 @@ func markProgressCompleted(
 		cur.Status = &monov1alpha1.OSUpgradeProgressStatus{}
 	}

+	cur.Status.ObservedRetryNonce = cur.Spec.RetryNonce
 	cur.Status.Phase = monov1alpha1.OSUpgradeProgressPhaseCompleted
 	cur.Status.Message = message
 	cur.Status.CurrentVersion = osup.Status.CurrentVersion
@@ -73,17 +73,13 @@ func (s *Server) Initialize() {
 	ws.Consumes(restful.MIME_JSON)
 	ws.Produces(restful.MIME_JSON)

-	ws.Route(ws.GET("/healthz").To(s.queryHealthz).
+	ws.Route(ws.GET("/status").To(s.queryStatus).
 		Doc("Return basic controller status"))

-	// Stub for now
-	ws.Route(ws.GET("/readyz").To(s.queryHealthz).
-		Doc("Stub for now"))
-
 	s.restfulCont.Add(ws)
 }

-func (s *Server) queryHealthz(request *restful.Request, response *restful.Response) {
+func (s *Server) queryStatus(request *restful.Request, response *restful.Response) {
 	resp := StatusResponse{
 		OK:      true,
 		Service: "monok8s-controller",
@@ -20,9 +20,8 @@ import (
 )

 const (
-	controlAgentNodeSelectorValue = "true"
-	controlAgentImage             = "localhost/monok8s/node-control:dev"
-	kubeconfig                    = "/etc/kubernetes/admin.conf"
+	controlAgentImage = "localhost/monok8s/node-control:dev"
+	kubeconfig        = "/etc/kubernetes/admin.conf"
 )

 func ApplyNodeControlDaemonSetResources(ctx context.Context, n *NodeContext) error {
@@ -265,12 +264,7 @@ func applyNodeAgentClusterRoleBinding(ctx context.Context, kubeClient kubernetes
 func applyNodeAgentDaemonSet(ctx context.Context, kubeClient kubernetes.Interface, namespace string, labels map[string]string) error {
 	privileged := true

-	dsLabels := map[string]string{
-		"app.kubernetes.io/name":       monov1alpha1.NodeAgentName,
-		"app.kubernetes.io/component":  "agent",
-		"app.kubernetes.io/part-of":    "monok8s",
-		"app.kubernetes.io/managed-by": monov1alpha1.NodeControlName,
-	}
+	dsLabels := monov1alpha1.NodeAgentLabels()

 	want := &appsv1.DaemonSet{
 		ObjectMeta: metav1.ObjectMeta{
@@ -294,7 +288,7 @@ func applyNodeAgentDaemonSet(ctx context.Context, kubeClient kubernetes.Interfac
 			HostPID:   true,
 			DNSPolicy: corev1.DNSClusterFirstWithHostNet,
 			NodeSelector: map[string]string{
-				monov1alpha1.NodeControlKey: controlAgentNodeSelectorValue,
+				monov1alpha1.NodeControlKey: "true",
 			},
 			Tolerations: []corev1.Toleration{
 				{Operator: corev1.TolerationOpExists},
@@ -61,7 +61,7 @@ func ApplyLocalNodeMetadataIfPossible(ctx context.Context, nctx *NodeContext) er

 	// Additional Labels
 	if spec.EnableNodeControl {
-		node.Labels[monov1alpah1.NodeControlKey] = controlAgentNodeSelectorValue
+		node.Labels[monov1alpah1.NodeControlKey] = "true"
 	}

 	_, err = client.CoreV1().Nodes().Update(ctx, node, metav1.UpdateOptions{})
@@ -14,13 +14,20 @@ import (

 	monov1alpha1 "example.com/monok8s/pkg/apis/monok8s/v1alpha1"
 	buildinfo "example.com/monok8s/pkg/buildinfo"
-	templates "example.com/monok8s/pkg/templates"
 )

-func RenderControllerDeployments(namespace string) (string, error) {
-	vals := templates.LoadTemplateValuesFromEnv()
+type ControllerConf struct {
+	Namespace string
+	Image     string
+	Labels    map[string]string
+}

-	labels := map[string]string{
+func RenderControllerDeployments(conf ControllerConf) (string, error) {
+	if conf.Namespace == "" {
+		return "", fmt.Errorf("namespace is required")
+	}
+
+	conf.Labels = map[string]string{
 		"app.kubernetes.io/name":      monov1alpha1.ControllerName,
 		"app.kubernetes.io/component": "controller",
 		"app.kubernetes.io/part-of":   "monok8s",
@@ -28,10 +35,10 @@ func RenderControllerDeployments(namespace string) (string, error) {
 	}

 	objs := []runtime.Object{
-		buildControllerServiceAccount(namespace, labels),
-		buildControllerClusterRole(labels),
-		buildControllerClusterRoleBinding(namespace, labels),
-		buildControllerDeployment(vals, namespace, labels),
+		buildControllerServiceAccount(conf),
+		buildControllerClusterRole(conf),
+		buildControllerClusterRoleBinding(conf),
+		buildControllerDeployment(conf),
 	}

 	s := runtime.NewScheme()
@@ -57,7 +64,7 @@ func RenderControllerDeployments(namespace string) (string, error) {
 	return buf.String(), nil
 }

-func buildControllerServiceAccount(namespace string, labels map[string]string) *corev1.ServiceAccount {
+func buildControllerServiceAccount(conf ControllerConf) *corev1.ServiceAccount {

 	automount := true

@@ -68,14 +75,14 @@ func buildControllerServiceAccount(namespace string, labels map[string]string) *
 		},
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      monov1alpha1.ControllerName,
-			Namespace: namespace,
-			Labels:    labels,
+			Namespace: conf.Namespace,
+			Labels:    conf.Labels,
 		},
 		AutomountServiceAccountToken: &automount,
 	}
 }

-func buildControllerClusterRole(labels map[string]string) *rbacv1.ClusterRole {
+func buildControllerClusterRole(conf ControllerConf) *rbacv1.ClusterRole {
 	wantRules := []rbacv1.PolicyRule{
 		{
 			APIGroups: []string{monov1alpha1.Group},
@@ -111,19 +118,19 @@ func buildControllerClusterRole(labels map[string]string) *rbacv1.ClusterRole {
 		},
 		ObjectMeta: metav1.ObjectMeta{
 			Name:   monov1alpha1.ControllerName,
-			Labels: labels,
+			Labels: conf.Labels,
 		},
 		Rules: wantRules,
 	}
 }

-func buildControllerClusterRoleBinding(namespace string, labels map[string]string) *rbacv1.ClusterRoleBinding {
+func buildControllerClusterRoleBinding(conf ControllerConf) *rbacv1.ClusterRoleBinding {

 	wantSubjects := []rbacv1.Subject{
 		{
 			Kind:      "ServiceAccount",
 			Name:      monov1alpha1.ControllerName,
-			Namespace: namespace,
+			Namespace: conf.Namespace,
 		},
 	}

@@ -140,14 +147,14 @@ func buildControllerClusterRoleBinding(namespace string, labels map[string]strin
 		},
 		ObjectMeta: metav1.ObjectMeta{
 			Name:   monov1alpha1.ControllerName,
-			Labels: labels,
+			Labels: conf.Labels,
 		},
 		Subjects: wantSubjects,
 		RoleRef:  wantRoleRef,
 	}
 }

-func buildControllerDeployment(tVals templates.TemplateValues, namespace string, labels map[string]string) *appsv1.Deployment {
+func buildControllerDeployment(conf ControllerConf) *appsv1.Deployment {
 	replicas := int32(1)

 	selectorLabels := map[string]string{
@@ -155,10 +162,13 @@ func buildControllerDeployment(tVals templates.TemplateValues, namespace string,
 		"app.kubernetes.io/component": "controller",
 	}

-	podLabels := mergeStringMaps(labels, selectorLabels)
+	podLabels := mergeStringMaps(conf.Labels, selectorLabels)

 	runAsNonRoot := true
 	allowPrivilegeEscalation := false
 	userGroup := int64(65532)
+
+	image, pullPolicy := controllerImage(conf)
+
 	return &appsv1.Deployment{
 		TypeMeta: metav1.TypeMeta{
@@ -167,8 +177,8 @@ func buildControllerDeployment(tVals templates.TemplateValues, namespace string,
 		},
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      monov1alpha1.ControllerName,
-			Namespace: namespace,
-			Labels:    labels,
+			Namespace: conf.Namespace,
+			Labels:    conf.Labels,
 		},
 		Spec: appsv1.DeploymentSpec{
 			Replicas: &replicas,
@@ -184,12 +194,12 @@ func buildControllerDeployment(tVals templates.TemplateValues, namespace string,
 				Containers: []corev1.Container{
 					{
 						Name:            "controller",
-						Image:           fmt.Sprintf("localhost/monok8s/node-control:%s", buildinfo.Version),
-						ImagePullPolicy: corev1.PullIfNotPresent,
+						Image:           image,
+						ImagePullPolicy: pullPolicy,
 						Args: []string{
 							"controller",
 							"--namespace",
-							namespace,
+							conf.Namespace,
 						},
 						Env: []corev1.EnvVar{
 							{
@@ -239,6 +249,10 @@ func buildControllerDeployment(tVals templates.TemplateValues, namespace string,
 								Port: intstr.FromString("http"),
 							},
 						},
+						InitialDelaySeconds: 5,
+						PeriodSeconds:       60,
+						TimeoutSeconds:      2,
+						FailureThreshold:    3,
 					},
 					ReadinessProbe: &corev1.Probe{
 						ProbeHandler: corev1.ProbeHandler{
@@ -247,13 +261,64 @@ func buildControllerDeployment(tVals templates.TemplateValues, namespace string,
 								Port: intstr.FromString("http"),
 							},
 						},
+						InitialDelaySeconds: 2,
+						PeriodSeconds:       5,
+						TimeoutSeconds:      2,
+						FailureThreshold:    3,
 					},
 					SecurityContext: &corev1.SecurityContext{
 						RunAsNonRoot:             &runAsNonRoot,
 						RunAsUser:                &userGroup,
 						RunAsGroup:               &userGroup,
 						AllowPrivilegeEscalation: &allowPrivilegeEscalation,
 					},
 				},
 			},
+			NodeSelector: controllerNodeSelector(conf),
+			Affinity:     controllerAffinity(conf),
 		},
 	},
 },
 }
 }
+
+func controllerImage(conf ControllerConf) (string, corev1.PullPolicy) {
+	if conf.Image != "" {
+		return conf.Image, corev1.PullIfNotPresent
+	}
+
+	return fmt.Sprintf("localhost/monok8s/node-control:%s", buildinfo.Version), corev1.PullNever
+}
+
+func controllerNodeSelector(conf ControllerConf) map[string]string {
+	if conf.Image != "" {
+		return nil
+	}
+
+	// Local image exists on managed nodes only.
+	return map[string]string{
+		monov1alpha1.NodeControlKey: "true",
+	}
+}
+
+func controllerAffinity(conf ControllerConf) *corev1.Affinity {
+	// Local image exists only on managed nodes, so in that mode we already use
+	// NodeSelector and should not fight placement with anti-affinity.
+	if conf.Image == "" {
+		return nil
+	}
+
+	return &corev1.Affinity{
+		PodAntiAffinity: &corev1.PodAntiAffinity{
+			PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{
+				{
+					Weight: 100,
+					PodAffinityTerm: corev1.PodAffinityTerm{
+						TopologyKey: corev1.LabelHostname,
+						LabelSelector: &metav1.LabelSelector{
+							MatchLabels: monov1alpha1.NodeAgentLabels(),
+						},
+					},
+				},
+			},
+		},
+	}
+}
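Note: the three helpers above branch on a single signal, conf.Image — an explicit registry image gets normal pull semantics and soft anti-affinity away from the node agents, while the default locally built dev image is never pulled and is pinned to managed nodes. A condensed restatement of that decision as a hypothetical stand-alone helper, for illustration only:

    package main

    import "fmt"

    // placementFor summarizes controllerImage/controllerNodeSelector/controllerAffinity:
    // one flag decides both pull policy and scheduling.
    func placementFor(image string) string {
    	if image != "" {
    		// Registry-hosted image: pull as needed and let preferred
    		// anti-affinity keep the controller off nodes running agents.
    		return "PullIfNotPresent, no nodeSelector, anti-affinity to node agents"
    	}
    	// Locally built dev image: it only exists on managed nodes, so never
    	// pull and pin scheduling to nodes carrying the node-control label.
    	return "PullNever, nodeSelector node-control=true, no affinity"
    }

    func main() {
    	fmt.Println(placementFor(""))                       // local dev mode
    	fmt.Println(placementFor("registry.example/c:1.2")) // explicit image mode
    }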
@@ -160,6 +160,7 @@ func buildSSHDDeployment(
 				Labels: podLabels,
 			},
 			Spec: corev1.PodSpec{
+				HostPID:      true,
 				NodeSelector: selectorLabels,
 				Containers: []corev1.Container{
 					{
@@ -215,60 +216,110 @@ exec /usr/sbin/sshd \
 							corev1.ResourceMemory: resource.MustParse("128Mi"),
 						},
 					},
-					VolumeMounts: []corev1.VolumeMount{
-						{
-							Name:      "authorized-keys",
-							MountPath: "/authorized-keys",
-							ReadOnly:  true,
-						},
-						{
-							Name:      "host-etc",
-							MountPath: "/host/etc",
-						},
-						{
-							Name:      "host-var",
-							MountPath: "/host/var",
-						},
-					},
+					VolumeMounts: append(
+						[]corev1.VolumeMount{
+							{
+								Name:      "authorized-keys",
+								MountPath: "/authorized-keys",
+								ReadOnly:  true,
+							},
+						},
+						buildHostRootVolumeMounts()...,
+					),
 				},
 			},
-			Volumes: []corev1.Volume{
-				{
-					Name: "authorized-keys",
-					VolumeSource: corev1.VolumeSource{
-						ConfigMap: &corev1.ConfigMapVolumeSource{
-							LocalObjectReference: corev1.LocalObjectReference{
-								Name: sshdConfigName,
-							},
-							DefaultMode: ptrInt32(0600),
-						},
-					},
-				},
-				{
-					Name: "host-etc",
-					VolumeSource: corev1.VolumeSource{
-						HostPath: &corev1.HostPathVolumeSource{
-							Path: "/etc",
-							Type: ptrHostPathType(corev1.HostPathDirectory),
-						},
-					},
-				},
-				{
-					Name: "host-var",
-					VolumeSource: corev1.VolumeSource{
-						HostPath: &corev1.HostPathVolumeSource{
-							Path: "/var",
-							Type: ptrHostPathType(corev1.HostPathDirectory),
-						},
-					},
-				},
-			},
+			Volumes: append(
+				[]corev1.Volume{
+					{
+						Name: "authorized-keys",
+						VolumeSource: corev1.VolumeSource{
+							ConfigMap: &corev1.ConfigMapVolumeSource{
+								LocalObjectReference: corev1.LocalObjectReference{
+									Name: sshdConfigName,
+								},
+								DefaultMode: ptrInt32(0600),
+							},
+						},
+					},
+				},
+				buildHostRootVolumes()...,
+			),
 		},
 	},
 },
 }
 }
+
+func buildHostRootVolumeMounts() []corev1.VolumeMount {
+	paths := []struct {
+		name      string
+		mountPath string
+		readOnly  bool
+	}{
+		{"host-bin", "/host/bin", true},
+		{"host-sbin", "/host/sbin", true},
+		{"host-lib", "/host/lib", true},
+		{"host-usr", "/host/usr", true},
+		{"host-etc", "/host/etc", false},
+		{"host-run", "/host/run", false},
+		{"host-proc", "/host/proc", false},
+		{"host-sys", "/host/sys", false},
+		{"host-dev", "/host/dev", false},
+		{"host-var", "/host/var", false},
+	}
+
+	mounts := make([]corev1.VolumeMount, 0, len(paths))
+
+	for _, p := range paths {
+		mounts = append(mounts, corev1.VolumeMount{
+			Name:      p.name,
+			MountPath: p.mountPath,
+			ReadOnly:  p.readOnly,
+		})
+	}
+
+	return mounts
+}
+
+func buildHostRootVolumes() []corev1.Volume {
+	hostPathDir := corev1.HostPathDirectory
+
+	paths := []struct {
+		name string
+		path string
+	}{
+		{"host-bin", "/bin"},
+		{"host-sbin", "/sbin"},
+		{"host-lib", "/lib"},
+		{"host-usr", "/usr"},
+		{"host-etc", "/etc"},
+		{"host-run", "/run"},
+		{"host-proc", "/proc"},
+		{"host-sys", "/sys"},
+		{"host-dev", "/dev"},
+
+		// /var is an rbind mount in monok8s and may be private.
+		// Mount the real backing path instead.
+		{"host-var", "/data/var"},
+	}
+
+	volumes := make([]corev1.Volume, 0, len(paths))
+
+	for _, p := range paths {
+		volumes = append(volumes, corev1.Volume{
+			Name: p.name,
+			VolumeSource: corev1.VolumeSource{
+				HostPath: &corev1.HostPathVolumeSource{
+					Path: p.path,
+					Type: &hostPathDir,
+				},
+			},
+		})
+	}
+
+	return volumes
+}

 func ptrInt32(v int32) *int32 {
 	return &v
 }
@@ -84,7 +84,7 @@ func LoadTemplateValuesFromEnv() TemplateValues {
 	v := defaultTemplateValues()

 	v.Hostname = getenvDefault("MKS_HOSTNAME", v.Hostname)
-	v.NodeName = getenvDefault("MKS_NODE_NAME", v.Hostname)
+	v.NodeName = getenvDefault("MKS_NODE_NAME", getenvDefault("NODE_NAME", v.Hostname))

 	v.KubernetesVersion = getenvDefault("MKS_KUBERNETES_VERSION", v.KubernetesVersion)
 	v.ClusterName = getenvDefault("MKS_CLUSTER_NAME", v.ClusterName)
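Note: nesting getenvDefault gives NodeName a three-step fallback chain — MKS_NODE_NAME, then NODE_NAME (commonly injected via the Downward API), then the hostname. A hedged stand-alone illustration; this getenvDefault is a stand-in for the helper in this package:

    package main

    import (
    	"fmt"
    	"os"
    )

    func getenvDefault(key, def string) string {
    	if v := os.Getenv(key); v != "" {
    		return v
    	}
    	return def
    }

    func main() {
    	hostname, _ := os.Hostname()
    	os.Setenv("NODE_NAME", "worker-1") // e.g. set by the Downward API
    	name := getenvDefault("MKS_NODE_NAME", getenvDefault("NODE_NAME", hostname))
    	fmt.Println(name) // "worker-1" unless MKS_NODE_NAME overrides it
    }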