Drop admission logic. Use a plain controller instead

This commit is contained in:
2026-04-22 05:01:48 +08:00
parent 6ddff7c433
commit 9eba55e7ee
9 changed files with 535 additions and 276 deletions

View File

@@ -1,52 +0,0 @@
#!/usr/bin/env bash
# Build an AdmissionReview JSON payload for an OSUpgrade manifest read on stdin.
#
# Example: pipe this into curl
#   ../../devtools/test-upgrade.sh \
#     | this_script \
#     | curl -k -H 'Content-Type: application/json' --data-binary @- https://127.0.0.1:8443/admission
set -euo pipefail

# Prefer uuidgen, but fall back to the kernel UUID source and finally a fixed
# debug UUID. Under `set -e` a bare failing $(uuidgen) would abort the whole
# script on hosts without util-linux; the fallback chain keeps it usable.
uid=$(uuidgen 2>/dev/null || cat /proc/sys/kernel/random/uuid 2>/dev/null || echo "00000000-0000-0000-0000-000000000000")

# Convert the YAML manifest to JSON, then wrap it in an AdmissionReview
# envelope with fixed group/version/resource metadata for OSUpgrade.
yq -o=json '.' | jq --arg uid "$uid" '
{
  apiVersion: "admission.k8s.io/v1",
  kind: "AdmissionReview",
  request: {
    uid: $uid,
    kind: {
      group: "monok8s.io",
      version: "v1alpha1",
      kind: "OSUpgrade"
    },
    resource: {
      group: "monok8s.io",
      version: "v1alpha1",
      resource: "osupgrades"
    },
    requestKind: {
      group: "monok8s.io",
      version: "v1alpha1",
      kind: "OSUpgrade"
    },
    requestResource: {
      group: "monok8s.io",
      version: "v1alpha1",
      resource: "osupgrades"
    },
    name: .metadata.name,
    namespace: (.metadata.namespace // ""),
    operation: "CREATE",
    userInfo: {
      username: "debug-user"
    },
    object: .,
    oldObject: null,
    dryRun: false,
    options: {
      apiVersion: "meta.k8s.io/v1",
      kind: "CreateOptions"
    }
  }
}'

View File

@@ -14,4 +14,4 @@ else
-addext "subjectAltName=IP:127.0.0.1,DNS:localhost" -addext "subjectAltName=IP:127.0.0.1,DNS:localhost"
fi fi
go run "$PROJ_ROOT"/cmd/ctl controller --tls-cert-file "$OUT_DIR"/tls.crt --tls-private-key-file "$OUT_DIR"/tls.key go run "$PROJ_ROOT"/cmd/ctl controller --tls-cert-file "$OUT_DIR"/tls.crt --tls-private-key-file "$OUT_DIR"/tls.key --namespace default

View File

@@ -7,11 +7,9 @@ import (
type OSUpgradePhase string type OSUpgradePhase string
const ( const (
OSUpgradePhasePending OSUpgradePhase = "Pending" OSUpgradePhasePending OSUpgradePhase = "Pending"
OSUpgradePhaseAccepted OSUpgradePhase = "Accepted" OSUpgradePhaseAccepted OSUpgradePhase = "Accepted"
OSUpgradePhaseRollingOut OSUpgradePhase = "RollingOut" OSUpgradePhaseRejected OSUpgradePhase = "Rejected"
OSUpgradePhaseCompleted OSUpgradePhase = "Completed"
OSUpgradePhaseRejected OSUpgradePhase = "Rejected"
) )
type OSUpgradeProgressPhase string type OSUpgradeProgressPhase string
@@ -34,9 +32,6 @@ const (
// +kubebuilder:printcolumn:name="Desired",type=string,JSONPath=`.spec.desiredVersion` // +kubebuilder:printcolumn:name="Desired",type=string,JSONPath=`.spec.desiredVersion`
// +kubebuilder:printcolumn:name="Resolved",type=string,JSONPath=`.status.resolvedVersion` // +kubebuilder:printcolumn:name="Resolved",type=string,JSONPath=`.status.resolvedVersion`
// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` // +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase`
// +kubebuilder:printcolumn:name="Targets",type=integer,JSONPath=`.status.summary.targetedNodes`
// +kubebuilder:printcolumn:name="OK",type=integer,JSONPath=`.status.summary.succeededNodes`
// +kubebuilder:printcolumn:name="Fail",type=integer,JSONPath=`.status.summary.failedNodes`
type OSUpgrade struct { type OSUpgrade struct {
metav1.TypeMeta `json:",inline" yaml:",inline"` metav1.TypeMeta `json:",inline" yaml:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"` metav1.ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
@@ -74,20 +69,11 @@ type OSUpgradeStatus struct {
Phase OSUpgradePhase `json:"phase,omitempty" yaml:"phase,omitempty"` Phase OSUpgradePhase `json:"phase,omitempty" yaml:"phase,omitempty"`
ResolvedVersion string `json:"resolvedVersion,omitempty" yaml:"resolvedVersion,omitempty"` ResolvedVersion string `json:"resolvedVersion,omitempty" yaml:"resolvedVersion,omitempty"`
ObservedGeneration int64 `json:"observedGeneration,omitempty" yaml:"observedGeneration,omitempty"` ObservedGeneration int64 `json:"observedGeneration,omitempty" yaml:"observedGeneration,omitempty"`
Summary OSUpgradeSummary `json:"summary,omitempty" yaml:"summary,omitempty"`
Conditions []metav1.Condition `json:"conditions,omitempty" yaml:"conditions,omitempty"` Conditions []metav1.Condition `json:"conditions,omitempty" yaml:"conditions,omitempty"`
Reason string `json:"reason,omitempty" yaml:"reason,omitempty"` Reason string `json:"reason,omitempty" yaml:"reason,omitempty"`
Message string `json:"message,omitempty" yaml:"message,omitempty"` Message string `json:"message,omitempty" yaml:"message,omitempty"`
} }
type OSUpgradeSummary struct {
TargetedNodes int32 `json:"targetedNodes,omitempty" yaml:"targetedNodes,omitempty"`
PendingNodes int32 `json:"pendingNodes,omitempty" yaml:"pendingNodes,omitempty"`
RunningNodes int32 `json:"runningNodes,omitempty" yaml:"runningNodes,omitempty"`
SucceededNodes int32 `json:"succeededNodes,omitempty" yaml:"succeededNodes,omitempty"`
FailedNodes int32 `json:"failedNodes,omitempty" yaml:"failedNodes,omitempty"`
}
// +genclient // +genclient
// +kubebuilder:object:root=true // +kubebuilder:object:root=true
// +kubebuilder:subresource:status // +kubebuilder:subresource:status
@@ -96,7 +82,7 @@ type OSUpgradeSummary struct {
// +kubebuilder:printcolumn:name="Source",type=string,JSONPath=`.spec.sourceRef.name` // +kubebuilder:printcolumn:name="Source",type=string,JSONPath=`.spec.sourceRef.name`
// +kubebuilder:printcolumn:name="Current",type=string,JSONPath=`.status.currentVersion` // +kubebuilder:printcolumn:name="Current",type=string,JSONPath=`.status.currentVersion`
// +kubebuilder:printcolumn:name="Target",type=string,JSONPath=`.status.targetVersion` // +kubebuilder:printcolumn:name="Target",type=string,JSONPath=`.status.targetVersion`
// +kubebuilder:printcolumn:name="Status",type=string,JSONPath=`.status.phase` // +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase`
type OSUpgradeProgress struct { type OSUpgradeProgress struct {
metav1.TypeMeta `json:",inline" yaml:",inline"` metav1.TypeMeta `json:",inline" yaml:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"` metav1.ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
@@ -138,3 +124,11 @@ type OSUpgradeProgressStatus struct {
CurrentFrom string `json:"currentFrom,omitempty" yaml:"currentFrom,omitempty"` CurrentFrom string `json:"currentFrom,omitempty" yaml:"currentFrom,omitempty"`
CurrentTo string `json:"currentTo,omitempty" yaml:"currentTo,omitempty"` CurrentTo string `json:"currentTo,omitempty" yaml:"currentTo,omitempty"`
} }
// StatusPhase returns the upgrade's current phase as a plain string, or the
// empty string when no status has been recorded yet.
func (osu OSUpgrade) StatusPhase() string {
	if osu.Status == nil {
		return ""
	}
	return string(osu.Status.Phase)
}

View File

@@ -360,7 +360,6 @@ func (in *OSUpgradeSpec) DeepCopy() *OSUpgradeSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OSUpgradeStatus) DeepCopyInto(out *OSUpgradeStatus) { func (in *OSUpgradeStatus) DeepCopyInto(out *OSUpgradeStatus) {
*out = *in *out = *in
out.Summary = in.Summary
if in.Conditions != nil { if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions in, out := &in.Conditions, &out.Conditions
*out = make([]v1.Condition, len(*in)) *out = make([]v1.Condition, len(*in))
@@ -380,21 +379,6 @@ func (in *OSUpgradeStatus) DeepCopy() *OSUpgradeStatus {
return out return out
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OSUpgradeSummary) DeepCopyInto(out *OSUpgradeSummary) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSUpgradeSummary.
func (in *OSUpgradeSummary) DeepCopy() *OSUpgradeSummary {
if in == nil {
return nil
}
out := new(OSUpgradeSummary)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VersionCatalogSource) DeepCopyInto(out *VersionCatalogSource) { func (in *VersionCatalogSource) DeepCopyInto(out *VersionCatalogSource) {
*out = *in *out = *in

View File

@@ -2,6 +2,7 @@ package controller
import ( import (
"context" "context"
"errors"
"net" "net"
"net/http" "net/http"
"os" "os"
@@ -11,44 +12,78 @@ import (
"k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/klog/v2" "k8s.io/klog/v2"
mksadmission "example.com/monok8s/pkg/controller/admission" mkscontroller "example.com/monok8s/pkg/controller"
osupgradectrl "example.com/monok8s/pkg/controller/osupgrade"
"example.com/monok8s/pkg/kube" "example.com/monok8s/pkg/kube"
"example.com/monok8s/pkg/templates" "example.com/monok8s/pkg/templates"
) )
type ServerConfig struct { type ServerConfig struct {
Namespace string `json:"namespace,omitempty"`
TLSCertFile string `json:"tlsCertFile,omitempty"` TLSCertFile string `json:"tlsCertFile,omitempty"`
TLSPrivateKeyFile string `json:"tlsPrivateKeyFile,omitempty"` TLSPrivateKeyFile string `json:"tlsPrivateKeyFile,omitempty"`
} }
func NewCmdController(flags *genericclioptions.ConfigFlags) *cobra.Command { func NewCmdController(flags *genericclioptions.ConfigFlags) *cobra.Command {
var namespace string = templates.DefaultNamespace
var conf ServerConfig var conf ServerConfig
cmd := &cobra.Command{ cmd := &cobra.Command{
Use: "controller", Use: "controller",
Short: "Admission controller that handles OSUpgrade resources", Short: "Start a controller that handles OSUpgrade resources",
RunE: func(cmd *cobra.Command, _ []string) error { RunE: func(cmd *cobra.Command, _ []string) error {
ctx := cmd.Context() ctx := cmd.Context()
klog.InfoS("starting controller", klog.InfoS("starting controller", "namespace", conf.Namespace)
"namespace", namespace,
)
clients, err := kube.NewClients(flags) clients, err := kube.NewClients(flags)
if err != nil { if err != nil {
return err return err
} }
return httpListen(ctx, clients, conf) ctx, cancel := context.WithCancel(ctx)
defer cancel()
httpErrCh := make(chan error, 1)
watchErrCh := make(chan error, 1)
go func() {
klog.InfoS("starting OSUpgrade watch loop", "namespace", conf.Namespace)
watchErrCh <- osupgradectrl.Watch(ctx, clients, conf.Namespace)
}()
go func() {
httpErrCh <- httpListen(ctx, clients, conf)
}()
select {
case <-ctx.Done():
klog.InfoS("controller context canceled")
return ctx.Err()
case err := <-watchErrCh:
if err != nil && !errors.Is(err, context.Canceled) {
cancel()
return err
}
cancel()
return nil
case err := <-httpErrCh:
if err != nil && !errors.Is(err, context.Canceled) {
cancel()
return err
}
cancel()
return nil
}
}, },
} }
cmd.Flags().StringVar(&namespace, "namespace", templates.DefaultNamespace, "namespace to watch") cmd.Flags().StringVar(&conf.Namespace, "namespace", templates.DefaultNamespace, "namespace to watch")
cmd.Flags().StringVar(&conf.TLSCertFile, "tls-cert-file", conf.TLSCertFile, cmd.Flags().StringVar(&conf.TLSCertFile, "tls-cert-file", conf.TLSCertFile,
"File containing x509 Certificate used for serving HTTPS (with intermediate certs, if any, concatenated after server cert).") "File containing x509 Certificate used for serving HTTPS (with intermediate certs, if any, concatenated after server cert).")
cmd.Flags().StringVar(&conf.TLSPrivateKeyFile, "tls-private-key-file", conf.TLSPrivateKeyFile, "File containing x509 private key matching --tls-cert-file.") cmd.Flags().StringVar(&conf.TLSPrivateKeyFile, "tls-private-key-file", conf.TLSPrivateKeyFile,
"File containing x509 private key matching --tls-cert-file.")
return cmd return cmd
} }
@@ -57,41 +92,59 @@ func httpListen(ctx context.Context, clients *kube.Clients, conf ServerConfig) e
address, port := "", "8443" address, port := "", "8443"
addr := net.JoinHostPort(address, port) addr := net.JoinHostPort(address, port)
ns := os.Getenv("POD_NAMESPACE")
nodeName := os.Getenv("NODE_NAME") nodeName := os.Getenv("NODE_NAME")
server := mksadmission.NewServer(ctx, clients, ns, nodeName) server := mkscontroller.NewServer(ctx, clients, conf.Namespace, nodeName)
s := &http.Server{ s := &http.Server{
Addr: addr, Addr: addr,
Handler: server, Handler: server,
IdleTimeout: 90 * time.Second, IdleTimeout: 90 * time.Second,
ReadTimeout: 4 * 60 * time.Minute, ReadTimeout: 4 * time.Minute,
WriteTimeout: 4 * 60 * time.Minute, WriteTimeout: 4 * time.Minute,
MaxHeaderBytes: 1 << 20, MaxHeaderBytes: 1 << 20,
} }
if conf.TLSCertFile != "" { serverErrCh := make(chan error, 1)
klog.InfoS("starting HTTPS server",
"addr", addr,
"certFile", conf.TLSCertFile,
"keyFile", conf.TLSPrivateKeyFile,
)
if err := s.ListenAndServeTLS(conf.TLSCertFile, conf.TLSPrivateKeyFile); err != nil { go func() {
klog.ErrorS(err, "HTTPS server failed") if conf.TLSCertFile != "" {
os.Exit(1) klog.InfoS("starting HTTPS server",
"addr", addr,
"certFile", conf.TLSCertFile,
"keyFile", conf.TLSPrivateKeyFile,
)
serverErrCh <- s.ListenAndServeTLS(conf.TLSCertFile, conf.TLSPrivateKeyFile)
return
} }
} else {
klog.InfoS("starting HTTP server",
"addr", addr,
)
if err := s.ListenAndServe(); err != nil { klog.InfoS("starting HTTP server", "addr", addr)
serverErrCh <- s.ListenAndServe()
}()
select {
case <-ctx.Done():
klog.InfoS("shutting down HTTP server", "addr", addr)
shutdownCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
err := s.Shutdown(shutdownCtx)
if err != nil {
return err
}
err = <-serverErrCh
if err != nil && !errors.Is(err, http.ErrServerClosed) {
return err
}
return context.Canceled
case err := <-serverErrCh:
if err != nil && !errors.Is(err, http.ErrServerClosed) {
klog.ErrorS(err, "HTTP server failed") klog.ErrorS(err, "HTTP server failed")
os.Exit(1) return err
} }
return nil
} }
return nil
} }

View File

@@ -1,157 +0,0 @@
package admission
import (
"context"
"io"
"net/http"
admissionv1 "k8s.io/api/admission/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
monov1alpha1 "example.com/monok8s/pkg/apis/monok8s/v1alpha1"
"example.com/monok8s/pkg/controller/osupgrade"
"example.com/monok8s/pkg/kube"
"github.com/emicklei/go-restful/v3"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apiserver/pkg/server/httplog"
"k8s.io/klog/v2"
)
// Decoding machinery for incoming AdmissionReview payloads. The scheme is
// populated in init() below; the universal deserializer derived from it can
// decode any type registered there.
var (
	scheme       = runtime.NewScheme()
	codecs       = serializer.NewCodecFactory(scheme)
	deserializer = codecs.UniversalDeserializer()
)

// statusesNoTracePred suppresses httplog tracing for these common, benign
// HTTP statuses; anything else gets a logged trace.
var statusesNoTracePred = httplog.StatusIsNot(
	http.StatusOK,
	http.StatusFound,
	http.StatusMovedPermanently,
	http.StatusTemporaryRedirect,
	http.StatusBadRequest,
	http.StatusNotFound,
	http.StatusSwitchingProtocols,
)
// init registers the admission and monok8s API groups with the local scheme
// so the deserializer can decode AdmissionReview and OSUpgrade objects.
// Registration errors are deliberately discarded; the blank assignment makes
// that explicit.
func init() {
	_ = admissionv1.AddToScheme(scheme)
	_ = monov1alpha1.AddToScheme(scheme)
}
// Server serves the /admission webhook endpoint via a go-restful container.
type Server struct {
	restfulCont *restful.Container // routes; built by Initialize
	ctx         context.Context    // NOTE(review): ctx stored in a struct — confirm its lifetime matches the server's
	clients     *kube.Clients      // Kubernetes + monok8s API clients
	namespace   string             // namespace used when ensuring OSUpgradeProgress objects
	nodeName    string             // this controller's own node name
}
// NewServer constructs an admission Server bound to the given clients,
// namespace, and node, and wires up its HTTP routes before returning.
func NewServer(ctx context.Context, clients *kube.Clients, namespace, nodeName string) *Server {
	srv := new(Server)
	srv.ctx = ctx
	srv.clients = clients
	srv.namespace = namespace
	srv.nodeName = nodeName
	srv.Initialize()
	return srv
}
// ServeHTTP implements http.Handler, guarding against an uninitialized
// receiver before delegating to the logging-wrapped restful container.
func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	switch {
	case s == nil:
		http.Error(w, "admission server is nil", http.StatusInternalServerError)
	case s.restfulCont == nil:
		http.Error(w, "admission server not initialized", http.StatusInternalServerError)
	default:
		httplog.WithLogging(s.restfulCont, statusesNoTracePred).ServeHTTP(w, req)
	}
}
// Initialize builds the restful container and registers the single
// POST /admission route that accepts and returns AdmissionReview JSON.
func (s *Server) Initialize() {
	container := restful.NewContainer()

	ws := new(restful.WebService)
	ws.Path("/admission").
		Consumes(restful.MIME_JSON).
		Produces(restful.MIME_JSON)
	ws.Route(ws.POST("").To(s.triggerAdmission).
		Reads(admissionv1.AdmissionReview{}).
		Writes(admissionv1.AdmissionReview{}))

	container.Add(ws)
	s.restfulCont = container
}
// triggerAdmission handles POST /admission. It decodes an AdmissionReview,
// always responds Allowed (this webhook observes rather than gates), and —
// when the reviewed object decodes as an OSUpgrade — best-effort ensures an
// OSUpgradeProgress for this controller's own node.
func (s *Server) triggerAdmission(request *restful.Request, response *restful.Response) {
	body, err := io.ReadAll(request.Request.Body)
	if err != nil {
		_ = response.WriteError(http.StatusBadRequest, err)
		return
	}
	var reviewReq admissionv1.AdmissionReview
	if _, _, err := deserializer.Decode(body, nil, &reviewReq); err != nil {
		_ = response.WriteError(http.StatusBadRequest, err)
		return
	}
	if reviewReq.Request == nil {
		_ = response.WriteErrorString(http.StatusBadRequest, "missing admission request")
		return
	}
	// The response is fixed up front: every well-formed review is allowed,
	// regardless of what the processing below finds.
	resp := admissionv1.AdmissionReview{
		TypeMeta: metav1.TypeMeta{
			Kind:       "AdmissionReview",
			APIVersion: "admission.k8s.io/v1",
		},
		Response: &admissionv1.AdmissionResponse{
			UID:     reviewReq.Request.UID,
			Allowed: true,
			Result:  &metav1.Status{Message: "OK"},
		},
	}
	var osu monov1alpha1.OSUpgrade
	if _, _, err := deserializer.Decode(reviewReq.Request.Object.Raw, nil, &osu); err != nil {
		// Not an OSUpgrade (or undecodable): allow and move on quietly.
		klog.V(1).InfoS("Skipping non-OSUpgrade resource",
			"uid", reviewReq.Request.UID,
			"kind", reviewReq.Request.Kind.Kind,
			"operation", reviewReq.Request.Operation,
			"err", err,
		)
		_ = response.WriteEntity(resp)
		return
	}
	klog.InfoS("Received OSUpgrade admission",
		"uid", reviewReq.Request.UID,
		"operation", reviewReq.Request.Operation,
		"name", osu.Name,
		"namespace", osu.Namespace,
		"node", s.nodeName,
	)
	// Ensure progress tracking for this controller's node only (s.nodeName);
	// failures are logged but never block admission.
	if err := osupgrade.EnsureOSUpgradeProgressForNode(
		s.ctx,
		s.clients,
		s.namespace,
		s.nodeName,
		&osu,
	); err != nil {
		klog.ErrorS(err, "ensure OSUpgradeProgress for node failed",
			"osupgrade", osu.Name,
			"node", s.nodeName,
		)
	}
	_ = response.WriteEntity(resp)
}

View File

@@ -0,0 +1,346 @@
package osupgrade
import (
"context"
"fmt"
"time"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/klog/v2"
monov1alpha1 "example.com/monok8s/pkg/apis/monok8s/v1alpha1"
"example.com/monok8s/pkg/kube"
)
// Watch runs the OSUpgrade reconcile loop until ctx is canceled. Failed
// watch sessions are retried after a short pause; an expired
// resourceVersion is cleared so the next session relists from scratch.
func Watch(ctx context.Context, clients *kube.Clients, namespace string) error {
	resourceVersion := ""
	for ctx.Err() == nil {
		err := watchOnce(ctx, clients, namespace, &resourceVersion)
		if err == nil {
			continue
		}
		if ctx.Err() != nil {
			break
		}
		if apierrors.IsResourceExpired(err) {
			// Expired RV is normal enough; clear it and relist.
			klog.InfoS("OSUpgrade watch resourceVersion expired; resetting",
				"namespace", namespace,
				"resourceVersion", resourceVersion,
			)
			resourceVersion = ""
		} else {
			klog.ErrorS(err, "OSUpgrade watch failed; retrying",
				"namespace", namespace,
				"resourceVersion", resourceVersion,
			)
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(2 * time.Second):
		}
	}
	return ctx.Err()
}
// watchOnce performs one list+watch session over OSUpgrades in namespace.
// With a blank *resourceVersion it first lists and reconciles every existing
// object, then watches from the list's RV. It returns only on context
// cancellation or a broken/errored watch; the caller (Watch) retries.
//
// resourceVersion is shared state with the caller: it is advanced on each
// bookmark and object event so a retried session resumes where this one
// left off instead of relisting.
func watchOnce(
	ctx context.Context,
	clients *kube.Clients,
	namespace string,
	resourceVersion *string,
) error {
	// Cold start: list existing objects once, handle them, then watch from list RV.
	if *resourceVersion == "" {
		list, err := clients.MonoKS.
			Monok8sV1alpha1().
			OSUpgrades(namespace).
			List(ctx, metav1.ListOptions{})
		if err != nil {
			return fmt.Errorf("list OSUpgrades: %w", err)
		}
		for i := range list.Items {
			osu := &list.Items[i]
			handled, err := handleOSUpgrade(ctx, clients, namespace, osu)
			if err != nil {
				// Log and keep going: one bad object must not stall the
				// initial sync of the rest.
				klog.ErrorS(err, "reconcile existing OSUpgrade failed",
					"name", osu.Name,
					"resourceVersion", osu.ResourceVersion,
				)
				continue
			}
			if !handled {
				klog.V(2).InfoS("skipping existing OSUpgrade",
					"name", osu.Name,
					"phase", osu.StatusPhase(),
				)
			}
		}
		*resourceVersion = list.ResourceVersion
		klog.InfoS("initial OSUpgrade sync complete",
			"namespace", namespace,
			"resourceVersion", *resourceVersion,
			"count", len(list.Items),
		)
	}
	w, err := clients.MonoKS.
		Monok8sV1alpha1().
		OSUpgrades(namespace).
		Watch(ctx, metav1.ListOptions{
			ResourceVersion:     *resourceVersion,
			AllowWatchBookmarks: true,
		})
	if err != nil {
		return fmt.Errorf("watch OSUpgrades: %w", err)
	}
	defer w.Stop()
	klog.InfoS("watching OSUpgrades",
		"namespace", namespace,
		"resourceVersion", *resourceVersion,
	)
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case evt, ok := <-w.ResultChan():
			if !ok {
				// Server closed the stream; the caller retries with the
				// current RV so no events are reprocessed unnecessarily.
				return fmt.Errorf("watch channel closed")
			}
			switch evt.Type {
			case watch.Bookmark:
				// Bookmarks only advance the RV; there is no object to reconcile.
				if rv := extractResourceVersion(evt.Object); rv != "" {
					*resourceVersion = rv
				}
				continue
			case watch.Error:
				// Let outer loop retry / relist.
				return fmt.Errorf("watch returned error event")
			case watch.Deleted:
				// Top-level delete does not require action here.
				continue
			case watch.Added, watch.Modified:
				// handled below
			default:
				klog.V(1).InfoS("skipping unexpected watch event type",
					"eventType", evt.Type,
				)
				continue
			}
			osu, ok := evt.Object.(*monov1alpha1.OSUpgrade)
			if !ok {
				klog.V(1).InfoS("skipping unexpected watch object type",
					"type", fmt.Sprintf("%T", evt.Object),
				)
				continue
			}
			// Record the RV before reconciling so a retry after an error
			// event resumes past this object.
			if osu.ResourceVersion != "" {
				*resourceVersion = osu.ResourceVersion
			}
			handled, err := handleOSUpgrade(ctx, clients, namespace, osu)
			if err != nil {
				klog.ErrorS(err, "reconcile OSUpgrade failed",
					"name", osu.Name,
					"eventType", evt.Type,
					"resourceVersion", osu.ResourceVersion,
				)
				continue
			}
			if !handled {
				klog.V(2).InfoS("skipping OSUpgrade",
					"name", osu.Name,
					"eventType", evt.Type,
					"phase", osu.StatusPhase(),
				)
			}
		}
	}
}
// handleOSUpgrade dispatches one OSUpgrade to the appropriate reconcile
// stage. It reports whether the object was acted on, plus any reconcile
// error. A missing status or a stale observed generation means the spec
// needs (re)processing; an Accepted phase means fanout to nodes.
func handleOSUpgrade(
	ctx context.Context,
	clients *kube.Clients,
	namespace string,
	osu *monov1alpha1.OSUpgrade,
) (bool, error) {
	if !shouldHandle(osu) {
		return false, nil
	}
	switch {
	case osu.Status == nil || osu.Status.ObservedGeneration != osu.Generation:
		return true, reconcileSpec(ctx, clients, namespace, osu)
	case osu.Status.Phase == monov1alpha1.OSUpgradePhaseAccepted:
		return true, reconcileFanout(ctx, clients, namespace, osu)
	default:
		return false, nil
	}
}
// reconcileSpec records acceptance of the current spec: it writes a fresh
// status (Accepted, resolved to the desired version, current generation)
// through the status subresource. The caller's copy of osu is left
// untouched; all mutation happens on a deep copy.
func reconcileSpec(
	ctx context.Context,
	clients *kube.Clients,
	namespace string,
	osu *monov1alpha1.OSUpgrade,
) error {
	updated := osu.DeepCopy()
	updated.Status = &monov1alpha1.OSUpgradeStatus{
		Phase:              monov1alpha1.OSUpgradePhaseAccepted,
		ResolvedVersion:    updated.Spec.DesiredVersion,
		ObservedGeneration: updated.Generation,
	}
	_, err := clients.MonoKS.
		Monok8sV1alpha1().
		OSUpgrades(namespace).
		UpdateStatus(ctx, updated, metav1.UpdateOptions{})
	return err
}
// reconcileFanout fans an accepted OSUpgrade out to its target nodes,
// ensuring an OSUpgradeProgress exists for each one. Per-node failures are
// logged and skipped so a single bad node cannot block the rest; the
// function itself only fails when the target list cannot be built.
func reconcileFanout(
	ctx context.Context,
	clients *kube.Clients,
	namespace string,
	osu *monov1alpha1.OSUpgrade,
) error {
	targets, err := listTargetNodeNames(ctx, clients, osu)
	if err != nil {
		return fmt.Errorf("list target nodes for %s: %w", osu.Name, err)
	}
	if len(targets) == 0 {
		klog.InfoS("no targets", "osupgrade", osu.Name)
		return nil
	}
	klog.InfoS("ensuring OSUpgradeProgress for target nodes",
		"osupgrade", osu.Name,
		"targets", len(targets),
	)
	for _, name := range targets {
		err := EnsureOSUpgradeProgressForNode(ctx, clients, namespace, name, osu)
		if err == nil {
			continue
		}
		klog.ErrorS(err, "ensure OSUpgradeProgress for node failed",
			"osupgrade", osu.Name,
			"node", name,
		)
	}
	return nil
}
// listTargetNodeNames resolves the node names an OSUpgrade should fan out
// to. The control-agent label is always required; the upgrade's optional
// nodeSelector is merged in on top. Returns an error for an invalid or
// non-selectable nodeSelector, or if listing nodes fails.
func listTargetNodeNames(
	ctx context.Context,
	clients *kube.Clients,
	osu *monov1alpha1.OSUpgrade,
) ([]string, error) {
	selector := labels.SelectorFromSet(labels.Set{
		monov1alpha1.ControlAgentKey: "true",
	})
	if ns := osu.Spec.NodeSelector; ns != nil {
		sel, err := metav1.LabelSelectorAsSelector(ns)
		if err != nil {
			return nil, fmt.Errorf("invalid nodeSelector: %w", err)
		}
		reqs, selectable := sel.Requirements()
		if !selectable {
			return nil, fmt.Errorf("nodeSelector is not selectable")
		}
		selector = selector.Add(reqs...)
	}

	nodes, err := clients.Kubernetes.CoreV1().
		Nodes().
		List(ctx, metav1.ListOptions{
			LabelSelector: selector.String(),
		})
	if err != nil {
		return nil, fmt.Errorf("list nodes: %w", err)
	}

	names := make([]string, 0, len(nodes.Items))
	for i := range nodes.Items {
		if n := &nodes.Items[i]; shouldUseNode(n) {
			names = append(names, n.Name)
		}
	}
	return names, nil
}
// shouldUseNode reports whether a node is an eligible upgrade target.
// Deliberately conservative: any non-nil, named node qualifies; tighten to
// Ready-only filtering if needed.
func shouldUseNode(node *corev1.Node) bool {
	if node == nil {
		return false
	}
	return node.Name != ""
}
// shouldHandle reports whether the controller has any work to do for osu.
// The object must exist, not be terminating, and carry a desired version;
// work is needed when status is missing (initial processing), the spec
// changed since the last observed generation, or the upgrade sits in the
// Accepted (fanout) phase.
func shouldHandle(osu *monov1alpha1.OSUpgrade) bool {
	switch {
	case osu == nil, osu.DeletionTimestamp != nil, osu.Spec.DesiredVersion == "":
		return false
	case osu.Status == nil:
		// Initial processing stage.
		return true
	case osu.Status.ObservedGeneration != osu.Generation:
		// Spec changed since the last reconcile.
		return true
	default:
		// Fanout stage.
		return osu.Status.Phase == monov1alpha1.OSUpgradePhaseAccepted
	}
}
// extractResourceVersion pulls the resourceVersion off any object exposing
// GetResourceVersion (e.g. bookmark event payloads); it returns "" for
// anything else, including nil.
func extractResourceVersion(obj interface{}) string {
	if o, ok := obj.(interface{ GetResourceVersion() string }); ok {
		return o.GetResourceVersion()
	}
	return ""
}

View File

@@ -0,0 +1,92 @@
package controller
import (
"context"
"net/http"
"time"
"example.com/monok8s/pkg/kube"
"github.com/emicklei/go-restful/v3"
"k8s.io/apiserver/pkg/server/httplog"
)
// statusesNoTracePred suppresses httplog tracing for these common, benign
// HTTP statuses; any other status gets a logged trace.
var statusesNoTracePred = httplog.StatusIsNot(
	http.StatusOK,
	http.StatusFound,
	http.StatusMovedPermanently,
	http.StatusTemporaryRedirect,
	http.StatusBadRequest,
	http.StatusNotFound,
	http.StatusSwitchingProtocols,
)
// Server is the controller's HTTP front end, currently serving only a
// status endpoint.
type Server struct {
	restfulCont *restful.Container // routes; built by Initialize
	ctx         context.Context    // NOTE(review): ctx stored in a struct — confirm it is actually needed
	clients     *kube.Clients      // Kubernetes + monok8s API clients
	namespace   string             // namespace this controller watches
	nodeName    string             // reported in /status responses
	startedAt   time.Time          // construction time; used to compute uptime
}
// StatusResponse is the JSON body returned by GET /status.
type StatusResponse struct {
	OK        bool   `json:"ok"`
	Service   string `json:"service"`
	Namespace string `json:"namespace,omitempty"`
	NodeName  string `json:"nodeName,omitempty"`
	UptimeSec int64  `json:"uptimeSec"` // whole seconds since the server was constructed
}
// NewServer builds the controller's status Server and wires up its HTTP
// routes. The construction time is captured so /status can report uptime.
func NewServer(ctx context.Context, clients *kube.Clients, namespace, nodeName string) *Server {
	srv := new(Server)
	srv.ctx = ctx
	srv.clients = clients
	srv.namespace = namespace
	srv.nodeName = nodeName
	srv.startedAt = time.Now()
	srv.Initialize()
	return srv
}
// ServeHTTP implements http.Handler, guarding against an uninitialized
// receiver before delegating to the logging-wrapped restful container.
func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	switch {
	case s == nil:
		http.Error(w, "server is nil", http.StatusInternalServerError)
	case s.restfulCont == nil:
		http.Error(w, "server not initialized", http.StatusInternalServerError)
	default:
		httplog.WithLogging(s.restfulCont, statusesNoTracePred).ServeHTTP(w, req)
	}
}
// Initialize builds the restful container and registers the single
// GET /status route that reports basic controller health.
func (s *Server) Initialize() {
	container := restful.NewContainer()

	ws := new(restful.WebService)
	ws.Path("/")
	ws.Consumes(restful.MIME_JSON)
	ws.Produces(restful.MIME_JSON)
	ws.Route(ws.GET("/status").To(s.queryStatus).
		Doc("Return basic controller status"))

	container.Add(ws)
	s.restfulCont = container
}
// queryStatus answers GET /status with a small JSON health payload
// identifying the service, its namespace/node, and uptime in seconds.
func (s *Server) queryStatus(request *restful.Request, response *restful.Response) {
	uptime := int64(time.Since(s.startedAt).Seconds())
	status := StatusResponse{
		OK:        true,
		Service:   "monok8s-controller",
		Namespace: s.namespace,
		NodeName:  s.nodeName,
		UptimeSec: uptime,
	}
	// Write errors are deliberately ignored: once the response is partially
	// written there is nothing useful left to do.
	_ = response.WriteHeaderAndEntity(http.StatusOK, status)
}

View File

@@ -16,7 +16,6 @@ echo "metadata:"
echo " name: \"$NAME\"" echo " name: \"$NAME\""
echo "spec:" echo "spec:"
echo " desiredVersion: \"$TARGET_VERSION\"" echo " desiredVersion: \"$TARGET_VERSION\""
echo " imageURL: \"$BASE_URL/monok8s-$TARGET_VERSION-dev.ext4.zst\""
echo " nodeSelector: {}" echo " nodeSelector: {}"
echo " catalog:" echo " catalog:"
echo " inline: |" echo " inline: |"