Generate CRDs with controller-gen instead

This commit is contained in:
2026-04-03 03:10:20 +08:00
parent 1ce15e9ac5
commit 53f9f9376a
13 changed files with 137 additions and 145 deletions

View File

@@ -113,9 +113,10 @@ status:
kubectl get osupgrades
```
NAME TARGET STATUS AGE
my-upgrade-2 v1.35.3 accepted 1m # latest gets realized into a version number
my-downgrade-1 v1.33.2 rejected 1m # Downgrade not supported
NAME DESIRED RESOLVED PHASE TARGETS OK FAIL AGE
my-upgrade-3 stable v1.35.4 RollingOut 3 1 0 1m
my-upgrade-2 v1.35.3 v1.35.3 Accepted 2 0 0 1m
my-downgrade-1 v1.33.2 v1.33.2 Rejected 2 0 2 1m
```
kubectl get osupgradeprogress

View File

@@ -6,9 +6,6 @@ set -euo pipefail
export CTL_BIN_LAYER=$( skopeo inspect docker-daemon:localhost/monok8s/control-agent:dev | jq -r '.Layers[0] | sub("^sha256:"; "")' )
mkdir -p "$ROOTFS/var/cache/apk"
mkdir -p "$ROOTFS/opt/monok8s/config"
mkdir -p "$ROOTFS/build"
mkdir -p \
"$ROOTFS/dev" \
"$ROOTFS/proc" \
@@ -16,7 +13,11 @@ mkdir -p \
"$ROOTFS/run" \
"$ROOTFS/data" \
"$ROOTFS/var" \
"$ROOTFS/tmp"
"$ROOTFS/tmp" \
"$ROOTFS/build" \
"$ROOTFS/var/cache/apk" \
"$ROOTFS/usr/lib/monok8s/crds" \
"$ROOTFS/opt/monok8s/config"
mount --bind /var/cache/apk "$ROOTFS/var/cache/apk"
mount --bind /dev "$ROOTFS/dev"
@@ -27,6 +28,7 @@ mount --bind /run "$ROOTFS/run"
cp /usr/bin/qemu-aarch64-static "$ROOTFS/usr/bin/"
cp /etc/resolv.conf "$ROOTFS/etc/resolv.conf"
cp /build/crio.tar.gz "$ROOTFS/build/"
cp /build/crds/*.yaml "$ROOTFS/usr/lib/monok8s/crds"
chroot "$ROOTFS" /bin/sh -c "ln -s /var/cache/apk /etc/apk/cache"
# chroot "$ROOTFS" /bin/sh -c "apk update"

View File

@@ -1,5 +1,11 @@
## For development workflow
For running `controller-gen`
```
export PATH="$(go env GOPATH)/bin:$PATH"
go install sigs.k8s.io/controller-tools/cmd/controller-gen@latest
```
Run this on device
```bash
while true; do nc -l -p 1234 -e sh; done

View File

@@ -1,17 +1,20 @@
# Should be the same as upstream version in prodution
# Should be the same as upstream version in production
VERSION ?= dev
# Target kube version
KUBE_VERSION ?= v1.35.1
GIT_REV=$(shell git rev-parse HEAD)
GIT_REV := $(shell git rev-parse HEAD)
BIN_DIR := bin
OUT_DIR := out
BUILDINFO_FILE := pkg/buildinfo/buildinfo_gen.go
CRD_PATHS := ./pkg/apis/...
# Never cache this
.buildinfo:
@mkdir -p $(dir $(BUILDINFO_FILE))
@printf '%s\n' \
'package buildinfo' \
'' \
@@ -24,8 +27,9 @@ BUILDINFO_FILE := pkg/buildinfo/buildinfo_gen.go
> $(BUILDINFO_FILE)
build: .buildinfo
mkdir -p $(BIN_DIR)
GOOS=linux GOARCH=arm64 go build -o $(BIN_DIR)/ctl-linux-aarch64-$(VERSION) ./cmd/ctl/
mkdir -p $(BIN_DIR) $(OUT_DIR)/crds
controller-gen crd paths=$(CRD_PATHS) output:crd:dir=$(OUT_DIR)/crds
GOOS=linux GOARCH=arm64 go build -o $(BIN_DIR)/ctl-linux-aarch64-$(VERSION) ./cmd/ctl
build-agent: build
docker build \
@@ -34,15 +38,17 @@ build-agent: build
-t localhost/monok8s/control-agent:$(VERSION) .
build-local: .buildinfo
mkdir -p $(BIN_DIR)
go build -o $(BIN_DIR)/ctl-$(VERSION) ./cmd/ctl
run:
go run ./cmd/ctl
clean:
docker image rm localhost/monok8s/control-agent:$(VERSION)
-docker image rm localhost/monok8s/control-agent:$(VERSION)
rm -rf $(BIN_DIR) \
$(BUILDINFO_FILE)
$(BUILDINFO_FILE) \
$(OUT_DIR)/crds
all: build build-agent build-local

View File

@@ -1,3 +1,5 @@
// +kubebuilder:object:generate=true
// +groupName=monok8s.io
package v1alpha1
import (
@@ -15,19 +17,21 @@ var (
Label = "monok8s.io/label"
Annotation = "monok8s.io/annotation"
ControlAgentKey = "monok8s.io/control-agent"
)
SchemeGroupVersion = schema.GroupVersion{Group: Group, Version: Version}
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
AddToScheme = SchemeBuilder.AddToScheme
var (
GroupVersion = schema.GroupVersion{Group: Group, Version: Version}
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
AddToScheme = SchemeBuilder.AddToScheme
)
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
scheme.AddKnownTypes(GroupVersion,
&MonoKSConfig{},
&MonoKSConfigList{},
&OSUpgrade{},
&OSUpgradeList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
metav1.AddToGroupVersion(scheme, GroupVersion)
return nil
}

View File

@@ -5,13 +5,47 @@ import (
"k8s.io/apimachinery/pkg/runtime"
)
type OSUpgradePhase string
const (
OSUpgradePhasePending OSUpgradePhase = "Pending"
OSUpgradePhaseAccepted OSUpgradePhase = "Accepted"
OSUpgradePhaseRollingOut OSUpgradePhase = "RollingOut"
OSUpgradePhaseCompleted OSUpgradePhase = "Completed"
OSUpgradePhaseRejected OSUpgradePhase = "Rejected"
)
type OSUpgradeProgressPhase string
const (
OSUpgradeProgressPhasePending OSUpgradeProgressPhase = "pending"
OSUpgradeProgressPhaseDownloading OSUpgradeProgressPhase = "downloading"
OSUpgradeProgressPhaseWriting OSUpgradeProgressPhase = "writing"
OSUpgradeProgressPhaseRebooting OSUpgradeProgressPhase = "rebooting"
OSUpgradeProgressPhaseVerifying OSUpgradeProgressPhase = "verifying"
OSUpgradeProgressPhaseCompleted OSUpgradeProgressPhase = "completed"
OSUpgradeProgressPhaseFailed OSUpgradeProgressPhase = "failed"
OSUpgradeProgressPhaseRejected OSUpgradeProgressPhase = "rejected"
)
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:resource:scope=Namespaced,shortName=osu
// +kubebuilder:printcolumn:name="Desired",type=string,JSONPath=`.spec.desiredVersion`
// +kubebuilder:printcolumn:name="Resolved",type=string,JSONPath=`.status.resolvedVersion`
// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase`
// +kubebuilder:printcolumn:name="Targets",type=integer,JSONPath=`.status.summary.targetedNodes`
// +kubebuilder:printcolumn:name="OK",type=integer,JSONPath=`.status.summary.succeededNodes`
// +kubebuilder:printcolumn:name="Fail",type=integer,JSONPath=`.status.summary.failedNodes`
type OSUpgrade struct {
metav1.TypeMeta `json:",inline" yaml:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
Spec OSUpgradeSpec `json:"spec,omitempty" yaml:"spec,omitempty"`
Status *OSUpgradeStatus `json:"status,omitempty" yaml:"status,omitempty"`
Spec OSUpgradeSpec `json:"spec,omitempty" yaml:"spec,omitempty"`
Status *OSUpgradeStatus `json:"status,omitempty" yaml:"status,omitempty"`
}
// +kubebuilder:object:root=true
type OSUpgradeList struct {
metav1.TypeMeta `json:",inline" yaml:",inline"`
metav1.ListMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
@@ -19,18 +53,25 @@ type OSUpgradeList struct {
}
type OSUpgradeSpec struct {
Version string `json:"version,omitempty" yaml:"version,omitempty"`
ImageURL string `json:"imageURL,omitempty" yaml:"imageURL,omitempty"`
Checksum string `json:"checksum,omitempty" yaml:"checksum,omitempty"`
// User request, can be "stable" or an explicit version like "v1.35.3".
DesiredVersion string `json:"desiredVersion,omitempty" yaml:"desiredVersion,omitempty"`
ImageURL string `json:"imageURL,omitempty" yaml:"imageURL,omitempty"`
Checksum string `json:"checksum,omitempty" yaml:"checksum,omitempty"`
NodeSelector *metav1.LabelSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty"`
}
type OSUpgradeStatus struct {
Phase string `json:"phase,omitempty" yaml:"phase,omitempty"`
Phase OSUpgradePhase `json:"phase,omitempty" yaml:"phase,omitempty"`
ResolvedVersion string `json:"resolvedVersion,omitempty" yaml:"resolvedVersion,omitempty"`
ObservedGeneration int64 `json:"observedGeneration,omitempty" yaml:"observedGeneration,omitempty"`
Summary OSUpgradeSummary `json:"summary,omitempty" yaml:"summary,omitempty"`
Conditions []metav1.Condition `json:"conditions,omitempty" yaml:"conditions,omitempty"`
// Optional, useful when rejected.
Reason string `json:"reason,omitempty" yaml:"reason,omitempty"`
Message string `json:"message,omitempty" yaml:"message,omitempty"`
}
type OSUpgradeSummary struct {
@@ -41,13 +82,23 @@ type OSUpgradeSummary struct {
FailedNodes int32 `json:"failedNodes,omitempty" yaml:"failedNodes,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:resource:scope=Namespaced,shortName=osup
// +kubebuilder:printcolumn:name="Node",type=string,JSONPath=`.spec.nodeName`
// +kubebuilder:printcolumn:name="Source",type=string,JSONPath=`.spec.sourceRef.name`
// +kubebuilder:printcolumn:name="Current",type=string,JSONPath=`.status.currentVersion`
// +kubebuilder:printcolumn:name="Target",type=string,JSONPath=`.status.targetVersion`
// +kubebuilder:printcolumn:name="Status",type=string,JSONPath=`.status.phase`
type OSUpgradeProgress struct {
metav1.TypeMeta `json:",inline" yaml:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
Spec OSUpgradeProgressSpec `json:"spec,omitempty" yaml:"spec,omitempty"`
Status *OSUpgradeProgressStatus `json:"status,omitempty" yaml:"status,omitempty"`
Spec OSUpgradeProgressSpec `json:"spec,omitempty" yaml:"spec,omitempty"`
Status *OSUpgradeProgressStatus `json:"status,omitempty" yaml:"status,omitempty"`
}
// +kubebuilder:object:root=true
type OSUpgradeProgressList struct {
metav1.TypeMeta `json:",inline" yaml:",inline"`
metav1.ListMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
@@ -64,16 +115,16 @@ type OSUpgradeSourceRef struct {
}
type OSUpgradeProgressStatus struct {
CurrentVersion string `json:"currentVersion,omitempty" yaml:"currentVersion,omitempty"`
TargetVersion string `json:"targetVersion,omitempty" yaml:"targetVersion,omitempty"`
Phase string `json:"phase,omitempty" yaml:"phase,omitempty"`
StartedAt *metav1.Time `json:"startedAt,omitempty" yaml:"startedAt,omitempty"`
CompletedAt *metav1.Time `json:"completedAt,omitempty" yaml:"completedAt,omitempty"`
LastUpdatedAt *metav1.Time `json:"lastUpdatedAt,omitempty" yaml:"lastUpdatedAt,omitempty"`
RetryCount int32 `json:"retryCount,omitempty" yaml:"retryCount,omitempty"`
InactivePartition string `json:"inactivePartition,omitempty" yaml:"inactivePartition,omitempty"`
FailureReason string `json:"failureReason,omitempty" yaml:"failureReason,omitempty"`
Message string `json:"message,omitempty" yaml:"message,omitempty"`
CurrentVersion string `json:"currentVersion,omitempty" yaml:"currentVersion,omitempty"`
TargetVersion string `json:"targetVersion,omitempty" yaml:"targetVersion,omitempty"`
Phase OSUpgradeProgressPhase `json:"phase,omitempty" yaml:"phase,omitempty"`
StartedAt *metav1.Time `json:"startedAt,omitempty" yaml:"startedAt,omitempty"`
CompletedAt *metav1.Time `json:"completedAt,omitempty" yaml:"completedAt,omitempty"`
LastUpdatedAt *metav1.Time `json:"lastUpdatedAt,omitempty" yaml:"lastUpdatedAt,omitempty"`
RetryCount int32 `json:"retryCount,omitempty" yaml:"retryCount,omitempty"`
InactivePartition string `json:"inactivePartition,omitempty" yaml:"inactivePartition,omitempty"`
FailureReason string `json:"failureReason,omitempty" yaml:"failureReason,omitempty"`
Message string `json:"message,omitempty" yaml:"message,omitempty"`
}
func (in *OSUpgrade) DeepCopyObject() runtime.Object {

View File

@@ -1,54 +0,0 @@
package apply
import (
"context"
"fmt"
"example.com/monok8s/pkg/crds"
"example.com/monok8s/pkg/kube"
"github.com/spf13/cobra"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/klog/v2"
)
func NewCmdApply(flags *genericclioptions.ConfigFlags) *cobra.Command {
cmd := &cobra.Command{Use: "apply", Short: "Apply MonoK8s resources"}
cmd.AddCommand(newCmdApplyCRDs(flags))
return cmd
}
func newCmdApplyCRDs(flags *genericclioptions.ConfigFlags) *cobra.Command {
return &cobra.Command{
Use: "crds",
Short: "Register the MonoKSConfig and OSUpgrade CRDs",
RunE: func(cmd *cobra.Command, _ []string) error {
clients, err := kube.NewClients(flags)
if err != nil {
return err
}
ctx := context.Background()
for _, wanted := range crds.Definitions() {
_, err := clients.APIExtensions.ApiextensionsV1().CustomResourceDefinitions().Create(ctx, wanted, metav1.CreateOptions{})
if apierrors.IsAlreadyExists(err) {
current, getErr := clients.APIExtensions.ApiextensionsV1().CustomResourceDefinitions().Get(ctx, wanted.Name, metav1.GetOptions{})
if getErr != nil {
return getErr
}
wanted.ResourceVersion = current.ResourceVersion
_, err = clients.APIExtensions.ApiextensionsV1().CustomResourceDefinitions().Update(ctx, wanted, metav1.UpdateOptions{})
}
if err != nil {
return err
}
klog.InfoS("crd applied", "name", wanted.Name)
}
_, _ = fmt.Fprintln(cmd.OutOrStdout(), "CRDs applied")
return nil
},
}
}
var _ *apiextensionsv1.CustomResourceDefinition

View File

@@ -4,16 +4,15 @@ import (
"flag"
"os"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/klog/v2"
agentcmd "example.com/monok8s/pkg/cmd/agent"
applycmd "example.com/monok8s/pkg/cmd/apply"
checkconfigcmd "example.com/monok8s/pkg/cmd/checkconfig"
createcmd "example.com/monok8s/pkg/cmd/create"
initcmd "example.com/monok8s/pkg/cmd/initcmd"
internalcmd "example.com/monok8s/pkg/cmd/internal"
versioncmd "example.com/monok8s/pkg/cmd/version"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/klog/v2"
)
func init() {
@@ -45,7 +44,6 @@ func NewRootCmd() *cobra.Command {
initcmd.NewCmdInit(flags),
checkconfigcmd.NewCmdCheckConfig(),
createcmd.NewCmdCreate(),
applycmd.NewCmdApply(flags),
agentcmd.NewCmdAgent(flags),
internalcmd.NewCmdInternal(),
)

View File

@@ -15,7 +15,6 @@ import (
"k8s.io/klog/v2"
monov1alpha1 "example.com/monok8s/pkg/apis/monok8s/v1alpha1"
"example.com/monok8s/pkg/crds"
"example.com/monok8s/pkg/kube"
templates "example.com/monok8s/pkg/templates"
)
@@ -27,48 +26,6 @@ const (
kubeconfig = "/etc/kubernetes/admin.conf"
)
func ApplyCRDs(ctx context.Context, n *NodeContext) error {
if n.Config.Spec.ClusterRole != "control-plane" {
return nil
}
clients, err := kube.NewClientsFromKubeconfig(kubeconfig)
if err != nil {
return fmt.Errorf("build kube clients from %s: %w", kubeconfig, err)
}
crdClient := clients.APIExtensions.ApiextensionsV1().CustomResourceDefinitions()
for _, wanted := range crds.Definitions() {
_, err := crdClient.Create(ctx, wanted, metav1.CreateOptions{})
if err == nil {
klog.InfoS("crd created", "name", wanted.Name)
continue
}
if !apierrors.IsAlreadyExists(err) {
return fmt.Errorf("create CRD %s: %w", wanted.Name, err)
}
current, getErr := crdClient.Get(ctx, wanted.Name, metav1.GetOptions{})
if getErr != nil {
return fmt.Errorf("get existing CRD %s: %w", wanted.Name, getErr)
}
updated := wanted.DeepCopy()
updated.ResourceVersion = current.ResourceVersion
_, err = crdClient.Update(ctx, updated, metav1.UpdateOptions{})
if err != nil {
return fmt.Errorf("update CRD %s: %w", wanted.Name, err)
}
klog.InfoS("crd updated", "name", wanted.Name)
}
return nil
}
func ApplyControlAgentDaemonSetResources(ctx context.Context, n *NodeContext) error {
// Only the control-plane should bootstrap this DaemonSet definition.
// And only when the feature is enabled.

View File

@@ -206,11 +206,6 @@ func InitControlPlane(ctx context.Context, nctx *NodeContext) error {
return fmt.Errorf("init control-plane requires fresh local state, got %q", nctx.LocalClusterState.MembershipKind)
}
// Example:
// if err := RunKubeadmInit(ctx, nctx); err != nil {
// return fmt.Errorf("kubeadm init: %w", err)
// }
return nil
}

View File

@@ -8,9 +8,15 @@ import (
"strings"
"time"
"k8s.io/klog/v2"
system "example.com/monok8s/pkg/system"
)
const (
crdsPath = "/usr/lib/monok8s/crds/"
)
func StartKubelet(ctx context.Context, n *NodeContext) error {
return system.EnsureServiceRunning(ctx, n.SystemRunner, "kubelet")
}
@@ -47,3 +53,22 @@ func waitForKubeletHealthy(ctx context.Context, timeout time.Duration) error {
}
}
}
func ApplyCRDs(ctx context.Context, nctx *NodeContext) error {
if nctx.Config.Spec.ClusterRole != "control-plane" {
return nil
}
_, err := nctx.SystemRunner.RunWithOptions(
ctx,
"kubectl",
[]string{"apply", "-f", crdsPath},
system.RunOptions{
Timeout: 10 * time.Minute,
OnStdoutLine: func(line string) { klog.Infof("[kubectl] %s", line) },
OnStderrLine: func(line string) { klog.Infof("[kubectl] %s", line) },
},
)
return err
}

View File

@@ -76,8 +76,8 @@ func DefaultOSUpgrade(v TemplateValues) monov1alpha1.OSUpgrade {
Namespace: DefaultNamespace,
},
Spec: monov1alpha1.OSUpgradeSpec{
Version: v.KubernetesVersion,
ImageURL: "https://example.invalid/images/monok8s-v0.0.1.img.zst",
DesiredVersion: v.KubernetesVersion,
ImageURL: "https://example.invalid/images/monok8s-v0.0.1.img.zst",
NodeSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"kubernetes.io/hostname": firstNonEmpty(v.NodeName, v.Hostname),

View File

@@ -18,6 +18,7 @@ COPY out/board.itb ./
COPY out/rootfs.tar.gz ./rootfs.tar.gz
COPY out/${DEVICE_TREE_TARGET}.dtb ./
COPY out/Image.gz ./
COPY clitools/out/crds ./crds
RUN tar zxf rootfs.tar.gz -C "/out"
RUN tar zxf alpine.tar.gz -C "/out/rootfs"