Migrate to generated clients

This commit is contained in:
2026-04-24 02:51:02 +08:00
parent 4549b9d167
commit e4a19e5926
7 changed files with 316 additions and 107 deletions

View File

@@ -265,6 +265,10 @@ func shouldHandle(osup *monov1alpha1.OSUpgradeProgress) bool {
return false
}
if osup.Status == nil {
return false
}
switch osup.Status.Phase {
case "",
monov1alpha1.OSUpgradeProgressPhasePending:

View File

@@ -7,9 +7,6 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/util/retry"
"k8s.io/klog/v2"
@@ -17,16 +14,6 @@ import (
"example.com/monok8s/pkg/kube"
)
var (
unstructuredConverter = runtime.DefaultUnstructuredConverter
osup_gvr = schema.GroupVersionResource{
Group: monov1alpha1.Group,
Version: monov1alpha1.Version,
Resource: "osupgradeprogresses",
}
)
func EnsureOSUpgradeProgressForNode(
ctx context.Context,
clients *kube.Clients,
@@ -39,6 +26,7 @@ func EnsureOSUpgradeProgressForNode(
}
name := fmt.Sprintf("%s-%s", osu.Name, nodeName)
now := metav1.Now()
progress := &monov1alpha1.OSUpgradeProgress{
TypeMeta: metav1.TypeMeta{
@@ -55,9 +43,13 @@ func EnsureOSUpgradeProgressForNode(
Name: osu.Name,
},
},
Status: &monov1alpha1.OSUpgradeProgressStatus{
Phase: monov1alpha1.OSUpgradeProgressPhasePending,
LastUpdatedAt: &now,
},
}
created, err := createProgress(ctx, clients, osup_gvr, progress)
created, err := createProgress(ctx, clients, progress)
if err == nil {
klog.InfoS("created osupgradeprogress", "name", created.Name, "namespace", created.Namespace)
return nil
@@ -66,7 +58,7 @@ func EnsureOSUpgradeProgressForNode(
return fmt.Errorf("create OSUpgradeProgress %s/%s: %w", namespace, name, err)
}
existing, err := getProgress(ctx, clients, osup_gvr, namespace, name)
existing, err := getProgress(ctx, clients, namespace, name)
if err != nil {
return fmt.Errorf("get existing OSUpgradeProgress %s/%s: %w", namespace, name, err)
}
@@ -96,7 +88,7 @@ func updateProgressRobust(
var out *monov1alpha1.OSUpgradeProgress
err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
current, err := getProgress(ctx, clients, osup_gvr, namespace, name)
current, err := getProgress(ctx, clients, namespace, name)
if err != nil {
return err
}
@@ -107,10 +99,10 @@ func updateProgressRobust(
mutate(current)
updated, err := updateProgressStatus(ctx, clients, osup_gvr, current)
updated, err := updateProgressStatus(ctx, clients, current)
if err != nil {
if isUnknownUpdateResult(err) {
latest, getErr := getProgress(ctx, clients, osup_gvr, namespace, name)
latest, getErr := getProgress(ctx, clients, namespace, name)
if getErr == nil {
out = latest
}
@@ -152,84 +144,55 @@ func isUnknownUpdateResult(err error) bool {
func createProgress(
ctx context.Context,
clients *kube.Clients,
gvr schema.GroupVersionResource,
progress *monov1alpha1.OSUpgradeProgress,
) (*monov1alpha1.OSUpgradeProgress, error) {
obj, err := toUnstructured(progress)
toCreate := progress.DeepCopy()
toCreate.Status = nil
created, err := clients.MonoKS.
Monok8sV1alpha1().
OSUpgradeProgresses(toCreate.Namespace).
Create(ctx, toCreate, metav1.CreateOptions{})
if err != nil {
return nil, err
}
created, err := clients.Dynamic.
Resource(gvr).
Namespace(progress.Namespace).
Create(ctx, obj, metav1.CreateOptions{})
if err != nil {
return nil, err
if progress.Status != nil {
toUpdate := created.DeepCopy()
toUpdate.Status = progress.Status
return updateProgressStatus(ctx, clients, toUpdate)
}
return fromUnstructuredProgress(created)
}
func getProgress(
ctx context.Context,
clients *kube.Clients,
gvr schema.GroupVersionResource,
namespace, name string,
) (*monov1alpha1.OSUpgradeProgress, error) {
got, err := clients.Dynamic.
Resource(gvr).
Namespace(namespace).
Get(ctx, name, metav1.GetOptions{})
if err != nil {
return nil, err
}
return fromUnstructuredProgress(got)
}
func updateProgressSpec(
ctx context.Context,
clients *kube.Clients,
gvr schema.GroupVersionResource,
progress *monov1alpha1.OSUpgradeProgress,
) (*monov1alpha1.OSUpgradeProgress, error) {
obj, err := toUnstructured(progress)
if err != nil {
return nil, err
}
updated, err := clients.Dynamic.
Resource(gvr).
Namespace(progress.Namespace).
Update(ctx, obj, metav1.UpdateOptions{})
if err != nil {
return nil, err
}
return fromUnstructuredProgress(updated)
return created, nil
}
func updateProgressStatus(
ctx context.Context,
clients *kube.Clients,
gvr schema.GroupVersionResource,
progress *monov1alpha1.OSUpgradeProgress,
) (*monov1alpha1.OSUpgradeProgress, error) {
obj, err := toUnstructured(progress)
updated, err := clients.MonoKS.
Monok8sV1alpha1().
OSUpgradeProgresses(progress.Namespace).
UpdateStatus(ctx, progress, metav1.UpdateOptions{})
if err != nil {
return nil, err
return nil, fmt.Errorf(
"update status for OSUpgradeProgress %s/%s: %w",
progress.Namespace, progress.Name, err,
)
}
return updated, nil
}
updated, err := clients.Dynamic.
Resource(gvr).
Namespace(progress.Namespace).
UpdateStatus(ctx, obj, metav1.UpdateOptions{})
if err != nil {
return nil, err
}
return fromUnstructuredProgress(updated)
// getProgress fetches the OSUpgradeProgress identified by namespace/name
// through the generated typed client. The error from the client is returned
// unwrapped so callers can inspect it directly (e.g. with apierrors helpers);
// callers that want request context add their own wrapping.
func getProgress(
	ctx context.Context,
	clients *kube.Clients,
	namespace, name string,
) (*monov1alpha1.OSUpgradeProgress, error) {
	return clients.MonoKS.
		Monok8sV1alpha1().
		OSUpgradeProgresses(namespace).
		Get(ctx, name, metav1.GetOptions{})
}
func failProgress(
@@ -287,19 +250,3 @@ func markProgressCompleted(
return nil
}
func toUnstructured(progress *monov1alpha1.OSUpgradeProgress) (*unstructured.Unstructured, error) {
m, err := unstructuredConverter.ToUnstructured(progress)
if err != nil {
return nil, fmt.Errorf("convert OSUpgradeProgress to unstructured: %w", err)
}
return &unstructured.Unstructured{Object: m}, nil
}
func fromUnstructuredProgress(obj *unstructured.Unstructured) (*monov1alpha1.OSUpgradeProgress, error) {
var progress monov1alpha1.OSUpgradeProgress
if err := unstructuredConverter.FromUnstructured(obj.Object, &progress); err != nil {
return nil, fmt.Errorf("convert unstructured to OSUpgradeProgress: %w", err)
}
return &progress, nil
}

View File

@@ -151,6 +151,11 @@ func applyControlAgentServiceAccount(ctx context.Context, kubeClient kubernetes.
func applyControlAgentClusterRole(ctx context.Context, kubeClient kubernetes.Interface, labels map[string]string) error {
wantRules := []rbacv1.PolicyRule{
{
APIGroups: []string{monov1alpha1.Group},
Resources: []string{"osupgrades"},
Verbs: []string{"get"},
},
{
APIGroups: []string{monov1alpha1.Group},
Resources: []string{"osupgradeprogresses"},

View File

@@ -21,9 +21,9 @@ import (
templates "example.com/monok8s/pkg/templates"
)
func applyAdmissionControllerDeploymentResources(ctx context.Context, n *NodeContext) error {
func applyControllerDeploymentResources(ctx context.Context, n *NodeContext) error {
if strings.TrimSpace(n.Config.Spec.ClusterRole) != "control-plane" || !n.Config.Spec.EnableControlAgent {
klog.InfoS("skipped admission controller deployment",
klog.InfoS("skipped controller deployment",
"clusterRole", n.Config.Spec.ClusterRole,
"enableControlAgent", n.Config.Spec.EnableControlAgent,
)
@@ -56,23 +56,23 @@ func applyAdmissionControllerDeploymentResources(ctx context.Context, n *NodeCon
if err := ensureNamespace(ctx, kubeClient, namespace, labels); err != nil {
return fmt.Errorf("ensure namespace %q: %w", namespace, err)
}
if err := applyAdmissionControllerServiceAccount(ctx, kubeClient, namespace, labels); err != nil {
if err := applyControllerServiceAccount(ctx, kubeClient, namespace, labels); err != nil {
return fmt.Errorf("apply serviceaccount: %w", err)
}
if err := applyAdmissionControllerClusterRole(ctx, kubeClient, labels); err != nil {
if err := applyControllerClusterRole(ctx, kubeClient, labels); err != nil {
return fmt.Errorf("apply clusterrole: %w", err)
}
if err := applyAdmissionControllerClusterRoleBinding(ctx, kubeClient, namespace, labels); err != nil {
if err := applyControllerClusterRoleBinding(ctx, kubeClient, namespace, labels); err != nil {
return fmt.Errorf("apply clusterrolebinding: %w", err)
}
if err := applyAdmissionControllerDeployment(ctx, kubeClient, namespace, labels); err != nil {
if err := applyControllerDeployment(ctx, kubeClient, namespace, labels); err != nil {
return fmt.Errorf("apply deployment: %w", err)
}
return nil
}
func applyAdmissionControllerServiceAccount(ctx context.Context, kubeClient kubernetes.Interface, namespace string, labels map[string]string) error {
func applyControllerServiceAccount(ctx context.Context, kubeClient kubernetes.Interface, namespace string, labels map[string]string) error {
automount := true
want := &corev1.ServiceAccount{
@@ -111,7 +111,7 @@ func applyAdmissionControllerServiceAccount(ctx context.Context, kubeClient kube
return err
}
func applyAdmissionControllerClusterRole(ctx context.Context, kubeClient kubernetes.Interface, labels map[string]string) error {
func applyControllerClusterRole(ctx context.Context, kubeClient kubernetes.Interface, labels map[string]string) error {
wantRules := []rbacv1.PolicyRule{
{
APIGroups: []string{monov1alpha1.Group},
@@ -126,7 +126,12 @@ func applyAdmissionControllerClusterRole(ctx context.Context, kubeClient kuberne
{
APIGroups: []string{monov1alpha1.Group},
Resources: []string{"osupgradeprogresses"},
Verbs: []string{"get", "list", "watch", "create", "patch", "update"},
Verbs: []string{"get", "list", "create"},
},
{
APIGroups: []string{monov1alpha1.Group},
Resources: []string{"osupgradeprogresses/status"},
Verbs: []string{"create"},
},
{
APIGroups: []string{""},
@@ -170,7 +175,7 @@ func applyAdmissionControllerClusterRole(ctx context.Context, kubeClient kuberne
return err
}
func applyAdmissionControllerClusterRoleBinding(ctx context.Context, kubeClient kubernetes.Interface, namespace string, labels map[string]string) error {
func applyControllerClusterRoleBinding(ctx context.Context, kubeClient kubernetes.Interface, namespace string, labels map[string]string) error {
wantSubjects := []rbacv1.Subject{
{
Kind: "ServiceAccount",
@@ -225,7 +230,7 @@ func applyAdmissionControllerClusterRoleBinding(ctx context.Context, kubeClient
return err
}
func applyAdmissionControllerDeployment(ctx context.Context, kubeClient kubernetes.Interface, namespace string, labels map[string]string) error {
func applyControllerDeployment(ctx context.Context, kubeClient kubernetes.Interface, namespace string, labels map[string]string) error {
replicas := int32(1)
selectorLabels := map[string]string{

248
devtools/build-all.sh Executable file
View File

@@ -0,0 +1,248 @@
#!/bin/bash
# build-all.sh — build one or more Kubernetes/CRI-O release images via `make release`.
# Fail fast on errors, unset variables, and pipeline failures.
set -euo pipefail
# Absolute directory containing this script; all work happens relative to it.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# NOTE(review): OUT_DIR is computed but never referenced below — confirm it is needed.
OUT_DIR="$(realpath "$SCRIPT_DIR/../out/")"
cd "$SCRIPT_DIR/../"
# ===== CONFIG TABLE =====
# Add supported concrete builds here.
# format: kube=crio
CONFIGS=(
"v1.33.3=cri-o.arm64.v1.33.3"
"v1.34.1=cri-o.arm64.v1.34.1"
# NOTE(review): CRI-O patch (v1.35.2) lags the kube patch (v1.35.3) — confirm intentional.
"v1.35.3=cri-o.arm64.v1.35.2"
)
# ===== HELPERS =====
strip_v() {
    # Print the given version string with any leading "v" removed
    # (e.g. "v1.2.3" -> "1.2.3"; "1.2.3" is passed through unchanged).
    local version="$1"
    echo "${version#v}"
}
minor_of_version() {
    # Print the "major.minor" prefix of a version such as 1.35.3 or v1.35.3.
    local stripped="${1#v}"
    local major minor _rest
    IFS='.' read -r major minor _rest <<< "$stripped"
    echo "${major}.${minor}"
}
version_sort() {
	# Ascending numeric sort of X.Y.Z version strings read from stdin,
	# comparing major, minor, and patch fields independently.
	sort -t. -k1,1n -k2,2n -k3,3n
}
list_configs() {
    # Print every exact configured build target with its CRI-O pairing.
    echo "Available build targets:"
    local entry kube crio
    for entry in "${CONFIGS[@]}"; do
        kube="${entry%%=*}"
        crio="${entry##*=}"
        echo " ${kube#v} (CRI-O: $crio)"
    done
}
list_minors() {
    # Print the newest configured target for each minor series.
    echo "Latest supported target per minor:"
    local kube crio
    latest_per_minor | while read -r kube crio; do
        echo " ${kube#v} (CRI-O: $crio)"
    done
}
latest_per_minor() {
    # Emit one "<kube> <crio>" line per configured minor series,
    # resolved to that series' newest entry.
    local entry all_minors
    all_minors="$(
        for entry in "${CONFIGS[@]}"; do
            minor_of_version "${entry%%=*}"
        done | sort -uV
    )"
    local series
    while read -r series; do
        [ -n "$series" ] || continue
        resolve_minor "$series"
    done <<< "$all_minors"
}
resolve_exact() {
    # Look up an exact kube version (with or without a leading "v") in CONFIGS.
    # On a match, print "<kube> <crio>" and return 0; otherwise return 1.
    local want="v${1#v}"
    local entry
    for entry in "${CONFIGS[@]}"; do
        if [[ "${entry%%=*}" == "$want" ]]; then
            echo "${entry%%=*} ${entry##*=}"
            return 0
        fi
    done
    return 1
}
resolve_minor() {
	# Resolve a minor series (e.g. "1.35" or "v1.35") to the newest configured
	# entry of that series. Prints "<kube> <crio>" via resolve_exact on success;
	# returns 1 when the series has no configured builds.
	# input: 1.35 or v1.35
	local want_minor
	want_minor="$(minor_of_version "${1#v}.0")"
	local matches=()
	local c kube crio
	for c in "${CONFIGS[@]}"; do
		kube="${c%%=*}"
		crio="${c##*=}"
		if [[ "$(minor_of_version "$kube")" == "$want_minor" ]]; then
			matches+=("${kube}=${crio}")
		fi
	done
	if [[ ${#matches[@]} -eq 0 ]]; then
		return 1
	fi
	local latest_kube
	latest_kube="$(
		for c in "${matches[@]}"; do
			# BUG FIX: was "${c%%=*#}" — the "=*#" pattern never matches
			# (entries contain no "#"), so the "=cri-o..." suffix stayed
			# attached and the resolve_exact call below always failed.
			echo "${c%%=*}" | sed 's/^v//'
		done | version_sort | tail -n1
	)"
	resolve_exact "$latest_kube"
}
build_exact() {
    # Build a single exact kube version. Exits with status 1 (after printing
    # the available targets) when the version is not configured.
    local resolved
    if ! resolved="$(resolve_exact "$1")"; then
        echo "❌ Unknown exact target: $1" >&2
        echo >&2
        list_configs >&2
        exit 1
    fi
    local kube crio
    read -r kube crio <<< "$resolved"
    echo ">>> Building Kubernetes $kube with $crio"
    make release CRIO_VERSION="$crio" KUBE_VERSION="$kube"
}
build_minor() {
    # Resolve a minor series to its newest supported patch and build it.
    # Exits with status 1 (after printing supported minors) when unresolvable.
    local resolved
    if ! resolved="$(resolve_minor "$1")"; then
        echo "❌ No supported target found for minor: $1" >&2
        echo >&2
        list_minors >&2
        exit 1
    fi
    local kube crio
    read -r kube crio <<< "$resolved"
    echo ">>> Minor $1 resolved to latest supported target ${kube#v}"
    echo ">>> Building Kubernetes $kube with $crio"
    make release CRIO_VERSION="$crio" KUBE_VERSION="$kube"
}
build_range() {
	# Build the newest supported patch of every configured minor series that
	# falls inside an inclusive range such as "1.33-1.35".
	# input like 1.33-1.35
	local range="$1"
	local start="${range%-*}"
	local end="${range#*-}"
	local start_minor="${start#v}"
	local end_minor="${end#v}"
	# Tracks whether at least one minor in the range was actually built.
	local any=0
	while read -r minor; do
		[ -n "$minor" ] || continue
		# simple lexical-safe because format is N.N and sort -V was used
		# Skip minors that sort below the start of the range.
		if [[ "$(printf '%s\n%s\n' "$start_minor" "$minor" | sort -V | head -n1)" != "$start_minor" ]]; then
			continue
		fi
		# Skip minors that sort above the end of the range.
		if [[ "$(printf '%s\n%s\n' "$minor" "$end_minor" | sort -V | head -n1)" != "$minor" ]]; then
			continue
		fi
		any=1
		build_minor "$minor"
	done < <(
		# All configured minors, de-duplicated, ascending. Process
		# substitution keeps the while body in this shell so $any persists.
		for c in "${CONFIGS[@]}"; do
			kube="${c%%=*}"
			minor_of_version "$kube"
		done | sort -uV
	)
	if [[ $any -eq 0 ]]; then
		echo "❌ No supported minors found in range: $range" >&2
		echo >&2
		list_minors >&2
		exit 1
	fi
}
build_all() {
    # Build every entry in CONFIGS, in the order declared.
    local entry kube crio
    for entry in "${CONFIGS[@]}"; do
        kube="${entry%%=*}"
        crio="${entry##*=}"
        echo ">>> Building Kubernetes $kube with $crio"
        make release CRIO_VERSION="$crio" KUBE_VERSION="$kube"
    done
}
usage() {
	# Print the command-line help text. The heredoc delimiter is quoted
	# ('EOF') so no expansion happens inside the body.
	cat <<'EOF'
Usage:
./devtools/build-all.sh
./devtools/build-all.sh list
./devtools/build-all.sh list-minors
./devtools/build-all.sh 1.35.3
./devtools/build-all.sh 1.35
./devtools/build-all.sh 1.33-1.35
Behavior:
no args Build all configured targets
list List all exact configured targets
list-minors List latest supported target per minor
X.Y.Z Build exact version
X.Y Build latest supported patch in that minor
X.Y-A.B Build latest supported patch for each minor in range
EOF
}
# ===== ENTRY =====
# With no arguments, build every configured target.
if [[ $# -eq 0 ]]; then
	echo "No target specified -> building all configured targets"
	build_all
	exit 0
fi
# Pattern order matters: "-*" must precede "*-*" so "--help"/"-h" are not
# parsed as version ranges, and "*.*.*" must precede "*.*" so an exact
# X.Y.Z is not treated as a minor series.
case "$1" in
list)
	list_configs
	;;
list-minors)
	list_minors
	;;
-*|--help|help)
	usage
	;;
*-*)
	# Range form X.Y-A.B
	build_range "$1"
	;;
*.*.*)
	# Exact version X.Y.Z
	build_exact "$1"
	;;
*.*)
	# Minor series X.Y
	build_minor "$1"
	;;
*)
	echo "❌ Unrecognized target: $1" >&2
	echo >&2
	usage >&2
	exit 1
	;;
esac

View File

@@ -6,7 +6,7 @@ OUT_DIR="$( realpath "$SCRIPT_DIR"/../out/ )"
set -e
BASE_URL="http://localhost:8000"
TARGET_VERSION="v1.34.6"
TARGET_VERSION="v$1"
STABLE_VERSION="v1.34.6"
NAME="my-upgrade-1"

View File

@@ -38,7 +38,7 @@ PART_IMAGE := $(OUT_DIR)/monok8s-$(KUBE_VERSION)-$(TAG).ext4.zst
KERNEL_IMAGE := $(OUT_DIR)/Image.gz
BUILD_BASE_STAMP := $(OUT_DIR)/.build-base-$(TAG).stamp
DOWNLOAD_PACKAGES_STAMP := $(PACKAGES_DIR)/.download-packages.stamp
DOWNLOAD_PACKAGES_STAMP := $(PACKAGES_DIR)/.stamp-$(KUBE_VERSION)-$(CRIO_VERSION)
ALPINE_SERIES := $(word 1,$(subst ., ,$(ALPINE_VER))).$(word 2,$(subst ., ,$(ALPINE_VER)))