Compare commits

..

72 Commits

Author SHA256 Message Date
2a1a5a8f08 Worker node upgrade chain 2026-04-29 19:28:18 +08:00
e1959bee6d Refactor into RenderAgent and ApplyAgent 2026-04-29 16:41:40 +08:00
6d290a97ae Passed cilium connectivity tests as a worker node 2026-04-29 04:45:25 +08:00
e86b3b3383 Added supervised-init.sh to retry ctl init 2026-04-28 03:56:27 +08:00
7b31a1dec3 Removed build dependency on jq 2026-04-28 00:57:23 +08:00
84d2c7c8e8 Update README.md 2026-04-28 00:41:20 +08:00
1d45b07e1a Added setup-build-host.sh 2026-04-28 00:15:32 +08:00
ee890a5494 Typo 2026-04-28 00:02:01 +08:00
aa57177db0 Ensure loop ready 2026-04-27 23:58:56 +08:00
dcb4d8d4c6 Removed local proxy 2026-04-27 23:12:27 +08:00
7ade7498c9 Split monolith agent into controller and agent 2026-04-27 01:32:39 +08:00
de830a4e3b Mark complete after upgrade success 2026-04-27 01:24:10 +08:00
d7c2dac944 Refine controller template and probe listeners 2026-04-27 00:28:25 +08:00
8fae920fc8 Renamed ControlAgent to NodeControl 2026-04-25 04:38:23 +08:00
1354e83813 Added: ctl create controller 2026-04-25 00:46:43 +08:00
e4a19e5926 Migrate to generated clients 2026-04-24 02:51:02 +08:00
4549b9d167 Controller to not touch osup if possible 2026-04-23 19:05:07 +08:00
9eba55e7ee Drop admission logic. Use a plain controller instead 2026-04-22 05:01:48 +08:00
6ddff7c433 Drafting ctl controller 2026-04-20 02:51:02 +08:00
c6b399ba22 Added pkgclean & distclean for clitools 2026-04-17 03:24:17 +08:00
e138ec1254 Removed git dependency on make 2026-04-17 03:16:27 +08:00
8adf03a2a4 Removed curl dependency on make 2026-04-17 02:42:27 +08:00
286241c7fb Added back missing .buildinfo dep 2026-04-16 22:15:59 +08:00
f6788c0894 Removed go dependency on make 2026-04-16 21:51:16 +08:00
16aa141aa1 Fixed build tag error on make 2026-04-16 20:42:17 +08:00
65c643d7a2 Added migrations.d 2026-04-16 10:44:42 +08:00
f1a7074528 Cleanup k8s states after upgrade 2026-04-15 04:53:28 +08:00
9225857db6 VPP just won't work. God help 2026-04-14 03:58:23 +08:00
9027132a7d Fixed oops due to incorrect build flags 2026-04-10 17:31:06 +08:00
ee1f78f496 Oops: 0000000096000004 2026-04-10 00:41:11 +08:00
b8bc6a13cf VPP = Very Painful Process 2026-04-09 21:30:12 +08:00
4eae2621c9 Trying to build vpp 2026-04-08 21:13:21 +08:00
0c5f490dfc Temp commit, nothing works yet 2026-04-08 01:12:49 +08:00
bc4b124246 Throttle disk write to prevent etcd puking during upgrade 2026-04-07 17:34:38 +08:00
11e2c96173 Preflight healtcheck ourselves 2026-04-07 03:11:26 +08:00
578b3e6a6f Initial upgrade flow for control-plane 2026-04-06 23:49:39 +08:00
c6f89651ce Update the docs 2026-04-06 05:47:34 +08:00
d662162921 Fixed some race conditions 2026-04-06 05:18:06 +08:00
50d9440e0a Reduce the number of bootenv vars 2026-04-06 02:20:41 +08:00
f8db036a5f Split action into kubectl 2026-04-05 16:37:41 +08:00
e3e2d4a04a make to also output catalog 2026-04-04 18:00:35 +00:00
abc749a4b0 Writes bootcmd 2026-04-04 22:47:51 +08:00
b8a5f197f4 Split readme into small files 2026-04-04 12:46:56 +00:00
517cc2e01d control agent can now uboot commands 2026-04-04 20:19:25 +08:00
4f490ab37e Writes and verify image 2026-04-04 02:45:46 +08:00
9cb593ffc0 OSUpgrade: Version Planning 2026-04-03 08:57:27 +08:00
a14fa23e7a Split action into kubectl 2026-04-03 03:26:39 +08:00
53f9f9376a Generate CRDs with controller-gen instead 2026-04-03 03:10:20 +08:00
1ce15e9ac5 Moved all custom naming into one place 2026-04-03 01:48:07 +08:00
3c0df319d7 Draft for OSUpgrade Spec 2026-04-03 01:04:40 +08:00
e7b8e406a8 Agent can now poll osupgrade resources from cluster 2026-04-02 00:07:32 +08:00
145b0a4662 Added some initial daemonsets 2026-04-01 22:55:22 +08:00
3e6df38f45 Slightly simplify the build 2026-04-01 19:28:42 +08:00
9ba8063f19 image needs to be localhost instead of docker.io 2026-04-01 02:23:04 +08:00
7c3e096fec Ship the control-agent controller for node 2026-04-01 02:03:37 +08:00
b6c110410f Ditch rc-service control-agent, use daemonset insteaad 2026-04-01 00:00:06 +08:00
d8275622e6 Configures uboot commands 2026-03-31 18:15:24 +08:00
f67c338e60 Draft for making OTA images 2026-03-31 04:26:03 +08:00
3bbd0a00a8 Agent not work yet. Need to build Part B first 2026-03-30 22:30:42 +08:00
bdbc29649c Some basic agent startup 2026-03-30 21:50:04 +08:00
fcf7371e9e Removed old conf var 2026-03-30 19:40:47 +08:00
d9ffd1b446 Update ctl init to support env file 2026-03-30 19:33:44 +08:00
60a9ffeaf6 Removed AutoRemoveTaint 2026-03-30 18:47:19 +08:00
68e7dcd001 Can now reconcile 2026-03-30 18:41:18 +08:00
0aa4065c26 Added join step 2026-03-30 17:40:27 +08:00
210fabdcc6 Added kubeadm init 2026-03-30 01:31:38 +08:00
5fbc2846a1 Can ctl init with index 2026-03-29 23:55:59 +08:00
03a5e5bedb detect local cluster states 2026-03-29 22:27:13 +08:00
ecceff225f Matches ctl version to upstream 2026-03-28 20:28:22 +08:00
848daefffe clitools: Added ConfigureDNS 2026-03-27 20:25:54 +08:00
87aa1d4b0b Added some ctl boilerplate 2026-03-27 18:34:53 +08:00
bf85462e34 Fixed reboot not working 2026-03-26 20:44:04 +08:00
171 changed files with 17143 additions and 1381 deletions

2
.dockerignore Normal file
View File

@@ -0,0 +1,2 @@
out/*.img.gz
out/*.ext4.zst

5
.gitignore vendored
View File

@@ -1,3 +1,8 @@
build.env.work
cluster.env.work
.DS_Store
clitools/bin
packages/
out/
*_gen.go
*.swp

248
README.md
View File

@@ -1,110 +1,196 @@
# monok8s
Kubernetes node firmware (built with Linux) for Mono Gateway Development Kit
This is an Alpine-based Kubernetes image for Mono's Gateway Development Kit.
https://docs.mono.si/gateway-development-kit/getting-started
It gives you a ready-to-boot Kubernetes control-plane image so you can get your device running first, then learn and customize from there.
## DISCLAIMER
USE AT YOUR OWN RISK. I leverage ChatGPT heavily for this project, and I am still testing everything myself.
Project/device docs: <https://docs.mono.si/gateway-development-kit/getting-started>
## Build
---
Find the latest package versions and update build.env
* [kernel](https://github.com/nxp-qoriq/linux/archive/refs/tags/)
* [busybox](https://github.com/mirror/busybox/archive/refs/tags/)
* [CRI-O](https://github.com/cri-o/cri-o/releases)
* [Kubelet](https://kubernetes.io/releases/download/)
## What you get
Then run
The default image boots into a small Kubernetes control-plane environment with:
- Alpine Linux
- Kubernetes initialized through `kubeadm`
- read-only root filesystem layout
- A/B rootfs layout for safer OS upgrades
- a Kubernetes-style OS upgrade path through `OSUpgrade`
You do **not** need to know Go or understand the internal build system to try the image.
---
## Before you start
You need:
- a Linux build machine or VM
- Docker
- `make`
- basic command-line comfort
If you are building on a fresh Debian machine, you can install the usual build dependencies with:
```bash
devtools/setup-build-host.sh
```
Or install the minimum packages yourself:
```bash
sudo apt-get update
sudo apt-get install -y docker.io make qemu-user-static binfmt-support
```
Make sure your user can run Docker, or use `sudo` where needed.
---
## Fast path: build an image
Download the project tarball, extract it, then run:
```bash
make release
```
Then run, follow the instructions
```
make cluster-config
```
When the build finishes, check the `out/` directory for the generated image artifacts.
## Flashing
That is the main path most users should try first.
#### USB Drive
1. `make release`
2. Format the USB in vfat
3. Copy out/[RELEASE].img.gz and out/board.itb to your usb drive's root
4. Run
```
usb start
usb tree
fatls usb 0:1 # For fat
ext4ls usb 0:1 # For ext4 if you insist
fatload usb 0 0x80000000 board.itb
---
setenv bootargs "console=ttyS0,115200 earlycon=uart8250,mmio,0x21c0500 root=/dev/ram0 rootwait rw"
bootm 0x80000000
```
## Flash the image
5. Inside initramfs, run
```
flash-emmc.sh
```
After building, flash the generated image to your device.
4. If it boots, create the A/B deployment scheme
- (WORK IN PROGRESS)
Start with one of these guides:
#### tftp (network is required)
1. Put out/[RELEASE].img.gz and out/board.itb into your ftp server
```
setenv ipaddr 10.0.0.153
setenv serverip 10.0.0.129
tftp 0x80000000 board.itb
- [Flash over USB](docs/flashing-usb.md)
- [Flash over network / TFTP](docs/flashing-network.md)
setenv bootargs "console=ttyS0,115200 earlycon=uart8250,mmio,0x21c0500 root=/dev/ram0 rootwait rw"
bootm 0x80000000
```
2. Inside initramfs, run
```
flash-emmc.sh
```
USB flashing is usually the easiest path when you are setting up the device for the first time.
---
### Making sub stages
```bash
make build-base # The image that builds the kernel and everything else
make kernel # Builds our kernel from NXP
make initramfs
make itb # Builds out/board.itb (contains the kernel and the initramfs)
```
## First boot
## Architecture
- A/B deployment (So we can just do kubectl apply upgrade.yaml with version jumps. Agent will walk through it automatically)
- Read-only OS
The default configuration is intended to boot as a first-time Kubernetes control-plane node.
## Upgrade process
Rough idea
Default-style control-plane configuration looks like this:
```bash
./configure
# - asks for some config for kubelet
# - Join a cluster? Start a cluster?
make cluster-config \
MKS_HOSTNAME=monok8s-master \
MKS_CLUSTER_ROLE=control-plane \
MKS_INIT_CONTROL_PLANE=true \
MKS_MGMT_ADDRESS=10.0.0.10/24 \
MKS_APISERVER_ADVERTISE_ADDRESS=10.0.0.10
```
If you are just trying the image for the first time, start with the default control-plane setup. Worker-node setup is still incomplete.
For all available configuration values, see:
- [configs/cluster.env.default](configs/cluster.env.default)
For worker node
```
make cluster-config \
MKS_HOSTNAME=monok8s-worker \
MKS_CLUSTER_ROLE=worker \
MKS_INIT_CONTROL_PLANE=no \
MKS_MGMT_ADDRESS=10.0.0.10/24 \
MKS_APISERVER_ADVERTISE_ADDRESS=10.0.0.10 \
MKS_API_SERVER_ENDPOINT=10.0.0.1:6443 \
MKS_CNI_PLUGIN=none \
MKS_BOOTSTRAP_TOKEN=abcd12.ef3456789abcdef0 \
MKS_DISCOVERY_TOKEN_CA_CERT_HASH=sha256:9f1c2b3a4d5e6f7890abc1234567890abcdef1234567890abcdef1234567890ab
```
---
## Getting shell access
For first-time setup, UART is the most direct option because it is already part of the flashing process.
After the device is running, the recommended path is:
- [Install an SSH pod](docs/installing-ssh-pod.md)
---
## Upgrading
monok8s includes a Kubernetes-style OS upgrade flow using the `OSUpgrade` custom resource.
See:
- [OTA upgrade guide](docs/ota.md)
The currently tested upgrade chain is:
- `1.33.3 -> 1.33.10`
- `1.33.10 -> 1.34.6`
- `1.34.6 -> 1.35.3`
Tested worker node upgrade chain:
- `1.33.3 -> 1.34.1`
- `1.33.1 -> 1.35.3`
---
## Current status
This project is usable for experimenting with a single control-plane device image, but it is still a development project.
Working today:
- initramfs boot flow
- Alpine boot
- Kubernetes control-plane bootstrap
- default bridge CNI
- control-plane OS upgrade path
Still in progress:
- Kubernetes worker-node support
- Cilium support
- VPP/DPAA networking experiments
---
## Common build issue
### `chroot: failed to run command '/bin/sh': Exec format error`
This usually means the build host cannot run ARM64 binaries.
On Debian, install ARM64 emulation support:
```bash
sudo apt-get install -y qemu-user-static binfmt-support
```
Then run the build again:
```bash
make release
# Copy the new image to the upgrade-scheduler
kubectl cp -n kube-system upgrade-scheduler:/tmp/upgrade.img
# The upgrade scheduler reads the file and issues a self-reboot
reboot
# uboot to boot into partition B
```
```yaml
PENDING
```
---
## NOTES
### The device's dts files are located at here
https://github.com/we-are-mono/OpenWRT-ASK/tree/mono-25.12.0-rc3/target/linux/layerscape/files/arch/arm64/boot/dts/freescale
* We need both `mono-gateway-dk-sdk.dts` and `mono-gateway-dk.dts` since the sdk one includes the non-sdk one.
* The actual dts being used is the `mono-gateway-dk-sdk.dts`
## Notes
This is not a general-purpose Linux distribution. It is a device image for experimenting with Kubernetes on Mono's Gateway Development Kit.
The safest path is:
1. build the default image,
2. flash it,
3. boot the control-plane,
4. confirm Kubernetes is running,
5. customize only after the base image works.

View File

@@ -2,11 +2,12 @@
set -euo pipefail
source /utils.sh
/preload-k8s-images.sh || exit 1
mkdir -p "$ROOTFS/var/cache/apk"
mkdir -p "$ROOTFS/opt/monok8s/config"
mkdir -p "$ROOTFS/build"
export CTL_BIN_LAYER=$( skopeo inspect docker-daemon:localhost/monok8s/node-control:$TAG | jq -r '.Layers[0] | sub("^sha256:"; "")' )
mkdir -p \
"$ROOTFS/dev" \
"$ROOTFS/proc" \
@@ -14,7 +15,12 @@ mkdir -p \
"$ROOTFS/run" \
"$ROOTFS/data" \
"$ROOTFS/var" \
"$ROOTFS/tmp"
"$ROOTFS/tmp" \
"$ROOTFS/build" \
"$ROOTFS/var/cache/apk" \
"$ROOTFS/usr/lib/monok8s/crds" \
"$ROOTFS/usr/lib/monok8s/migrations.d/k8s" \
"$ROOTFS/opt/monok8s/config"
mount --bind /var/cache/apk "$ROOTFS/var/cache/apk"
mount --bind /dev "$ROOTFS/dev"
@@ -25,7 +31,16 @@ mount --bind /run "$ROOTFS/run"
cp /usr/bin/qemu-aarch64-static "$ROOTFS/usr/bin/"
cp /etc/resolv.conf "$ROOTFS/etc/resolv.conf"
cp /build/crio.tar.gz "$ROOTFS/build/"
cp -r /build/rootfs/* "$ROOTFS/"
cp /build/crds/*.yaml "$ROOTFS/usr/lib/monok8s/crds"
KUBE_MINOR=$(printf '%s\n' "$KUBE_VERSION" | sed -E 's/^v?([0-9]+\.[0-9]+).*/\1/')
MIG_SRC="/build/migrations/k8s/$KUBE_MINOR"
MIG_DST="$ROOTFS/usr/lib/monok8s/migrations.d/k8s/$KUBE_MINOR"
if [ -d "$MIG_SRC" ]; then
mkdir -p "$MIG_DST"
cp -a "$MIG_SRC"/. "$MIG_DST"/
fi
chroot "$ROOTFS" /bin/sh -c "ln -s /var/cache/apk /etc/apk/cache"
# chroot "$ROOTFS" /bin/sh -c "apk update"
@@ -47,55 +62,179 @@ umount "$ROOTFS/sys"
umount "$ROOTFS/run"
rm -r "$ROOTFS/build"
rm "$ROOTFS/etc/resolv.conf"
### Begin making full disk image for the device
echo "##################################################### Packaging RootFS "$( du -sh "$ROOTFS/" )
#!/bin/bash
set -euo pipefail
IMG=output.img
SIZE=8GB
ROOTFS="${ROOTFS:?ROOTFS is required}"
BOARD_ITB="${BOARD_ITB:-/build/board.itb}"
dd if=/dev/zero of="$IMG" bs=1 count=0 seek=$SIZE
IMG="${IMG:-output.img}"
DISK_SIZE="${DISK_SIZE:-8G}"
ROOTFS_IMG="${ROOTFS_IMG:-rootfs.ext4}"
ROOTFS_IMG_ZST="${ROOTFS_IMG_ZST:-rootfs.ext4.zst}"
ROOTFS_PART_SIZE_MIB="${ROOTFS_PART_SIZE_MIB:-2560}"
FAKE_DEV="/tmp/dev"
MNT_ROOTFS_IMG="/mnt/rootfs-img"
MNT_DATA="/mnt/data"
LOOP=""
ROOTFS_LOOP=""
TMP_LOOP=""
ROOTFS_TMP_LOOP=""
cleanup_fake_nodes() {
    # Delete any fake block-device nodes under $FAKE_DEV whose name
    # starts with the given prefix. A missing prefix is a no-op.
    local node_prefix="$1"

    if [ -z "$node_prefix" ]; then
        return 0
    fi
    find "$FAKE_DEV" -maxdepth 1 -type b -name "${node_prefix}*" -exec rm -f {} \; 2>/dev/null || true
}
# Idempotent teardown used as the EXIT trap: unmount working mounts,
# detach loop devices, and remove the fake /dev nodes created for them.
cleanup() {
# Best-effort from here on; never let cleanup abort the trap handler.
set +e
mountpoint -q "$MNT_ROOTFS_IMG" && umount "$MNT_ROOTFS_IMG"
mountpoint -q "$MNT_DATA" && umount "$MNT_DATA"
# Detach loops only while still tracked — the main flow clears these
# variables after detaching them itself.
if [ -n "$ROOTFS_LOOP" ]; then
losetup -d "$ROOTFS_LOOP" 2>/dev/null || true
fi
if [ -n "$LOOP" ]; then
losetup -d "$LOOP" 2>/dev/null || true
fi
# Remove the fake device nodes matching each loop device's basename.
if [ -n "$ROOTFS_LOOP" ]; then
cleanup_fake_nodes "$(basename "$ROOTFS_LOOP")"
fi
if [ -n "$LOOP" ]; then
cleanup_fake_nodes "$(basename "$LOOP")"
fi
}
trap cleanup EXIT
mkdir -p "$FAKE_DEV" "$MNT_ROOTFS_IMG" "$MNT_DATA"
echo "##################################################### Packaging RootFS $(du -sh "$ROOTFS" | awk '{print $1}')"
ensure_loop_ready
###############################################################################
# 1. Build reusable rootfs ext4 image once
###############################################################################
ROOTFS_BYTES=$(du -s -B1 "$ROOTFS" | awk '{print $1}')
EXTRA_BYTES=$((256 * 1024 * 1024))
IMG_BYTES=$(( ROOTFS_BYTES + ROOTFS_BYTES / 4 + EXTRA_BYTES ))
ALIGN=$((4 * 1024 * 1024))
IMG_BYTES=$(( (IMG_BYTES + ALIGN - 1) / ALIGN * ALIGN ))
MAX_BYTES=$(( ROOTFS_PART_SIZE_MIB * 1024 * 1024 ))
if [ "$IMG_BYTES" -ge "$MAX_BYTES" ]; then
echo "ERROR: estimated rootfs image size $IMG_BYTES exceeds slot size $MAX_BYTES" >&2
exit 1
fi
rm -f "$ROOTFS_IMG" "$ROOTFS_IMG_ZST"
truncate -s "$IMG_BYTES" "$ROOTFS_IMG"
mkfs.ext4 -F -L rootfs "$ROOTFS_IMG"
ROOTFS_LOOP=$(losetup --find --show -P "$ROOTFS_IMG")
/sync-loop.sh "$ROOTFS_LOOP"
# For a raw ext4 image there is usually no partition, so mount the loop device directly.
mount "$ROOTFS_LOOP" "$MNT_ROOTFS_IMG"
(
cd "$ROOTFS"
tar cpf - --exclude='./var' .
) | (
cd "$MNT_ROOTFS_IMG"
tar xpf -
)
mkdir -p "$MNT_ROOTFS_IMG/var"
mkdir -p "$MNT_ROOTFS_IMG/boot"
cp "$BOARD_ITB" "$MNT_ROOTFS_IMG/boot/kernel.itb"
sync
umount "$MNT_ROOTFS_IMG"
losetup -d "$ROOTFS_LOOP"
cleanup_fake_nodes "$(basename "$ROOTFS_LOOP")"
ROOTFS_LOOP=""
e2fsck -fy "$ROOTFS_IMG"
resize2fs -M "$ROOTFS_IMG"
e2fsck -fy "$ROOTFS_IMG"
echo "##################################################### Compressing OTA Image"
zstd -19 -T0 -f "$ROOTFS_IMG" -o "$ROOTFS_IMG_ZST"
tee >(wc -c > "${ROOTFS_IMG}.size") < "$ROOTFS_IMG" | sha256sum > "${ROOTFS_IMG}.sha256"
tee >(wc -c > "${ROOTFS_IMG_ZST}.size") < "$ROOTFS_IMG_ZST" | sha256sum > "${ROOTFS_IMG_ZST}.sha256"
cat > catalog.txt <<EOF
- version: $KUBE_VERSION
url: "monok8s-$KUBE_VERSION-$TAG.ext4.zst"
checksum: $(cut -d' ' -f1 < "${ROOTFS_IMG}.sha256")
size: $(< "${ROOTFS_IMG}.size")
zChecksum: $(cut -d' ' -f1 < "${ROOTFS_IMG_ZST}.sha256")
zSize: $(< "${ROOTFS_IMG_ZST}.size")
tag: "$TAG"
EOF
###############################################################################
# 2. Build full disk image
###############################################################################
rm -f "$IMG"
truncate -s "$DISK_SIZE" "$IMG"
sgdisk -o "$IMG" \
-n 1:2048:+64M -t 1:0700 -c 1:config \
-n 2:0:+2G -t 2:8300 -c 2:rootfsA \
-n 3:0:+2G -t 3:8300 -c 3:rootfsB \
-n 2:0:+2560M -t 2:8300 -c 2:rootfsA \
-n 3:0:+2560M -t 3:8300 -c 3:rootfsB \
-n 4:0:0 -t 4:8300 -c 4:data
losetup -D
LOOP=$(losetup --find --show -P "$IMG")
/sync-loop.sh "$LOOP"
TMP_LOOP="/tmp$LOOP"
TMP_LOOP="$FAKE_DEV/$(basename "$LOOP")"
mkfs.vfat -F 32 -n MONOK8S_CFG "${TMP_LOOP}p1"
mkfs.ext4 -F "${TMP_LOOP}p2"
mkfs.ext4 -F "${TMP_LOOP}p3"
mkfs.ext4 -F "${TMP_LOOP}p4"
mkfs.ext4 -F -L rootfsB "${TMP_LOOP}p3"
mkfs.ext4 -F -L data "${TMP_LOOP}p4"
mkdir -p /mnt/img-root /mnt/data
dd if="$ROOTFS_IMG" of="${TMP_LOOP}p2" bs=4M conv=fsync
mount "${TMP_LOOP}p2" /mnt/img-root
mount "${TMP_LOOP}p4" /mnt/data
# Grow each filesystem to fill its partition
e2fsck -fy "${TMP_LOOP}p2"
resize2fs "${TMP_LOOP}p2"
# Put the real /var onto the data partition
cp -a "$ROOTFS/var" /mnt/data/
mkdir -p /mnt/data/etc-overlay/work
mkdir -p /mnt/data/etc-overlay/upper
# Populate data partition
mount "${TMP_LOOP}p4" "$MNT_DATA"
# Copy rootfs to root partition, but exclude /var
cp -a "$ROOTFS"/. /mnt/img-root/
rm -rf /mnt/img-root/var
mkdir -p /mnt/img-root/var
mkdir -p /mnt/img-root/boot
cp /build/board.itb /mnt/img-root/boot/kernel.itb
cp -a "$ROOTFS/var" "$MNT_DATA/"
mkdir -p "$MNT_DATA/etc-overlay/work"
mkdir -p "$MNT_DATA/etc-overlay/upper"
sync
umount /mnt/img-root
umount /mnt/data
umount "$MNT_DATA"
losetup -d "$LOOP"
cleanup_fake_nodes "$(basename "$LOOP")"
LOOP=""
echo "##################################################### Compressing Image"
echo "Built artifacts:"
echo " Full disk image: $IMG"
echo " Rootfs OTA image: $ROOTFS_IMG"
echo " Rootfs OTA compressed: $ROOTFS_IMG_ZST"
echo "##################################################### Compressing Full Disk Image"
gzip "/build/$IMG"

View File

@@ -7,9 +7,9 @@ rc-update add devfs sysinit
rc-update add procfs sysinit
rc-update add sysfs sysinit
rc-update add cgroups sysinit
rc-update add hwclock boot
rc-update add fancontrol boot
rc-update add loopback boot
rc-update add hostname boot
rc-update add localmount boot
rc-update add apply-node-config default
rc-update add bootstrap-cluster default
rc-update add local default

View File

@@ -4,13 +4,14 @@ cd /build
echo "##################################################### Installing basic packages"
apk add alpine-base \
openrc busybox-openrc bash nftables \
lm-sensors lm-sensors-fancontrol lm-sensors-fancontrol-openrc e2fsprogs
openrc busybox-openrc bash nftables nmap-ncat \
lm-sensors lm-sensors-fancontrol lm-sensors-fancontrol-openrc e2fsprogs u-boot-tools
# For diagnostics
apk add \
iproute2 iproute2-ss curl bind-tools procps strace tcpdump lsof jq binutils \
openssl nftables conntrack-tools ethtool findmnt kmod coreutils util-linux
openssl conntrack-tools ethtool findmnt kmod coreutils util-linux zstd libcap-utils \
iotop sysstat
echo '[ -x /bin/bash ] && exec /bin/bash -l' >> "/root/.profile"
# Compat layer for kubelet for now. Will look into building it myself later, if needed.
@@ -31,9 +32,20 @@ if [ $? -ne 0 ]; then
exit $?
fi
sed -i "s/default_runtime = \"crun\"/\0\ncgroup_manager = \"cgroupfs\"/g" /etc/crio/crio.conf.d/10-crio.conf
sed -i -e "s/default_runtime = \"crun\"/\0\ncgroup_manager = \"cgroupfs\"/g" \
/etc/crio/crio.conf.d/10-crio.conf
grep cgroup_manager /etc/crio/crio.conf.d/10-crio.conf || exit 1
echo "##################################################### Installing Control Agent"
CTL_BIN=$(find /usr/lib/monok8s/imagestore -name "ctl" | grep "$CTL_BIN_LAYER")
if [ -z "$CTL_BIN" ]; then
echo "Unable to locate the control agent binary"
exit 1
fi
ln -s "$CTL_BIN" /usr/local/bin/ctl
ctl version
mkdir -p /var/run/crio
mkdir -p /var/lib/containers/storage
mkdir -p /var/lib/cni

View File

@@ -0,0 +1,35 @@
#!/bin/sh
set -eu

# Strip the removed kubelet flag --pod-infra-container-image from
# kubeadm-flags.env so kubelet does not fail with "unknown flag"
# after an upgrade.
#
# Timeline:
# - Deprecated before v1.27
# - Removed in newer kubelet versions (>=1.27+)
# - kubeadm may still write it into:
#   /var/lib/kubelet/kubeadm-flags.env
#
# References:
# - https://github.com/kubernetes/kubeadm/issues/3281
# - https://github.com/kubernetes/kubernetes/pull/122739
# - https://kubernetes.io/blog/2022/04/07/upcoming-changes-in-kubernetes-1-24/
#
# Root cause:
# - Sandbox (pause) image is now managed by CRI (containerd/CRI-O),
#   not kubelet flags.

FILE=/var/lib/kubelet/kubeadm-flags.env

# Nothing to do if the file or the flag is absent.
[ -f "$FILE" ] || exit 0
grep -q -- '--pod-infra-container-image=' "$FILE" || exit 0

# Remove only the flag and its value. The value match must stop at a
# space as well as at the closing quote ([^" ]*): the previous pattern
# ([^"]*) was greedy and also deleted every flag that followed it up to
# the end of the quoted string. The second expression handles the flag
# appearing first inside the quotes, where it has no leading space.
sed -i \
  -e 's/ --pod-infra-container-image=[^" ]*//g' \
  -e 's/--pod-infra-container-image=[^" ]* \{0,1\}//g' \
  "$FILE"

echo "Removed deprecated kubelet flag --pod-infra-container-image from $FILE"

View File

@@ -18,6 +18,17 @@ STORAGE_DRIVER="${STORAGE_DRIVER:-overlay}"
# Optional. Needed by many overlay-in-container setups.
FUSE_OVERLAYFS="${FUSE_OVERLAYFS:-/usr/bin/fuse-overlayfs}"
# Optional extra images to include in addition to kubeadm's required images.
# Example:
# EXTRA_IMAGES=(
# "docker.io/library/busybox:1.36"
# "quay.io/cilium/cilium:v1.17.2"
# )
EXTRA_IMAGES=(
"${EXTRA_IMAGES[@]:-}"
"docker-daemon:localhost/monok8s/node-control:$TAG"
)
# Keep archive cache version/arch scoped so downloads do not get mixed.
ARCHIVE_DIR="${KUBE_IMG_CACHE}/archives/${ARCH}/${KUBE_VERSION}"
@@ -70,48 +81,96 @@ fi
export CONTAINERS_STORAGE_CONF="${STORAGE_CONF}"
# Sanity check: list required images using the target kubeadm binary.
mapfile -t kube_images < <(
# Get kubeadm-required images.
mapfile -t kubeadm_images < <(
qemu-aarch64-static \
"${ROOTFS}/usr/local/bin/kubeadm" \
config images list \
--kubernetes-version "${KUBE_VERSION}"
)
if [ "${#kube_images[@]}" -eq 0 ]; then
echo "No kubeadm images returned"
if [ "${#kubeadm_images[@]}" -eq 0 ] && [ "${#EXTRA_IMAGES[@]}" -eq 0 ]; then
echo "No images to seed"
exit 1
fi
for img in "${kube_images[@]}"; do
# Merge kubeadm images + EXTRA_IMAGES, preserving order and removing duplicates.
images=()
declare -A seen=()
for img in "${kubeadm_images[@]}" "${EXTRA_IMAGES[@]}"; do
[ -n "${img}" ] || continue
if [ -z "${seen[$img]+x}" ]; then
images+=("${img}")
seen["$img"]=1
fi
done
echo "Images to seed:"
for img in "${images[@]}"; do
echo " ${img}"
done
echo
for img in "${images[@]}"; do
[ -n "${img}" ] || continue
safe_name="$(printf '%s' "${img}" | sed 's#/#_#g; s#:#__#g')"
case "$img" in
docker-daemon:*)
SRC="$img"
IMG_REF="${img#docker-daemon:}"
USE_ARCHIVE=0
;;
docker://*)
SRC="$img"
IMG_REF="${img#docker://}"
USE_ARCHIVE=1
;;
oci-archive:*)
SRC="$img"
IMG_REF="${img#oci-archive:}"
USE_ARCHIVE=0
;;
*)
# Default behavior for non-prefixed refs:
# treat as remote registry image.
SRC="docker://${img}"
IMG_REF="$img"
USE_ARCHIVE=1
;;
esac
safe_name="$(printf '%s' "${IMG_REF}" | sed 's#/#_#g; s#:#__#g')"
archive="${ARCHIVE_DIR}/${safe_name}.tar"
if [ ! -f "${archive}" ]; then
echo "Caching archive: ${img}"
skopeo copy \
--override-os "${OS}" \
--override-arch "${ARCH}" \
"docker://${img}" \
"oci-archive:${archive}"
if [ "${USE_ARCHIVE}" = "1" ]; then
if [ ! -f "${archive}" ]; then
echo "Caching archive: ${IMG_REF}"
skopeo copy \
--override-os "${OS}" \
--override-arch "${ARCH}" \
"${SRC}" \
"oci-archive:${archive}"
else
echo "Archive hit: ${IMG_REF}"
fi
SRC_FINAL="oci-archive:${archive}"
else
echo "Archive hit: ${img}"
echo "Using direct source: ${SRC}"
SRC_FINAL="${SRC}"
fi
# If already present in the target containers/storage, skip.
if skopeo inspect "containers-storage:${img}" >/dev/null 2>&1; then
echo "Store hit: ${img}"
if skopeo inspect "containers-storage:${IMG_REF}" >/dev/null 2>&1; then
echo "Store hit: ${IMG_REF}"
continue
fi
echo "Seeding CRI-O store: ${img}"
echo "Seeding CRI-O store: ${IMG_REF}"
skopeo copy \
--override-os "${OS}" \
--override-arch "${ARCH}" \
"oci-archive:${archive}" \
"containers-storage:${img}"
"${SRC_FINAL}" \
"containers-storage:${IMG_REF}"
done
echo

View File

@@ -1,9 +1 @@
[storage]
driver = "overlay"
runroot = "/run/containers/storage"
graphroot = "/var/lib/containers/storage"
[storage.options]
additionalimagestores = [
"/usr/lib/monok8s/imagestore"
]
# Generated file. DO NOT MODIFY.

View File

@@ -0,0 +1,4 @@
[crio.runtime]
default_sysctls = [
"net.ipv4.ip_unprivileged_port_start=20"
]

View File

@@ -1,23 +0,0 @@
#!/sbin/openrc-run
# OpenRC service: applies per-node configuration (hostname, management
# interface/address) from /opt/monok8s/config/node.env at boot.
export PATH="/usr/local/bin:/usr/local/sbin:$PATH"
name="Apply node config"
description="Apply node configurations using node.env from /opt/monok8s/config"
command="/opt/scripts/apply-node-config.sh"
LOG_DIR="/var/log/monok8s"
LOG_FILE="$LOG_DIR/apply-node-config.log"
depend() {
# Local filesystems must be mounted before config can be read/logged.
need localmount
}
start() {
# Ensure the log directory exists before redirecting into it.
checkpath --directory "$LOG_DIR"
ebegin "Applying node config"
# Runs synchronously: boot waits until node configuration completes.
"$command" >>"$LOG_FILE" 2>&1
eend $?
}

View File

@@ -1,24 +0,0 @@
#!/sbin/openrc-run
# OpenRC service: launches the cluster bootstrap (kubeadm init/join)
# in the background once node configuration has been applied.
export PATH="/usr/local/bin:/usr/local/sbin:$PATH"
name="Bootstrap cluster"
description="Apply cluster configurations using node.env from /opt/monok8s/config"
command="/opt/scripts/bootstrap-cluster.sh"
LOG_DIR="/var/log/monok8s"
LOG_FILE="$LOG_DIR/bootstrap.log"
depend() {
# Hard dependency on node config; use networking when it is available.
need apply-node-config
use net
}
start() {
checkpath --directory "$LOG_DIR"
ebegin "Applying cluster config"
# Backgrounded so boot is not blocked by a long kubeadm run. Note that
# eend therefore reports the launch status, not bootstrap success.
"$command" >>"$LOG_FILE" 2>&1 &
eend $?
}

View File

@@ -0,0 +1,41 @@
#!/bin/sh
# Early-boot hook: set up hugepages, detect an A/B boot-slot switch,
# run pending Kubernetes migrations, then hand off to supervised-init.
set -eu
# All output from this script goes to the boot log.
exec >>/var/log/monok8s/boot.log 2>&1
export PATH="/usr/local/bin:/usr/local/sbin:$PATH"
MIGRATIONS_LIB=/usr/lib/monok8s/lib/migrations.sh
CONFIG_DIR=/opt/monok8s/config
# Runtime state written by the boot chain; holds BOOT_PART=<slot>.
BOOT_STATE=/run/monok8s/boot-state.env
# Persisted record of the slot seen on the previous boot.
BOOTPART_FILE="$CONFIG_DIR/.bootpart"
MIGRATION_STATE_DIR="$CONFIG_DIR/migration-state"
mkdir -p /dev/hugepages
mountpoint -q /dev/hugepages || mount -t hugetlbfs none /dev/hugepages
echo 256 > /proc/sys/vm/nr_hugepages
# Compare the currently booted slot against the one recorded last boot.
CUR=$(grep '^BOOT_PART=' "$BOOT_STATE" | cut -d= -f2-)
LAST=$(cat "$BOOTPART_FILE" 2>/dev/null || true)
slot_changed=0
if [ "$CUR" != "$LAST" ]; then
slot_changed=1
echo "Slot changed ($LAST -> $CUR)"
fi
# NOTE(review): nothing in this script writes $CUR back to
# $BOOTPART_FILE, so the slot-changed path looks like it would
# re-trigger on every boot unless another component updates .bootpart
# — confirm against the rest of the boot chain.
# shellcheck source=/dev/null
. "$MIGRATIONS_LIB"
if [ "$slot_changed" -eq 1 ]; then
monok8s_cleanup_runtime_state
fi
# Run per-minor-version k8s migration scripts; the state dir is
# presumably how completed migrations are tracked (see migrations.sh).
K8S_MINOR="$(monok8s_detect_k8s_minor || true)"
if [ -n "$K8S_MINOR" ]; then
monok8s_run_migration_dir \
"/usr/lib/monok8s/migrations.d/k8s/$K8S_MINOR" \
"$MIGRATION_STATE_DIR/k8s/$K8S_MINOR"
fi
# Launch the supervised init loop in the background so boot continues.
/usr/lib/monok8s/lib/supervised-init.sh &

View File

@@ -1,149 +0,0 @@
#!/bin/bash
set -euo pipefail
CONFIG_DIR="${CONFIG_DIR:-/opt/monok8s/config}"
NODE_ENV="${NODE_ENV:-$CONFIG_DIR/node.env}"
log() {
echo "[monok8s-node] $*"
}
fail() {
echo "[monok8s-node] ERROR: $*" >&2
exit 1
}
need_cmd() {
command -v "$1" >/dev/null 2>&1 || fail "missing required command: $1"
}
require_file() {
[ -f "$1" ] || fail "required file not found: $1"
}
load_config() {
require_file "$NODE_ENV"
# shellcheck disable=SC1090
. "$NODE_ENV"
HOSTNAME="${HOSTNAME:-}"
MGMT_IFACE="${MGMT_IFACE:-}"
MGMT_ADDRESS="${MGMT_ADDRESS:-}"
MGMT_GATEWAY="${MGMT_GATEWAY:-}"
}
validate_config() {
    # Reject an incomplete node.env early with a clear error message.
    [ -n "$HOSTNAME" ] || fail "HOSTNAME is required"
    [ -n "$MGMT_IFACE" ] || fail "MGMT_IFACE is required"
    [ -n "$MGMT_ADDRESS" ] || fail "MGMT_ADDRESS is required"

    # MGMT_ADDRESS must carry a prefix length: stripping "*/" must
    # change the string, otherwise it contained no "/" at all.
    if [ "${MGMT_ADDRESS#*/}" = "$MGMT_ADDRESS" ]; then
        fail "MGMT_ADDRESS must include a CIDR prefix, example: 10.0.0.13/24"
    fi
}
check_prereqs() {
need_cmd ip
need_cmd hostname
need_cmd grep
need_cmd awk
need_cmd cut
need_cmd mkdir
need_cmd printf
need_cmd cat
}
configure_mgmt_interface() {
    # Bring the management interface up, assign its address exactly
    # once, and optionally pin the default route via MGMT_GATEWAY.
    local want_ip

    if ! ip link show "$MGMT_IFACE" >/dev/null 2>&1; then
        fail "interface not found: $MGMT_IFACE"
    fi

    log "bringing up interface: $MGMT_IFACE"
    ip link set "$MGMT_IFACE" up

    # Bare IP without the /prefix, for comparison with existing addrs.
    want_ip="${MGMT_ADDRESS%/*}"
    if ip -o addr show dev "$MGMT_IFACE" | awk '{print $4}' | cut -d/ -f1 | grep -Fx "$want_ip" >/dev/null 2>&1; then
        log "address already present on $MGMT_IFACE: $MGMT_ADDRESS"
    else
        log "assigning $MGMT_ADDRESS to $MGMT_IFACE"
        ip addr add "$MGMT_ADDRESS" dev "$MGMT_IFACE"
    fi

    if [ -n "${MGMT_GATEWAY:-}" ]; then
        log "ensuring default route via $MGMT_GATEWAY"
        ip route replace default via "$MGMT_GATEWAY" dev "$MGMT_IFACE"
    fi
}
set_hostname_if_needed() {
local current_hostname
current_hostname="$(hostname 2>/dev/null || true)"
if [ "$current_hostname" != "$HOSTNAME" ]; then
log "setting hostname to $HOSTNAME"
hostname "$HOSTNAME"
mkdir -p /etc
printf '%s\n' "$HOSTNAME" > /etc/hostname
if [ -f /etc/hosts ]; then
if ! grep -Eq "[[:space:]]$HOSTNAME([[:space:]]|$)" /etc/hosts; then
printf '127.0.0.1\tlocalhost %s\n' "$HOSTNAME" >> /etc/hosts
fi
else
cat > /etc/hosts <<EOF
127.0.0.1 localhost $HOSTNAME
EOF
fi
else
log "hostname already set: $HOSTNAME"
fi
}
ensure_ip_forward() {
local current
current="$(cat /proc/sys/net/ipv4/ip_forward 2>/dev/null || echo 0)"
if [ "$current" != "1" ]; then
log "enabling IPv4 forwarding"
echo 1 > /proc/sys/net/ipv4/ip_forward
fi
mkdir -p /etc/sysctl.d
cat > /etc/sysctl.d/99-monok8s.conf <<'EOF'
net.ipv4.ip_forward = 1
EOF
}
print_summary() {
log "node configuration applied"
log "hostname: $HOSTNAME"
log "interface: $MGMT_IFACE"
log "address: $MGMT_ADDRESS"
if [ -n "${MGMT_GATEWAY:-}" ]; then
log "gateway: $MGMT_GATEWAY"
else
log "gateway: <not set>"
fi
}
main() {
load_config
validate_config
check_prereqs
ensure_ip_forward
configure_mgmt_interface
set_hostname_if_needed
print_summary
}
main "$@"

View File

@@ -1,579 +0,0 @@
#!/bin/sh
# kubeadm node bootstrap/upgrade driver (init or join mode).
set -eu
# All settings come from cluster.env under CONFIG_DIR (both overridable).
CONFIG_DIR="${CONFIG_DIR:-/opt/monok8s/config}"
CLUSTER_ENV="${CONFIG_DIR}/cluster.env"
# The rendered kubeadm Init/Cluster/Kubelet configuration is written here.
KUBEADM_CONFIG_OUT="${KUBEADM_CONFIG_OUT:-/tmp/kubeadm-init.yaml}"
log() {
    # Print an informational message with the project prefix.
    printf '[monok8s] %s\n' "$*"
}
fail() {
    # Print an error to stderr and abort the whole script with status 1.
    printf '[monok8s] ERROR: %s\n' "$*" >&2
    exit 1
}
need_cmd() {
    # Abort unless command $1 is resolvable on PATH.
    if ! command -v "$1" >/dev/null 2>&1; then
        fail "missing required command: $1"
    fi
}
require_file() {
    # Abort unless $1 exists and is a regular file.
    if [ ! -f "$1" ]; then
        fail "required file not found: $1"
    fi
}
# Source cluster.env and apply defaults. The :? expansions make the three
# mandatory settings fail fast with a clear message.
load_config() {
require_file "$CLUSTER_ENV"
# shellcheck disable=SC1090
. "$CLUSTER_ENV"
: "${KUBERNETES_VERSION:?KUBERNETES_VERSION is required}"
: "${NODE_NAME:?NODE_NAME is required}"
: "${APISERVER_ADVERTISE_ADDRESS:?APISERVER_ADVERTISE_ADDRESS is required}"
POD_SUBNET="${POD_SUBNET:-10.244.0.0/16}"
SERVICE_SUBNET="${SERVICE_SUBNET:-10.96.0.0/12}"
CLUSTER_NAME="${CLUSTER_NAME:-monok8s}"
CLUSTER_DOMAIN="${CLUSTER_DOMAIN:-cluster.local}"
CONTAINER_RUNTIME_ENDPOINT="${CONTAINER_RUNTIME_ENDPOINT:-unix:///var/run/crio/crio.sock}"
SANS="${SANS:-}"
ALLOW_SCHEDULING_ON_CONTROL_PLANE="${ALLOW_SCHEDULING_ON_CONTROL_PLANE:-yes}"
SKIP_IMAGE_CHECK="${SKIP_IMAGE_CHECK:-no}"
KUBECONFIG_USER_HOME="${KUBECONFIG_USER_HOME:-/root}"
BOOTSTRAP_MODE="${BOOTSTRAP_MODE:-init}"
JOIN_KIND="${JOIN_KIND:-worker}"
# Join-only settings; cross-checked in validate_config when BOOTSTRAP_MODE=join.
API_SERVER_ENDPOINT="${API_SERVER_ENDPOINT:-}"
BOOTSTRAP_TOKEN="${BOOTSTRAP_TOKEN:-}"
DISCOVERY_TOKEN_CA_CERT_HASH="${DISCOVERY_TOKEN_CA_CERT_HASH:-}"
CONTROL_PLANE_CERT_KEY="${CONTROL_PLANE_CERT_KEY:-}"
CNI_PLUGIN="${CNI_PLUGIN:-none}"
}
# Enforce the cross-field rules plain defaults cannot express: join mode
# needs endpoint/token/hash, and a control-plane join additionally needs the
# uploaded certificate key.
validate_config() {
case "$BOOTSTRAP_MODE" in
init)
;;
join)
: "${API_SERVER_ENDPOINT:?API_SERVER_ENDPOINT is required for join mode}"
: "${BOOTSTRAP_TOKEN:?BOOTSTRAP_TOKEN is required for join mode}"
: "${DISCOVERY_TOKEN_CA_CERT_HASH:?DISCOVERY_TOKEN_CA_CERT_HASH is required for join mode}"
case "$JOIN_KIND" in
worker|control-plane)
;;
*)
fail "JOIN_KIND must be 'worker' or 'control-plane'"
;;
esac
if [ "$JOIN_KIND" = "control-plane" ]; then
: "${CONTROL_PLANE_CERT_KEY:?CONTROL_PLANE_CERT_KEY is required for JOIN_KIND=control-plane}"
fi
;;
*)
fail "BOOTSTRAP_MODE must be 'init' or 'join'"
;;
esac
}
normalize_version() {
    # Print the version string with a single leading "v" removed, if present.
    printf '%s\n' "${1#v}"
}
version_major_minor() {
    # Reduce a version like v1.28.3 to its major.minor pair (1.28).
    normalize_version "$1" | awk -F. '{ printf "%s.%s\n", $1, $2 }'
}
version_eq() {
    # True when the two versions are identical after stripping any "v" prefix.
    test "$(normalize_version "$1")" = "$(normalize_version "$2")"
}
version_lt() {
    # True when $1 sorts strictly before $2 under GNU version ordering.
    version_eq "$1" "$2" && return 1
    [ "$(printf '%s\n%s\n' "$(normalize_version "$1")" "$(normalize_version "$2")" | sort -V | tail -n1)" = "$(normalize_version "$2")" ]
}
version_gt() {
    # True when $1 sorts strictly after $2 under GNU version ordering.
    version_eq "$1" "$2" && return 1
    [ "$(printf '%s\n%s\n' "$(normalize_version "$1")" "$(normalize_version "$2")" | sort -V | head -n1)" = "$(normalize_version "$2")" ]
}
minor_diff() {
    # Print (minor of $2) minus (minor of $1); abort on a major-version change.
    a="$(version_major_minor "$1")"
    b="$(version_major_minor "$2")"
    a_major="${a%.*}"
    a_minor="${a#*.}"
    b_major="${b%.*}"
    b_minor="${b#*.}"
    if [ "$a_major" != "$b_major" ]; then
        fail "major version change unsupported here: $1 -> $2"
    fi
    echo $((b_minor - a_minor))
}
# Version of the locally installed kubeadm binary (e.g. "v1.35.3").
get_kubeadm_binary_version() {
kubeadm version -o short
}
# Control-plane version seen through admin.conf. The awk script picks the
# gitVersion line that follows the serverVersion: block in the YAML output.
get_cluster_server_version() {
kubectl --kubeconfig /etc/kubernetes/admin.conf version -o yaml \
| awk '
$1 == "serverVersion:" { in_server=1; next }
in_server && $1 == "gitVersion:" { print $2; exit }
'
}
# Same extraction, but via the kubelet kubeconfig so it also works on nodes
# that have no admin.conf.
get_api_server_version_from_kubelet_kubeconfig() {
kubectl --kubeconfig /etc/kubernetes/kubelet.conf version -o yaml \
| awk '
$1 == "serverVersion:" { in_server=1; next }
in_server && $1 == "gitVersion:" { print $2; exit }
'
}
validate_target_matches_local_binaries() {
    # Refuse to proceed when the installed kubeadm does not match the target.
    kubeadm_ver="$(get_kubeadm_binary_version)"
    version_eq "$kubeadm_ver" "$KUBERNETES_VERSION" \
        || fail "kubeadm binary version ($kubeadm_ver) does not match target KUBERNETES_VERSION ($KUBERNETES_VERSION)"
}
# Pick the concrete action from the requested mode plus on-disk state: a
# kubeconfig left by a previous run means this node is already
# initialized/joined, so the run becomes an upgrade instead. Sets the
# global BOOTSTRAP_ACTION consumed by main().
decide_bootstrap_action() {
case "$BOOTSTRAP_MODE" in
init)
if [ -f /etc/kubernetes/admin.conf ]; then
BOOTSTRAP_ACTION="upgrade-control-plane"
else
BOOTSTRAP_ACTION="init"
fi
;;
join)
if [ -f /etc/kubernetes/kubelet.conf ]; then
BOOTSTRAP_ACTION="upgrade-node"
else
BOOTSTRAP_ACTION="join"
fi
;;
*)
fail "unsupported BOOTSTRAP_MODE: $BOOTSTRAP_MODE"
;;
esac
log "selected bootstrap action: $BOOTSTRAP_ACTION"
}
# Allow only same-version (no-op) or a single minor step upward; downgrades
# and larger minor skips are rejected.
validate_upgrade_path() {
current="$1"
target="$2"
if version_eq "$current" "$target"; then
log "cluster is already at target version: $target"
return 0
fi
if version_gt "$current" "$target"; then
fail "downgrade is not supported: current=$current target=$target"
fi
diff="$(minor_diff "$current" "$target")"
case "$diff" in
0|1)
;;
*)
fail "unsupported upgrade path: current=$current target=$target (minor skip too large)"
;;
esac
}
# Shared pre-flight for both upgrade actions.
check_upgrade_prereqs() {
validate_target_matches_local_binaries
}
# Control-plane upgrade: validate the path, then run kubeadm upgrade
# plan/apply. Skips entirely when the cluster already runs the target.
run_kubeadm_upgrade_apply() {
current_version="$(get_cluster_server_version)"
log "current control-plane version: $current_version"
log "target control-plane version: $KUBERNETES_VERSION"
validate_upgrade_path "$current_version" "$KUBERNETES_VERSION"
if version_eq "$current_version" "$KUBERNETES_VERSION"; then
log "control-plane already at target version; skipping kubeadm upgrade apply"
return 0
fi
log "running kubeadm upgrade plan..."
kubeadm upgrade plan "$KUBERNETES_VERSION"
log "running kubeadm upgrade apply..."
kubeadm upgrade apply -y "$KUBERNETES_VERSION"
}
# Worker/secondary upgrade: refuse to run ahead of the control plane, then
# let kubeadm update this node's local configuration.
run_kubeadm_upgrade_node() {
cluster_version="$(get_api_server_version_from_kubelet_kubeconfig)"
log "cluster/control-plane version visible from this node: $cluster_version"
log "target node version: $KUBERNETES_VERSION"
if ! version_eq "$cluster_version" "$KUBERNETES_VERSION"; then
fail "control-plane version ($cluster_version) does not match target ($KUBERNETES_VERSION); upgrade control-plane first"
fi
log "running kubeadm upgrade node..."
kubeadm upgrade node
}
check_prereqs() {
    # All external tools the bootstrap relies on must be present up front.
    for tool in kubeadm kubelet kubectl crictl rc-service awk ip grep sed hostname; do
        need_cmd "$tool"
    done
}
# TCP-probe the API server endpoint (host:port) for up to 20s before a join.
check_apiserver_reachable() {
host="${API_SERVER_ENDPOINT%:*}"
port="${API_SERVER_ENDPOINT##*:}"
need_cmd nc
log "checking API server reachability: ${host}:${port}"
for _ in $(seq 1 20); do
if nc -z "$host" "$port" >/dev/null 2>&1; then
log "API server is reachable"
return 0
fi
sleep 1
done
fail "cannot reach API server at ${host}:${port}"
}
# OpenRC service start; readiness is verified separately below.
start_crio() {
rc-service crio start
}
# Wait up to 30s for both the OpenRC service state and the CRI endpoint to
# respond; the last observed state is included in the failure message.
check_crio_running() {
log "waiting for CRI-O to become ready..."
last_status="unknown"
for _ in $(seq 1 30); do
if rc-service crio status >/dev/null 2>&1; then
last_status="service-running"
if crictl --runtime-endpoint "$CONTAINER_RUNTIME_ENDPOINT" info >/dev/null 2>&1; then
log "CRI-O is up"
return 0
fi
last_status="service-running-but-runtime-not-ready"
else
last_status="service-not-running"
fi
sleep 1
done
fail "CRI-O did not become ready in time (${last_status})"
}
image_present() {
    # True when image repo:tag ($1) is already known to the container runtime.
    wanted="$1"
    # crictl prints a header row; NR>1 skips it, then reassemble "repo:tag".
    crictl --runtime-endpoint "$CONTAINER_RUNTIME_ENDPOINT" images \
        | awk 'NR>1 { print $1 ":" $2 }' \
        | grep -Fx "${wanted%:*}:${wanted##*:}" >/dev/null 2>&1
}
# Compare `kubeadm config images list` for the target version against what
# the runtime already has. Images are expected to be preloaded on this
# appliance, so any miss is fatal (unless SKIP_IMAGE_CHECK=yes).
check_required_images() {
[ "$SKIP_IMAGE_CHECK" = "yes" ] && {
log "skipping image check (SKIP_IMAGE_CHECK=yes)"
return 0
}
log "checking required Kubernetes images for $KUBERNETES_VERSION..."
missing_any=0
for img in $(kubeadm config images list --kubernetes-version "$KUBERNETES_VERSION"); do
if image_present "$img"; then
log "found image: $img"
else
echo "[monok8s] MISSING image: $img" >&2
missing_any=1
fi
done
[ "$missing_any" -eq 0 ] || fail "preload the Kubernetes images before bootstrapping"
log "all required images are present"
}
check_not_already_bootstrapped() {
    # Guard against double-bootstrap: a leftover kubeconfig means this node
    # was already initialized (init mode) or joined (join mode).
    if [ "$BOOTSTRAP_MODE" = "init" ] && [ -f /etc/kubernetes/admin.conf ]; then
        fail "cluster already appears initialized (/etc/kubernetes/admin.conf exists)"
    fi
    if [ "$BOOTSTRAP_MODE" = "join" ] && [ -f /etc/kubernetes/kubelet.conf ]; then
        fail "node already appears joined (/etc/kubernetes/kubelet.conf exists)"
    fi
}
# kubeadm join, either as a worker or as an additional control-plane member
# (the latter needs the uploaded certificate key plus an advertise address).
run_kubeadm_join() {
log "running kubeadm join..."
case "$JOIN_KIND" in
worker)
kubeadm join "${API_SERVER_ENDPOINT}" \
--token "${BOOTSTRAP_TOKEN}" \
--discovery-token-ca-cert-hash "${DISCOVERY_TOKEN_CA_CERT_HASH}" \
--node-name "${NODE_NAME}" \
--cri-socket "${CONTAINER_RUNTIME_ENDPOINT}"
;;
control-plane)
kubeadm join "${API_SERVER_ENDPOINT}" \
--token "${BOOTSTRAP_TOKEN}" \
--discovery-token-ca-cert-hash "${DISCOVERY_TOKEN_CA_CERT_HASH}" \
--control-plane \
--certificate-key "${CONTROL_PLANE_CERT_KEY}" \
--apiserver-advertise-address "${APISERVER_ADVERTISE_ADDRESS}" \
--node-name "${NODE_NAME}" \
--cri-socket "${CONTAINER_RUNTIME_ENDPOINT}"
;;
esac
}
# Render the kubeadm v1beta4 Init/Cluster/Kubelet configuration. Extra SANs
# come from the comma-separated SANS variable; each entry is trimmed and
# emitted as a YAML list item appended after the advertise address.
generate_kubeadm_config() {
log "generating kubeadm config at $KUBEADM_CONFIG_OUT..."
SAN_LINES=""
if [ -n "${SANS:-}" ]; then
old_ifs="$IFS"
IFS=','
for san in $SANS; do
san_trimmed="$(echo "$san" | sed 's/^ *//;s/ *$//')"
[ -n "$san_trimmed" ] && SAN_LINES="${SAN_LINES} - \"${san_trimmed}\"
"
done
IFS="$old_ifs"
fi
cat > "$KUBEADM_CONFIG_OUT" <<EOF
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: ${APISERVER_ADVERTISE_ADDRESS}
bindPort: 6443
nodeRegistration:
name: ${NODE_NAME}
criSocket: ${CONTAINER_RUNTIME_ENDPOINT}
imagePullPolicy: IfNotPresent
kubeletExtraArgs:
- name: hostname-override
value: "${NODE_NAME}"
- name: node-ip
value: "${APISERVER_ADVERTISE_ADDRESS}"
- name: pod-manifest-path
value: "/etc/kubernetes/manifests"
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
clusterName: ${CLUSTER_NAME}
kubernetesVersion: ${KUBERNETES_VERSION}
networking:
podSubnet: ${POD_SUBNET}
serviceSubnet: ${SERVICE_SUBNET}
dnsDomain: ${CLUSTER_DOMAIN}
apiServer:
certSANs:
- "${APISERVER_ADVERTISE_ADDRESS}"
${SAN_LINES}---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: cgroupfs
containerRuntimeEndpoint: ${CONTAINER_RUNTIME_ENDPOINT}
EOF
}
# Initialize the control plane from the rendered config file.
run_kubeadm_init() {
log "running kubeadm init..."
kubeadm init --config "$KUBEADM_CONFIG_OUT"
}
require_local_ip() {
    # Abort unless IP $1 is configured on some local interface.
    wanted_ip="$1"
    if ! ip -o addr show | awk '{print $4}' | cut -d/ -f1 | grep -Fx "$wanted_ip" >/dev/null 2>&1; then
        fail "required local IP is not present on any interface: $wanted_ip"
    fi
}
# Both modes need the advertise address configured locally; join mode also
# needs the remote API server to answer.
validate_network_requirements() {
case "$BOOTSTRAP_MODE" in
init)
require_local_ip "$APISERVER_ADVERTISE_ADDRESS"
;;
join)
require_local_ip "$APISERVER_ADVERTISE_ADDRESS"
check_apiserver_reachable
;;
*)
fail "unsupported BOOTSTRAP_MODE: $BOOTSTRAP_MODE"
;;
esac
}
wait_for_node() {
    # Poll for up to 60s until the node object appears in the API server.
    log "waiting for node registration: $NODE_NAME"
    attempt=0
    while [ "$attempt" -lt 60 ]; do
        if kubectl --kubeconfig /etc/kubernetes/admin.conf get node "$NODE_NAME" >/dev/null 2>&1; then
            return 0
        fi
        sleep 1
        attempt=$((attempt + 1))
    done
    fail "node $NODE_NAME did not register in time"
}
# Apply NODE_ANNOTATIONS / NODE_LABELS (comma-separated key=value pairs) to
# this node. Only possible where admin.conf exists, i.e. on init nodes.
apply_local_node_metadata_if_possible() {
if [ "$BOOTSTRAP_MODE" != "init" ]; then
log "skipping node labels/annotations from this node (not control-plane init mode)"
return 0
fi
wait_for_node
# The unquoted command substitutions below are intentional: word splitting
# turns the comma list into separate kubectl arguments.
if [ -n "${NODE_ANNOTATIONS:-}" ]; then
kubectl --kubeconfig /etc/kubernetes/admin.conf annotate node "$NODE_NAME" $(printf '%s' "$NODE_ANNOTATIONS" | tr ',' ' ') --overwrite
fi
if [ -n "${NODE_LABELS:-}" ]; then
kubectl --kubeconfig /etc/kubernetes/admin.conf label node "$NODE_NAME" $(printf '%s' "$NODE_LABELS" | tr ',' ' ') --overwrite
fi
}
install_cni_if_requested() {
    # Toggle the bundled CRI-O bridge CNI config on or off depending on
    # whether a real cluster CNI will be installed later.
    bridge_conf=/etc/cni/net.d/10-crio-bridge.conflist
    case "${CNI_PLUGIN}" in
    none)
        if [ -f "$bridge_conf" ]; then
            mv "$bridge_conf" "${bridge_conf}.disabled"
        fi
        log "bootstrap bridge CNI disabled; install a cluster CNI (e.g., flannel) for pod networking"
        ;;
    bridge)
        if [ -f "${bridge_conf}.disabled" ]; then
            mv "${bridge_conf}.disabled" "$bridge_conf"
        fi
        log "bootstrap bridge CNI enabled"
        ;;
    *)
        fail "unsupported CNI_PLUGIN: ${CNI_PLUGIN}"
        ;;
    esac
}
allow_single_node_scheduling() {
    # Single-node convenience: drop the control-plane NoSchedule taint unless
    # the operator opted out. The taint removal is best-effort (|| true).
    case "$ALLOW_SCHEDULING_ON_CONTROL_PLANE" in
    yes)
        log "removing control-plane taint so this single node can schedule workloads..."
        kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes "$NODE_NAME" node-role.kubernetes.io/control-plane- >/dev/null 2>&1 || true
        ;;
    *)
        log "leaving control-plane taint in place"
        ;;
    esac
}
# Operator-facing summary printed at the very end; content depends on mode.
print_next_steps() {
echo
echo "[monok8s] bootstrap complete"
echo
case "$BOOTSTRAP_MODE" in
init)
cat <<EOF
Try these now:
export KUBECONFIG=/root/.kube/config
kubectl get nodes -o wide
kubectl describe nodes
Notes:
- On a fresh cluster without a CNI, nodes may stay NotReady.
- If you want pods to run on this same node, keep ALLOW_SCHEDULING_ON_CONTROL_PLANE=yes.
EOF
;;
join)
cat <<EOF
This node has attempted to join the cluster.
Check from the control-plane node:
kubectl get nodes -o wide
kubectl describe node ${NODE_NAME}
Notes:
- This node can join without a CNI.
- Without a cluster CNI, the node may remain NotReady.
EOF
;;
esac
}
# Orchestration: validate everything first, then branch on the action chosen
# by decide_bootstrap_action. kubelet is restarted after init/upgrade so it
# picks up the new binaries/configuration.
main() {
load_config
validate_config
check_prereqs
validate_network_requirements
decide_bootstrap_action
install_cni_if_requested
start_crio
check_crio_running
case "$BOOTSTRAP_ACTION" in
init)
check_required_images
generate_kubeadm_config
run_kubeadm_init
rc-service kubelet restart
apply_local_node_metadata_if_possible
allow_single_node_scheduling
;;
upgrade-control-plane)
check_upgrade_prereqs
check_required_images
run_kubeadm_upgrade_apply
rc-service kubelet restart
apply_local_node_metadata_if_possible
allow_single_node_scheduling
;;
join)
run_kubeadm_join
;;
upgrade-node)
check_upgrade_prereqs
run_kubeadm_upgrade_node
rc-service kubelet restart
;;
*)
fail "unsupported BOOTSTRAP_ACTION: $BOOTSTRAP_ACTION"
;;
esac
print_next_steps
}
main "$@"

View File

@@ -0,0 +1,58 @@
#!/bin/sh
# Print the major.minor of the Kubernetes version reported by `ctl version -k`
# (e.g. "1.35"); return non-zero when the version cannot be determined.
# Accepts and ignores an optional argument for backward compatibility (the
# previous env_file parameter was captured but never used).
monok8s_detect_k8s_minor() {
    local version major_minor
    version=$(/usr/local/bin/ctl version -k 2>/dev/null || true)
    [ -n "$version" ] || return 1
    # Accept both "v1.35.3" and "1.35.3".
    version=${version#v}
    major_minor=$(printf '%s\n' "$version" | cut -d. -f1,2)
    [ -n "$major_minor" ] || return 1
    printf '%s\n' "$major_minor"
}
# Wipe container/CNI/kubelet runtime state that must not survive an OS
# upgrade, then recreate the top-level directories. Note /var/lib/kubelet
# itself is preserved; only its pod/plugin subtrees are removed.
monok8s_cleanup_runtime_state() {
echo "Cleaning runtime state"
rm -rf \
/var/lib/containers \
/var/lib/cni \
/var/lib/kubelet/pods \
/var/lib/kubelet/plugins \
/var/lib/kubelet/plugins_registry \
/var/lib/kubelet/device-plugins \
/run/containers \
/run/netns
mkdir -p \
/var/lib/containers \
/var/lib/kubelet \
/var/lib/cni
}
# Run every *.sh in migration dir $1 exactly once, recording completion
# stamps under state dir $2. Scripts run in lexical order; a script with an
# existing .done stamp is skipped. Returns non-zero on the first failing
# script, and only successful runs are stamped — previously a failing
# script was still marked done whenever the caller ran without errexit.
monok8s_run_migration_dir() {
    dir=$1
    state_dir=$2
    [ -d "$dir" ] || return 0
    mkdir -p "$state_dir"
    for script in "$dir"/*.sh; do
        # Glob may not match anything; skip the literal pattern.
        [ -e "$script" ] || continue
        name=$(basename "$script")
        stamp="$state_dir/$name.done"
        if [ -e "$stamp" ]; then
            continue
        fi
        echo "Running migration: $script"
        sh "$script" || return 1
        : > "$stamp"
    done
}

View File

@@ -0,0 +1,57 @@
#!/bin/sh
# Retry wrapper around `ctl init`: keeps retrying with backoff until init
# succeeds, persisting the failure count across invocations.
set -eu
CONFIG_DIR=/opt/monok8s/config
LOG=/var/log/monok8s/bootstrap.log
STATE_DIR=/run/monok8s
FAIL_COUNT_FILE="$STATE_DIR/bootstrap-fail-count"
LOCK_DIR="$STATE_DIR/supervised-init.lock"
# For debugging
HOLD_FILE="$CONFIG_DIR/bootstrap.hold"
mkdir -p "$STATE_DIR" /var/log/monok8s
# mkdir is atomic, so the lock directory doubles as a single-instance guard.
if ! mkdir "$LOCK_DIR" 2>/dev/null; then
echo "[$(date -Is)] supervised-init already running" >> "$LOG"
exit 0
fi
trap 'rmdir "$LOCK_DIR"' EXIT INT TERM
# Restore the persisted fail counter; anything non-numeric resets to 0.
fail_count=0
if [ -f "$FAIL_COUNT_FILE" ]; then
fail_count="$(cat "$FAIL_COUNT_FILE" 2>/dev/null || echo 0)"
case "$fail_count" in
''|*[!0-9]*) fail_count=0 ;;
esac
fi
while true; do
# Operators can park the loop by creating the hold file.
if [ -f "$HOLD_FILE" ]; then
echo "[$(date -Is)] bootstrap held by $HOLD_FILE" >> "$LOG"
sleep 300
continue
fi
echo "[$(date -Is)] starting ctl init" >> "$LOG"
if /usr/local/bin/ctl init --env-file "$CONFIG_DIR/cluster.env" >> "$LOG" 2>&1; then
echo "[$(date -Is)] ctl init succeeded" >> "$LOG"
rm -f "$FAIL_COUNT_FILE"
exit 0
fi
fail_count=$((fail_count + 1))
echo "$fail_count" > "$FAIL_COUNT_FILE"
echo "[$(date -Is)] ctl init failed, count=$fail_count" >> "$LOG"
# Increasing backoff, capped at 5 minutes.
case "$fail_count" in
1) sleep 10 ;;
2) sleep 30 ;;
3) sleep 60 ;;
4) sleep 120 ;;
*) sleep 300 ;;
esac
done

View File

@@ -1,21 +1,29 @@
#!/bin/bash
set -euo pipefail
DEVICE="$1"
FAKE_DEV="/tmp/dev"
mkdir -p "$FAKE_DEV"
PARENT_NAME=$(basename "$DEVICE")
echo "Refreshing partition table..."
partx -u "$DEVICE" 2>/dev/null || partx -a "$DEVICE"
echo "Refreshing partition table for $DEVICE..."
partx -u "$DEVICE" 2>/dev/null || partx -a "$DEVICE" || true
# Remove old fake nodes for this loop device first
find "$FAKE_DEV" -maxdepth 1 -type b -name "${PARENT_NAME}*" -exec rm -f {} \;
# Find partitions and their Major:Minor numbers
lsblk -rn -o NAME,MAJ:MIN "$DEVICE" | while read -r NAME MAJMIN; do
# Skip the parent loop0
if [[ "$NAME" == "loop0" ]]; then continue; fi
# Skip the parent loop device itself
if [[ "$NAME" == "$PARENT_NAME" ]]; then
continue
fi
PART_PATH="$FAKE_DEV/$NAME"
MAJOR=$(echo $MAJMIN | cut -d: -f1)
MINOR=$(echo $MAJMIN | cut -d: -f2)
MAJOR="${MAJMIN%%:*}"
MINOR="${MAJMIN##*:}"
echo "Creating node: $PART_PATH (b $MAJOR $MINOR)"
rm -f "$PART_PATH"
mknod "$PART_PATH" b "$MAJOR" "$MINOR"
done

57
alpine/utils.sh Executable file
View File

@@ -0,0 +1,57 @@
#!/bin/bash
# Ensure the loop-device infrastructure (/dev/loop-control plus
# /dev/loop0..31) exists and is usable inside this container, creating any
# missing device nodes; exits non-zero with guidance when the container
# lacks the needed privileges.
ensure_loop_ready() {
# The loop kernel module is host-side. This only works if the container
# has permission and modprobe exists; otherwise the host must load it.
if ! grep -qw loop /proc/modules 2>/dev/null; then
modprobe loop 2>/dev/null || true
fi
# /dev/loop-control: char device 10:237
if [ ! -e /dev/loop-control ]; then
echo "Creating missing /dev/loop-control" >&2
mknod /dev/loop-control c 10 237 || {
echo "ERROR: cannot create /dev/loop-control" >&2
echo "Run container with --privileged, or pass --device=/dev/loop-control and loop devices." >&2
exit 1
}
chmod 600 /dev/loop-control || true
fi
if [ ! -c /dev/loop-control ]; then
echo "ERROR: /dev/loop-control exists but is not a character device" >&2
ls -l /dev/loop-control >&2 || true
exit 1
fi
# Create a reasonable pool of loop block devices.
# loopN block devices are major 7, minor N.
for i in $(seq 0 31); do
if [ ! -e "/dev/loop$i" ]; then
echo "Creating missing /dev/loop$i" >&2
mknod "/dev/loop$i" b 7 "$i" || {
echo "ERROR: cannot create /dev/loop$i" >&2
echo "Run container with --privileged, or pre-create/pass loop devices." >&2
exit 1
}
chmod 660 "/dev/loop$i" || true
fi
if [ ! -b "/dev/loop$i" ]; then
echo "ERROR: /dev/loop$i exists but is not a block device" >&2
ls -l "/dev/loop$i" >&2 || true
exit 1
fi
done
# Smoke test: ask losetup for a free loop device.
if ! losetup -f >/dev/null 2>&1; then
echo "ERROR: losetup cannot find/use a loop device" >&2
echo "Debug info:" >&2
ls -l /dev/loop-control /dev/loop* >&2 || true
grep -w loop /proc/modules >&2 || true
echo >&2
echo "Docker likely needs --privileged, or at minimum CAP_SYS_ADMIN plus loop devices." >&2
exit 1
fi
}

View File

@@ -3,16 +3,23 @@ DOCKER_IMAGE_ROOT=monok8s
# Image tag
TAG=dev
# The Linux kernel, from NXP
NXP_VERSION=lf-6.18.2-1.0.0
CRIO_VERSION=cri-o.arm64.v1.35.1
# NXP's Linux Factory
LINUX_FACTORY=6.18.2-1.0.0
NXP_VERSION=lf-$(LINUX_FACTORY)
FMLIB_VERSION=lf-$(LINUX_FACTORY)
FMC_VERSION=lf-$(LINUX_FACTORY)
DPDK_VERSION=lf-$(LINUX_FACTORY)
VPP_VERSION=lf-$(LINUX_FACTORY)
VPP_UPSTREAM_VERSION=23.10
MONO_ASK_VERSION=mt-$(LINUX_FACTORY)
CRIO_VERSION=cri-o.arm64.v1.35.2
KUBE_VERSION=v1.35.3
# Mono's tutorial said fsl-ls1046a-rdb.dtb but our shipped board is not that one
# We need fsl-ls1046a-rdb-sdk.dtb here
DEVICE_TREE_TARGET=mono-gateway-dk-sdk
# Arch, should always be arm64 for our board. This is here in case branching off to other devices
ARCH=arm64
@@ -21,6 +28,7 @@ CROSS_COMPILE=aarch64-linux-gnu-
# Tools for initramfs
BUSYBOX_VERSION=1_36_1
E2FSPROGS_VERSION=1.47.4
UBOOT_VERSION=v2026.01
## Alpine Linux
ALPINE_VER=3.23.3
@@ -31,3 +39,11 @@ ALPINE_HOSTNAME=monok8s-hostname
# Upper case [A-Z]_ only, used for naming env vars
BUILD_TAG=MONOK8S
# Optional apt cache
# example: apt-cacher-ng.eco-system.svc.cluster.local:3142
APT_PROXY=
# remote image repository prefix to push to
# e.g. ghcr.io/monok8s
IMAGE_REPOSITORY=

59
clitools/README.md Normal file
View File

@@ -0,0 +1,59 @@
Installing `controller-gen`
```
export PATH="$(go env GOPATH)/bin:$PATH"
go install sigs.k8s.io/controller-tools/cmd/controller-gen@latest
```
## For development workflow
Run this on the gateway device
```bash
while true; do nc -l -p 1234 -e sh; done
```
Run this script on your dev machine
```bash
make build
SIZE=$(wc -c < ./bin/ctl-linux-aarch64-dev)
(
echo 'base64 -d > /var/ctl <<'"'"'EOF'"'"''
pv -s "$SIZE" < ./bin/ctl-linux-aarch64-dev | base64
echo 'EOF'
echo "export DEBUG=1"
echo 'chmod +x /var/ctl'
echo "echo Running /var/ctl $@"
echo "/var/ctl $@ 2>&1"
) | nc 10.0.0.10 1234
```
And use it like this
```bash
./send.sh start_crio
```
### Join token
Create join token from control plane
```
kubeadm token create --print-join-command
```
Generate join token using kubectl
```
TOKEN_NAME=bootstrap-token-iwotl0
API_SERVER=$(kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}' | sed 's|https://||') && \
TOKEN=$(kubectl -n kube-system get secret ${TOKEN_NAME} -o jsonpath='{.data.token-id}' | base64 -d).$(kubectl -n kube-system get secret ${TOKEN_NAME} -o jsonpath='{.data.token-secret}' | base64 -d) && \
HASH=$(kubectl -n kube-public get configmap cluster-info -o jsonpath='{.data.kubeconfig}' \
| grep 'certificate-authority-data' \
| awk '{print $2}' \
| base64 -d \
| openssl x509 -pubkey -noout \
| openssl rsa -pubin -outform der 2>/dev/null \
| openssl dgst -sha256 -hex \
| awk '{print "sha256:" $2}')
echo "export API_SERVER_ENDPOINT=${API_SERVER}"
echo "export BOOTSTRAP_TOKEN=${TOKEN}"
echo "export TOKEN_CACERT_HASH=${HASH}"
```

15
clitools/cmd/ctl/main.go Normal file
View File

@@ -0,0 +1,15 @@
package main
import (
"fmt"
"os"
"example.com/monok8s/pkg/cmd/root"
)
// main is the ctl entry point: build the root cobra command, execute it,
// and exit non-zero when command execution reports an error.
func main() {
	cmd := root.NewRootCmd()
	if err := cmd.Execute(); err != nil {
		fmt.Fprintf(os.Stderr, "error: %v\n", err)
		os.Exit(1)
	}
}

20
clitools/devtools/run.sh Executable file
View File

@@ -0,0 +1,20 @@
#!/bin/bash
# Dev helper: run ctl from source. The "controller" subcommand additionally
# needs a TLS keypair, so a self-signed one is generated on first use.
SCRIPT_DIR="$(dirname "${BASH_SOURCE[0]}")"
PROJ_ROOT="$( realpath "$SCRIPT_DIR"/../ )"
OUT_DIR="$PROJ_ROOT"/out
# ${1:-} guards against "unbound variable"-style breakage when no args given.
if [ "${1:-}" == "controller" ]; then
    if [ -f "$OUT_DIR/tls.key" ] && [ -f "$OUT_DIR/tls.crt" ]; then
        echo "Use existing certs"
    else
        echo "Generating self signed certs"
        # openssl fails if the output directory is missing; create it first.
        mkdir -p "$OUT_DIR"
        openssl req -x509 -newkey rsa:2048 -nodes -days 365 \
            -keyout "$OUT_DIR"/tls.key -out "$OUT_DIR"/tls.crt \
            -subj "/CN=127.0.0.1" \
            -addext "subjectAltName=IP:127.0.0.1,DNS:localhost"
    fi
    # "$@" (quoted) preserves arguments containing spaces; unquoted $@ did not.
    go run "$PROJ_ROOT"/cmd/ctl "$@" --tls-cert-file "$OUT_DIR"/tls.crt --tls-private-key-file "$OUT_DIR"/tls.key
else
    go run "$PROJ_ROOT"/cmd/ctl "$@"
fi

View File

@@ -0,0 +1,14 @@
# Generate CRD manifests from the Go API types with controller-gen and
# export only the rendered YAML via a scratch image.
ARG BASE_IMAGE=localhost/monok8s/ctl-build-base:dev
FROM ${BASE_IMAGE} AS build
WORKDIR /src
# Pinned controller-gen version keeps CRD output reproducible across builds.
RUN GOBIN=/usr/local/bin go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.20.1
COPY . .
RUN mkdir -p /out && \
controller-gen crd paths=./pkg/apis/... output:crd:dir=/out
FROM scratch
COPY --from=build /out/ /

View File

@@ -0,0 +1,42 @@
# Cross-compile ctl on the build platform and package it into a minimal
# scratch image together with CA certificates and the u-boot env tools.
ARG BASE_IMAGE=localhost/monok8s/ctl-build-base:dev
FROM --platform=$BUILDPLATFORM ${BASE_IMAGE} AS build
ARG VERSION=dev
ARG TARGETOS
ARG TARGETARCH
WORKDIR /src
COPY . .
# buildinfo_gen.go must be generated before docker build; fail fast if not.
RUN test -f pkg/buildinfo/buildinfo_gen.go
RUN mkdir -p /out && \
GOOS=${TARGETOS} GOARCH=${TARGETARCH} CGO_ENABLED=0 \
go build -trimpath -ldflags="-s -w" \
-o /out/ctl ./cmd/ctl
# Alpine only serves as the source for the CA bundle copied below.
FROM alpine:latest AS cacerts
FROM scratch
ARG VERSION
ARG TARGETOS
ARG TARGETARCH
ENV VERSION=${VERSION}
WORKDIR /
COPY --from=build /out/ctl /ctl
COPY --from=cacerts /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
# Pre-built static u-boot env tools from the uboot-tools artifact directory.
COPY out/uboot-tools/${TARGETOS}_${TARGETARCH}/fw_printenv /fw_printenv
COPY out/uboot-tools/${TARGETOS}_${TARGETARCH}/fw_setenv /fw_setenv
ENV PATH=/
ENTRYPOINT ["/ctl"]
CMD ["agent"]

View File

@@ -0,0 +1,8 @@
# Shared Go build base: toolchain plus a pre-warmed module cache so later
# builds only re-download when go.mod/go.sum change.
FROM golang:1.26-alpine
WORKDIR /src
RUN apk add --no-cache git build-base
COPY go.mod go.sum ./
RUN go mod download

View File

@@ -0,0 +1,26 @@
# Download-only stage: fetch u-boot sources at UBOOT_VERSION and export them
# as a tarball from a scratch image.
FROM alpine:3.23.0 AS base
# We separate the packages so this line can be cached upstream
RUN apk add --no-cache curl ca-certificates
RUN apk add --no-cache git
# ---- uboot ----
FROM base AS uboot
ARG UBOOT_VERSION
WORKDIR /work
RUN apk add --no-cache git tar gzip
# Shallow, blobless clone: only the tagged tree is needed for the tarball.
RUN git clone \
--depth 1 \
--branch "${UBOOT_VERSION}" \
--filter=blob:none \
https://github.com/u-boot/u-boot.git src
RUN mkdir -p /out && \
tar -C /work/src -zcf "/out/uboot-${UBOOT_VERSION}.tar.gz" .
# ---- final exported artifact set ----
FROM scratch
COPY --from=uboot /out/ /

View File

@@ -0,0 +1,51 @@
# Build DPDK for the DPAA platform from a vendored tarball and export the
# install tree plus the dpdk-testpmd binary via a scratch image.
ARG TAG=dev
ARG DOCKER_IMAGE_ROOT=monok8s
FROM alpine:3.22 AS build
RUN apk add --no-cache \
bash \
build-base \
linux-headers \
meson \
ninja \
pkgconf \
python3 \
py3-elftools \
coreutils \
file \
git \
bsd-compat-headers
RUN mkdir /src
WORKDIR /src
ARG DPDK_TAR
ARG DPDK_VERSION
COPY ${DPDK_TAR} /tmp/
RUN set -eux; \
mkdir -p /src/dpdk; \
tar -xf "/tmp/$(basename "${DPDK_TAR}")" -C /src/dpdk --strip-components=1
# Disable every driver class this board does not need to keep the build small.
RUN set -eux; \
meson setup /src/dpdk/build /src/dpdk \
--buildtype=release \
-Dplatform=dpaa \
-Dtests=false \
-Ddisable_drivers=crypto/*,compress/*,baseband/*,dma/*,event/*,regex/*,ml/*,gpu/*,raw/*,net/pcap,net/tap,net/vhost,net/virtio,net/ixgbe,net/i40e,net/txgbe,net/ring,net/af_packet; \
meson configure /src/dpdk/build | tee /tmp/meson-config.txt; \
grep -Ei 'dpaa|platform|disable_drivers' /tmp/meson-config.txt || true; \
ninja -C /src/dpdk/build; \
DESTDIR=/out ninja -C /src/dpdk/build install
RUN set -eux; \
mkdir -p /artifact/bin; \
test -x /src/dpdk/build/app/dpdk-testpmd; \
cp -r /src/dpdk/build/app/dpdk-testpmd /artifact/bin/
FROM scratch AS export
COPY --from=build /out/ /
COPY --from=build /artifact/ /

View File

@@ -0,0 +1,41 @@
# Build statically linked u-boot env tools (fw_printenv/fw_setenv) from the
# vendored source tarball and export just the two binaries.
FROM alpine:3.23 AS build
RUN apk add --no-cache \
build-base \
bison \
flex \
linux-headers \
file \
binutils \
tar
WORKDIR /src
ARG UBOOT_TAR
ARG UBOOT_VERSION
COPY ${UBOOT_TAR} /tmp/
RUN tar zxf "/tmp/$(basename "${UBOOT_TAR}")"
RUN make tools-only_defconfig
# Build the env tools using the supported target.
RUN make -j"$(nproc)" \
HOSTCC=gcc \
HOSTLD=gcc \
HOSTCFLAGS='-O2' \
HOSTLDFLAGS='-static' \
envtools
# fw_setenv is the same program; create the link ourselves.
RUN ln -sf fw_printenv tools/env/fw_setenv
RUN file tools/env/fw_printenv tools/env/fw_setenv
RUN readelf -d tools/env/fw_printenv || true
# Guard: a truly static binary must have no (NEEDED) dynamic entries.
RUN ! readelf -d tools/env/fw_printenv | grep -q '(NEEDED)'
FROM scratch AS export
COPY --from=build /src/tools/env/fw_printenv /fw_printenv
COPY --from=build /src/tools/env/fw_setenv /fw_setenv

86
clitools/go.mod Normal file
View File

@@ -0,0 +1,86 @@
module example.com/monok8s
go 1.26.0
require (
github.com/emicklei/go-restful/v3 v3.12.2
github.com/klauspost/compress v1.18.5
github.com/spf13/cobra v1.10.2
golang.org/x/sys v0.39.0
gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.35.0
k8s.io/apiextensions-apiserver v0.35.0
k8s.io/apimachinery v0.35.0
k8s.io/apiserver v0.35.0
k8s.io/cli-runtime v0.34.0
k8s.io/client-go v0.35.0
k8s.io/code-generator v0.35.0
k8s.io/klog/v2 v2.130.1
sigs.k8s.io/controller-tools v0.20.1
sigs.k8s.io/yaml v1.6.0
)
require (
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/fatih/color v1.18.0 // indirect
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
github.com/go-errors/errors v1.4.2 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/gobuffalo/flect v1.0.3 // indirect
github.com/google/btree v1.1.3 // indirect
github.com/google/gnostic-models v0.7.0 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/moby/term v0.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/prometheus/client_golang v1.23.2 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.66.1 // indirect
github.com/prometheus/procfs v0.16.1 // indirect
github.com/spf13/pflag v1.0.10 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/xlab/treeprint v1.2.0 // indirect
go.opentelemetry.io/otel v1.36.0 // indirect
go.opentelemetry.io/otel/trace v1.36.0 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/mod v0.31.0 // indirect
golang.org/x/net v0.48.0 // indirect
golang.org/x/oauth2 v0.30.0 // indirect
golang.org/x/sync v0.19.0 // indirect
golang.org/x/term v0.38.0 // indirect
golang.org/x/text v0.32.0 // indirect
golang.org/x/time v0.9.0 // indirect
golang.org/x/tools v0.40.0 // indirect
google.golang.org/protobuf v1.36.8 // indirect
gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
k8s.io/component-base v0.35.0 // indirect
k8s.io/gengo/v2 v2.0.0-20250922181213-ec3ebc5fd46b // indirect
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 // indirect
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect
sigs.k8s.io/kustomize/api v0.20.1 // indirect
sigs.k8s.io/kustomize/kyaml v0.20.1 // indirect
sigs.k8s.io/randfill v1.0.0 // indirect
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
)

267
clitools/go.sum Normal file
View File

@@ -0,0 +1,267 @@
cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY=
cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0=
github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/gobuffalo/flect v1.0.3 h1:xeWBM2nui+qnVvNM4S3foBhCAL2XgPU+a7FdpelbTq4=
github.com/gobuffalo/flect v1.0.3/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs=
github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
github.com/google/cel-go v0.26.0 h1:DPGjXackMpJWH680oGY4lZhYjIameYmR+/6RBdDGmaI=
github.com/google/cel-go v0.26.0/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM=
github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8=
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA=
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/klauspost/compress v1.18.5 h1:/h1gH5Ce+VWNLSWqPzOVn6XBO+vJbCNGvjoaGBFW2IE=
github.com/klauspost/compress v1.18.5/go.mod h1:cwPg85FWrGar70rWktvGQj8/hthj3wpl0PGDogxkrSQ=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0=
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns=
github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=
github.com/onsi/gomega v1.38.3 h1:eTX+W6dobAYfFeGC2PV6RwXRu/MyT+cQguijutvkpSM=
github.com/onsi/gomega v1.38.3/go.mod h1:ZCU1pkQcXDO5Sl9/VVEGlDyp+zm0m1cmeG5TOzLgdh4=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs=
github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA=
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs=
github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ=
github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q=
go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg=
go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 h1:OeNbIYk/2C15ckl7glBlOBp5+WlYsOElzTNmiPW/x60=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 h1:tgJ0uaNS4c98WRNUEx5U3aDlrDOI5Rs+1Vifcw4DJ8U=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0/go.mod h1:U7HYyW0zt/a9x5J1Kjs+r1f/d4ZHnYFclhYY2+YbeoE=
go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE=
go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs=
go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs=
go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY=
go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4=
go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI=
golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg=
golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU=
golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q=
golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg=
golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA=
golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc=
golang.org/x/tools/go/expect v0.1.1-deprecated h1:jpBZDwmgPhXsKZC6WhL20P4b/wmnpsEAGHaNy0n/rJM=
golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY=
golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM=
golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated/go.mod h1:RVAQXBGNv1ib0J382/DPCRS/BPnsGebyM1Gj5VSDpG8=
google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950=
google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a h1:v2PbRU4K3llS09c7zodFpNePeamkAwG3mPrAery9VeE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250528174236-200df99c418a/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
google.golang.org/grpc v1.72.2 h1:TdbGzwb82ty4OusHWepvFWGLgIbNo1/SUynEN0ssqv8=
google.golang.org/grpc v1.72.2/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM=
google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc=
google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo=
gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.35.0 h1:iBAU5LTyBI9vw3L5glmat1njFK34srdLmktWwLTprlY=
k8s.io/api v0.35.0/go.mod h1:AQ0SNTzm4ZAczM03QH42c7l3bih1TbAXYo0DkF8ktnA=
k8s.io/apiextensions-apiserver v0.35.0 h1:3xHk2rTOdWXXJM+RDQZJvdx0yEOgC0FgQ1PlJatA5T4=
k8s.io/apiextensions-apiserver v0.35.0/go.mod h1:E1Ahk9SADaLQ4qtzYFkwUqusXTcaV2uw3l14aqpL2LU=
k8s.io/apimachinery v0.35.0 h1:Z2L3IHvPVv/MJ7xRxHEtk6GoJElaAqDCCU0S6ncYok8=
k8s.io/apimachinery v0.35.0/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns=
k8s.io/apiserver v0.35.0 h1:CUGo5o+7hW9GcAEF3x3usT3fX4f9r8xmgQeCBDaOgX4=
k8s.io/apiserver v0.35.0/go.mod h1:QUy1U4+PrzbJaM3XGu2tQ7U9A4udRRo5cyxkFX0GEds=
k8s.io/cli-runtime v0.34.0 h1:N2/rUlJg6TMEBgtQ3SDRJwa8XyKUizwjlOknT1mB2Cw=
k8s.io/cli-runtime v0.34.0/go.mod h1:t/skRecS73Piv+J+FmWIQA2N2/rDjdYSQzEE67LUUs8=
k8s.io/client-go v0.35.0 h1:IAW0ifFbfQQwQmga0UdoH0yvdqrbwMdq9vIFEhRpxBE=
k8s.io/client-go v0.35.0/go.mod h1:q2E5AAyqcbeLGPdoRB+Nxe3KYTfPce1Dnu1myQdqz9o=
k8s.io/code-generator v0.35.0 h1:TvrtfKYZTm9oDF2z+veFKSCcgZE3Igv0svY+ehCmjHQ=
k8s.io/code-generator v0.35.0/go.mod h1:iS1gvVf3c/T71N5DOGYO+Gt3PdJ6B9LYSvIyQ4FHzgc=
k8s.io/component-base v0.35.0 h1:+yBrOhzri2S1BVqyVSvcM3PtPyx5GUxCK2tinZz1G94=
k8s.io/component-base v0.35.0/go.mod h1:85SCX4UCa6SCFt6p3IKAPej7jSnF3L8EbfSyMZayJR0=
k8s.io/gengo/v2 v2.0.0-20250922181213-ec3ebc5fd46b h1:gMplByicHV/TJBizHd9aVEsTYoJBnnUAT5MHlTkbjhQ=
k8s.io/gengo/v2 v2.0.0-20250922181213-ec3ebc5fd46b/go.mod h1:CgujABENc3KuTrcsdpGmrrASjtQsWCT7R99mEV4U/fM=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE=
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ=
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck=
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
sigs.k8s.io/controller-tools v0.20.1 h1:gkfMt9YodI0K85oT8rVi80NTXO/kDmabKR5Ajn5GYxs=
sigs.k8s.io/controller-tools v0.20.1/go.mod h1:b4qPmjGU3iZwqn34alUU5tILhNa9+VXK+J3QV0fT/uU=
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg=
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
sigs.k8s.io/kustomize/api v0.20.1 h1:iWP1Ydh3/lmldBnH/S5RXgT98vWYMaTUL1ADcr+Sv7I=
sigs.k8s.io/kustomize/api v0.20.1/go.mod h1:t6hUFxO+Ph0VxIk1sKp1WS0dOjbPCtLJ4p8aADLwqjM=
sigs.k8s.io/kustomize/kyaml v0.20.1 h1:PCMnA2mrVbRP3NIB6v9kYCAc38uvFLVs8j/CD567A78=
sigs.k8s.io/kustomize/kyaml v0.20.1/go.mod h1:0EmkQHRUsJxY8Ug9Niig1pUMSCGHxQ5RklbpV/Ri6po=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=

View File

@@ -0,0 +1 @@
/* MIT License */

9
clitools/hack/tool.go Normal file
View File

@@ -0,0 +1,9 @@
//go:build tools
// +build tools
package tools
import (
_ "k8s.io/code-generator"
_ "sigs.k8s.io/controller-tools/cmd/controller-gen"
)

36
clitools/hack/update-codegen.sh Executable file
View File

@@ -0,0 +1,36 @@
#!/usr/bin/env bash
# update-codegen.sh — regenerate Kubernetes API machinery for pkg/apis.
# Runs controller-gen for DeepCopy methods, then the kube_codegen.sh helpers
# to produce typed clientsets, listers, and informers under pkg/generated.
# Requires controller-gen on PATH and k8s.io/code-generator in go.mod
# (both are pinned via hack/tool.go).
set -euo pipefail
# Module root = parent directory of this script (the clitools/ module).
MODULE_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
echo "MODULE ROOT: ${MODULE_ROOT}"
cd "${MODULE_ROOT}"
# Pre-create the output package directories for the generated clients.
mkdir -p \
"${MODULE_ROOT}/pkg/generated/clientset" \
"${MODULE_ROOT}/pkg/generated/listers" \
"${MODULE_ROOT}/pkg/generated/informers"
# Generate DeepCopy implementations for all API types under pkg/apis.
controller-gen \
object:headerFile=hack/boilerplate.go.txt \
paths=./pkg/apis/...
# Module import path — used as the --output-pkg prefix below.
MODULE="$(go list -m -f '{{.Path}}')"
echo "MODULE: ${MODULE}"
# Locate the k8s.io/code-generator module directory in the module cache.
CODEGEN_PKG="$(go list -f '{{.Dir}}' -m k8s.io/code-generator)"
echo "CODEGEN PKG: ${CODEGEN_PKG}"
# kube_codegen.sh defines the kube::codegen::* helper functions used below.
source "${CODEGEN_PKG}/kube_codegen.sh"
mkdir -p "${MODULE_ROOT}/pkg/generated"
# Generate helper code (register/defaulter-style helpers) for pkg/apis.
kube::codegen::gen_helpers \
--boilerplate "${MODULE_ROOT}/hack/boilerplate.go.txt" \
"${MODULE_ROOT}/pkg/apis"
# Generate the typed clientset (with watch support), listers, and informers
# into pkg/generated, rooted at the module's import path.
kube::codegen::gen_client \
--with-watch \
--output-dir "${MODULE_ROOT}/pkg/generated" \
--output-pkg "${MODULE}/pkg/generated" \
--boilerplate "${MODULE_ROOT}/hack/boilerplate.go.txt" \
"${MODULE_ROOT}/pkg/apis"

183
clitools/makefile Normal file
View File

@@ -0,0 +1,183 @@
# Build orchestration for the clitools module: produces the node-control
# agent image (multi-arch via buildx), CRD assets, u-boot tools, and the
# generated buildinfo source file.
include ../build.env
-include ../build.env.work
export

BUILD_PLATFORM ?= linux/amd64
# Should be the same as upstream version in production
VERSION ?= dev
UBOOT_VERSION ?= v2026.01
# Target kube version
KUBE_VERSION ?= v1.33.3

GIT_REV := $(shell git rev-parse HEAD)
PACKAGES_DIR := packages
OUT_DIR := out
UBOOT_TOOLS_OUT := $(OUT_DIR)/uboot-tools
UBOOT_TAR := $(PACKAGES_DIR)/uboot-$(UBOOT_VERSION).tar.gz
BUILDINFO_FILE := pkg/buildinfo/buildinfo_gen.go
ASSETS_PATH := ./pkg/assets

# Local buildx builder + registry used for multi-arch image pushes.
BUILDX_BUILDER := container-builder
LOCAL_REGISTRY := registry
LOCAL_REGISTRY_PORT := 5000
CTL_BUILD_BASE_REPO := localhost:5000/monok8s/ctl-build-base
CTL_IMAGE_REPO := localhost:5000/monok8s/node-control
CTL_BUILD_BASE_IMAGE := $(CTL_BUILD_BASE_REPO):$(VERSION)
CTL_IMAGE := $(CTL_IMAGE_REPO):$(VERSION)
DOWNLOAD_PACKAGES_STAMP := $(PACKAGES_DIR)/.download-packages.stamp

$(PACKAGES_DIR):
	mkdir -p $@

$(OUT_DIR):
	mkdir -p $@

# Keep buildinfo host-side since it is just generated source and lets Docker see it in build context.
.buildinfo:
	@mkdir -p $(dir $(BUILDINFO_FILE))
	@printf '%s\n' \
		'package buildinfo' \
		'' \
		'const (' \
		'	Version = "$(VERSION)"' \
		'	KubeVersion = "$(KUBE_VERSION)"' \
		'	GitRevision = "$(GIT_REV)"' \
		'	Timestamp = "'$$(TZ=UTC date +%Y%m%d.%H%M%S)'"' \
		')' \
		> $(BUILDINFO_FILE)

# Create (or reuse) the docker-container buildx builder needed for
# multi-platform builds with push-to-local-registry output.
ensure-buildx:
	@if ! docker buildx inspect $(BUILDX_BUILDER) >/dev/null 2>&1; then \
		echo "Creating buildx builder $(BUILDX_BUILDER)..."; \
		docker buildx create \
			--name $(BUILDX_BUILDER) \
			--driver docker-container \
			--driver-opt network=host \
			--bootstrap --use; \
	else \
		echo "Using existing buildx builder $(BUILDX_BUILDER)"; \
		docker buildx use $(BUILDX_BUILDER); \
	fi

# Ensure a local registry:2 container is running on $(LOCAL_REGISTRY_PORT).
ensure-registry:
	@if ! docker container inspect $(LOCAL_REGISTRY) >/dev/null 2>&1; then \
		echo "Creating local registry..."; \
		docker run -d \
			--restart=always \
			-p $(LOCAL_REGISTRY_PORT):5000 \
			--name $(LOCAL_REGISTRY) \
			registry:2; \
	else \
		if [ "$$(docker inspect -f '{{.State.Running}}' $(LOCAL_REGISTRY))" != "true" ]; then \
			echo "Starting existing local registry..."; \
			docker start $(LOCAL_REGISTRY); \
		fi; \
	fi

# Download external packages once; the stamp file gates re-downloads.
$(DOWNLOAD_PACKAGES_STAMP): docker/download-packages.Dockerfile makefile | $(PACKAGES_DIR)
	docker build \
		-f docker/download-packages.Dockerfile \
		--build-arg UBOOT_VERSION=$(UBOOT_VERSION) \
		--output type=local,dest=./$(PACKAGES_DIR) .
	@touch $@

# Build u-boot tools for both platforms into $(UBOOT_TOOLS_OUT).
uboot-tools: $(DOWNLOAD_PACKAGES_STAMP)
	rm -rf "$(UBOOT_TOOLS_OUT)"
	mkdir -p "$(UBOOT_TOOLS_OUT)"
	docker buildx build \
		--platform linux/amd64,linux/arm64 \
		-f docker/uboot-tools.Dockerfile \
		--build-arg UBOOT_VERSION=$(UBOOT_VERSION) \
		--build-arg UBOOT_TAR=$(UBOOT_TAR) \
		--output type=local,dest=./$(UBOOT_TOOLS_OUT),platform-split=true .

# Multi-arch base image pushed to the (insecure) local registry.
ctl-build-base: ensure-buildx ensure-registry
	docker buildx build \
		--platform linux/amd64,linux/arm64 \
		-f docker/ctl-build-base.Dockerfile \
		-t $(CTL_BUILD_BASE_IMAGE) \
		--output type=image,push=true,registry.insecure=true .

# Generate CRD YAML and sync it into the embedded assets directory.
build-crds: ctl-build-base | $(OUT_DIR)
	mkdir -p "$(OUT_DIR)/crds"
	docker buildx build \
		--platform $(BUILD_PLATFORM) \
		-f docker/crdgen.Dockerfile \
		--build-arg BASE_IMAGE=$(CTL_BUILD_BASE_IMAGE) \
		--output type=local,dest=./$(OUT_DIR)/crds .
	rm -rf "$(ASSETS_PATH)/crds"
	mkdir -p "$(ASSETS_PATH)/crds"
	cp -R "$(OUT_DIR)/crds/." "$(ASSETS_PATH)/crds/"

# Multi-arch agent image pushed to the local registry.
build-agent: .buildinfo build-crds uboot-tools
	docker buildx build \
		--platform linux/amd64,linux/arm64 \
		-f docker/ctl-agent.Dockerfile \
		--build-arg BASE_IMAGE=$(CTL_BUILD_BASE_IMAGE) \
		--build-arg VERSION=$(VERSION) \
		-t $(CTL_IMAGE) \
		--output type=image,push=true,registry.insecure=true .

# Single-arch (arm64) agent image loaded into the local docker daemon.
build-local: .buildinfo build-crds uboot-tools
	docker buildx build \
		--platform linux/arm64 \
		-f docker/ctl-agent.Dockerfile \
		--build-arg BASE_IMAGE=$(CTL_BUILD_BASE_IMAGE) \
		--build-arg VERSION=$(VERSION) \
		--load \
		-t localhost/monok8s/node-control:$(VERSION) .

# Push the agent image to $(IMAGE_REPOSITORY); fails fast if it is unset.
push-agent: .buildinfo build-crds uboot-tools
	test -n "$(IMAGE_REPOSITORY)"
	docker buildx build \
		--platform linux/amd64,linux/arm64 \
		-f docker/ctl-agent.Dockerfile \
		--build-arg BASE_IMAGE=$(CTL_BUILD_BASE_IMAGE) \
		--build-arg VERSION=$(VERSION) \
		-t $(IMAGE_REPOSITORY)/node-control:$(VERSION) \
		--push .

run-agent:
	docker run --rm \
		-v "$$(pwd)/out:/work/out" \
		$(CTL_IMAGE) \
		agent --env-file /work/out/cluster.env

clean:
	-docker image rm localhost/monok8s/node-control:$(VERSION) >/dev/null 2>&1 || true
	rm -rf \
		$(OUT_DIR)/crds \
		$(BUILDINFO_FILE)

distclean: clean
	rm -rf $(OUT_DIR)

dockerclean:
	@echo "Removing tagged images..."
	- docker rmi \
		localhost/monok8s/ctl-build-base:$(VERSION) \
		localhost/monok8s/node-control:$(VERSION) \
		localhost/monok8s/crdgen:$(VERSION) \
		2>/dev/null || true
	@echo "Removing dangling build cache/images..."
	- docker image prune -f
	- docker builder prune -f

pkgclean:
	rm -rf $(PACKAGES_DIR)

all: build-agent build-local

# FIX: the previous .PHONY list named nonexistent targets
# (build-agent-local, run-agent-local) and omitted real phony targets
# (distclean, pkgclean, ctl-build-base).
.PHONY: \
	all clean distclean dockerclean pkgclean \
	.buildinfo ensure-buildx ensure-registry \
	ctl-build-base build-crds build-local build-agent push-agent \
	uboot-tools run-agent

View File

@@ -0,0 +1,55 @@
// Package v1alpha1 contains the API type definitions and scheme
// registration for the monok8s.io/v1alpha1 API group.
//
// The kubebuilder markers below must remain attached to the package
// clause: object:generate enables DeepCopy generation for all types in
// the package, and groupName sets the API group for CRD generation.
//
// +kubebuilder:object:generate=true
// +groupName=monok8s.io
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// Well-known names, paths, and identifiers for the monok8s.io/v1alpha1
// API group, shared by the controller and agent components.
var (
Group = "monok8s.io"
Version = "v1alpha1"
APIVersion = "monok8s.io/v1alpha1"
// AltPartDeviceLink is the device symlink for the alternate OS partition.
AltPartDeviceLink = "/dev/mksaltpart"
// BootStateFile holds the boot-state environment read at runtime.
BootStateFile = "/run/monok8s/boot-state.env"
// NOTE(review): example.com looks like a placeholder host — confirm the
// real catalog endpoint before release.
CatalogURL = "https://example.com/monok8s.io/v1alpha1/catalog.yaml"
NodeControlKey = "monok8s.io/node-control"
NodeControlName = "node-control"
ControllerName = "node-controller"
NodeAgentName = "node-agent"
EnvConfigDir = "/opt/monok8s/config"
Label = "monok8s.io/label"
// CRD resource names (plural.group) for this API group.
MonoKSConfigCRD = "monoksconfigs.monok8s.io"
OSUpgradeCRD = "osupgrades.monok8s.io"
)
// Scheme registration plumbing, following the standard Kubernetes
// API-group layout: SchemeGroupVersion identifies the group/version and
// AddToScheme registers all known types via addKnownTypes.
var (
SchemeGroupVersion = schema.GroupVersion{Group: Group, Version: Version}
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
AddToScheme = SchemeBuilder.AddToScheme
)
// addKnownTypes registers all v1alpha1 API objects with the given scheme
// under SchemeGroupVersion, then adds the shared group-version metadata
// types via metav1.AddToGroupVersion. It always returns nil, matching the
// runtime.SchemeBuilder callback signature.
func addKnownTypes(scheme *runtime.Scheme) error {
	groupObjects := []runtime.Object{
		&MonoKSConfig{},
		&MonoKSConfigList{},
		&OSUpgrade{},
		&OSUpgradeList{},
		&OSUpgradeProgress{},
		&OSUpgradeProgressList{},
	}
	scheme.AddKnownTypes(SchemeGroupVersion, groupObjects...)
	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
	return nil
}
// NodeAgentLabels returns a fresh map of the standard app.kubernetes.io
// recommended labels applied to node-agent workloads managed by
// node-control. Callers receive a new map on every call and may mutate it.
func NodeAgentLabels() map[string]string {
	labels := make(map[string]string, 4)
	labels["app.kubernetes.io/name"] = NodeAgentName
	labels["app.kubernetes.io/component"] = "agent"
	labels["app.kubernetes.io/part-of"] = "monok8s"
	labels["app.kubernetes.io/managed-by"] = NodeControlName
	return labels
}

View File

@@ -0,0 +1,76 @@
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// MonoKSConfig is the declarative bootstrap configuration for a monok8s
// node; its Spec (notably Spec.Network) is read by the bootstrap step
// registry to drive node setup.
type MonoKSConfig struct {
	metav1.TypeMeta   `json:",inline" yaml:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
	// Spec holds the desired node/cluster bootstrap configuration.
	Spec MonoKSConfigSpec `json:"spec,omitempty" yaml:"spec,omitempty"`
	// Status is nil until something first reports bootstrap progress.
	Status *MonoKSConfigStatus `json:"status,omitempty" yaml:"status,omitempty"`
}

// MonoKSConfigList is the list type for MonoKSConfig.
// NOTE(review): unlike MonoKSConfig it carries no yaml tags — confirm this
// type is never YAML-marshalled directly before relying on field names.
type MonoKSConfigList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []MonoKSConfig `json:"items"`
}
// MonoKSConfigSpec describes how a node is bootstrapped and joined into (or
// initialized as) a Kubernetes cluster. All fields are optional at the
// schema level; which ones matter presumably depends on ClusterRole and
// InitControlPlane — confirm with the agent's classification step.
type MonoKSConfigSpec struct {
	// KubernetesVersion pins the Kubernetes version to install/join with.
	KubernetesVersion string `json:"kubernetesVersion,omitempty" yaml:"kubernetesVersion,omitempty"`
	// NodeName is this node's Kubernetes node name.
	NodeName string `json:"nodeName,omitempty" yaml:"nodeName,omitempty"`
	// ClusterName / ClusterDomain identify the target cluster.
	ClusterName   string `json:"clusterName,omitempty" yaml:"clusterName,omitempty"`
	ClusterDomain string `json:"clusterDomain,omitempty" yaml:"clusterDomain,omitempty"`
	// ClusterRole is a free-form role string (no enum enforced in-schema).
	ClusterRole string `json:"clusterRole,omitempty" yaml:"clusterRole,omitempty"`
	// InitControlPlane requests initializing a new control plane rather
	// than joining an existing one.
	InitControlPlane bool `json:"initControlPlane,omitempty" yaml:"initControlPlane,omitempty"`
	// EnableNodeControl toggles deployment of the node-control components.
	EnableNodeControl bool `json:"enableNodeControl,omitempty" yaml:"enableNodeControl,omitempty"`
	// Cluster networking CIDRs and API server addressing.
	PodSubnet                 string `json:"podSubnet,omitempty" yaml:"podSubnet,omitempty"`
	ServiceSubnet             string `json:"serviceSubnet,omitempty" yaml:"serviceSubnet,omitempty"`
	APIServerAdvertiseAddress string `json:"apiServerAdvertiseAddress,omitempty" yaml:"apiServerAdvertiseAddress,omitempty"`
	APIServerEndpoint         string `json:"apiServerEndpoint,omitempty" yaml:"apiServerEndpoint,omitempty"`
	// ContainerRuntimeEndpoint is the CRI socket address.
	ContainerRuntimeEndpoint string `json:"containerRuntimeEndpoint,omitempty" yaml:"containerRuntimeEndpoint,omitempty"`
	// Join/init credentials — presumably kubeadm-style token, CA-cert hash,
	// and control-plane certificate key; TODO confirm against the init code.
	BootstrapToken           string `json:"bootstrapToken,omitempty" yaml:"bootstrapToken,omitempty"`
	DiscoveryTokenCACertHash string `json:"discoveryTokenCACertHash,omitempty" yaml:"discoveryTokenCACertHash,omitempty"`
	ControlPlaneCertKey      string `json:"controlPlaneCertKey,omitempty" yaml:"controlPlaneCertKey,omitempty"`
	// CNIPlugin names the CNI to configure (see ConfigureDefaultCNI step).
	CNIPlugin string `json:"cniPlugin,omitempty" yaml:"cniPlugin,omitempty"`
	// AllowSchedulingOnControlPlane removes the control-plane taint when set.
	AllowSchedulingOnControlPlane bool `json:"allowSchedulingOnControlPlane,omitempty" yaml:"allowSchedulingOnControlPlane,omitempty"`
	// SkipImageCheck skips the pre-pull/image verification step.
	SkipImageCheck bool `json:"skipImageCheck,omitempty" yaml:"skipImageCheck,omitempty"`
	// kube-proxy NodePort address ranges and extra API server SANs.
	KubeProxyNodePortAddresses []string `json:"kubeProxyNodePortAddresses,omitempty" yaml:"kubeProxyNodePortAddresses,omitempty"`
	SubjectAltNames            []string `json:"subjectAltNames,omitempty" yaml:"subjectAltNames,omitempty"`
	// NodeLabels are applied to the Kubernetes Node object.
	NodeLabels map[string]string `json:"nodeLabels,omitempty" yaml:"nodeLabels,omitempty"`
	// Network carries host-level (non-Kubernetes) network settings.
	Network NetworkSpec `json:"network,omitempty" yaml:"network,omitempty"`
}

// NetworkSpec holds host network settings consumed by the bootstrap steps
// ConfigureHostname, ConfigureMgmtInterface, and ConfigureDNS.
type NetworkSpec struct {
	// Hostname to set on the node.
	Hostname string `json:"hostname,omitempty" yaml:"hostname,omitempty"`
	// Management interface name, its CIDR address, and default gateway.
	ManagementIface string `json:"managementIface,omitempty" yaml:"managementIface,omitempty"`
	ManagementCIDR  string `json:"managementCIDR,omitempty" yaml:"managementCIDR,omitempty"`
	ManagementGW    string `json:"managementGateway,omitempty" yaml:"managementGateway,omitempty"`
	// DNS resolver addresses and search domains for the host.
	DNSNameservers   []string `json:"dnsNameservers,omitempty" yaml:"dnsNameservers,omitempty"`
	DNSSearchDomains []string `json:"dnsSearchDomains,omitempty" yaml:"dnsSearchDomains,omitempty"`
}

// MonoKSConfigStatus reports bootstrap progress for a MonoKSConfig.
type MonoKSConfigStatus struct {
	// Phase is a free-form phase string (no enum enforced in-schema).
	Phase string `json:"phase,omitempty"`
	// ObservedGeneration is the metadata.generation last acted upon.
	ObservedGeneration int64 `json:"observedGeneration,omitempty"`
	// Conditions follow the standard metav1.Condition conventions.
	Conditions []metav1.Condition `json:"conditions,omitempty"`
	// AppliedSteps records the names of bootstrap steps already applied.
	AppliedSteps []string `json:"appliedSteps,omitempty"`
}
// DeepCopyObject returns a deep copy of the receiver as a runtime.Object.
//
// It delegates to the generated DeepCopy so nested reference fields
// (ObjectMeta labels/annotations, Spec slices/maps, the Status pointer) are
// duplicated rather than shared. The previous `out := *in` shallow copy
// aliased all of those, so mutating a "copy" (e.g. one taken from an
// informer cache) would corrupt the original object.
func (in *MonoKSConfig) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyObject returns a deep copy of the receiver as a runtime.Object.
//
// It delegates to the generated DeepCopy so the Items slice (and every item
// within it) is duplicated. The previous `out := *in` shallow copy shared
// the Items backing array with the original list.
func (in *MonoKSConfigList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

View File

@@ -0,0 +1,171 @@
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// OSUpgradePhase is the controller-facing admission phase of an OSUpgrade.
type OSUpgradePhase string

const (
	// Pending: the request has not yet been evaluated.
	OSUpgradePhasePending OSUpgradePhase = "Pending"
	// Accepted / Rejected: the controller's decision on the request.
	OSUpgradePhaseAccepted OSUpgradePhase = "Accepted"
	OSUpgradePhaseRejected OSUpgradePhase = "Rejected"
)

// OSUpgradeProgressPhase tracks a single node's progress through an OS
// upgrade.
// NOTE(review): these values are lowercase while OSUpgradePhase values are
// TitleCase — intentional? Worth unifying before the API is frozen.
type OSUpgradeProgressPhase string

const (
	OSUpgradeProgressPhasePending     OSUpgradeProgressPhase = "pending"
	OSUpgradeProgressPhaseDownloading OSUpgradeProgressPhase = "downloading"
	OSUpgradeProgressPhaseWriting     OSUpgradeProgressPhase = "writing"
	OSUpgradeProgressPhaseVerifying   OSUpgradeProgressPhase = "verifying"
	OSUpgradeProgressPhaseCompleted   OSUpgradeProgressPhase = "completed"
	OSUpgradeProgressPhaseFailed      OSUpgradeProgressPhase = "failed"
	OSUpgradeProgressPhaseRejected    OSUpgradeProgressPhase = "rejected"
	// Rebooting is the point-of-no-return phase.
	//
	// Once a node reaches Rebooting, the agent may have already changed the boot
	// environment and requested a reboot. The controller must not supersede,
	// retry, retarget, or otherwise mutate this progress object until the node
	// comes back and the agent reports Completed or Failed.
	OSUpgradeProgressPhaseRebooting OSUpgradeProgressPhase = "rebooting"
)
// OSUpgrade is a cluster operator's request to move nodes to a desired OS
// version; the controller resolves the version (via the catalog) and records
// its decision in Status.
//
// +genclient
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:resource:scope=Namespaced,shortName=osu
// +kubebuilder:printcolumn:name="Desired",type=string,JSONPath=`.spec.desiredVersion`
// +kubebuilder:printcolumn:name="Resolved",type=string,JSONPath=`.status.resolvedVersion`
// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase`
type OSUpgrade struct {
	metav1.TypeMeta   `json:",inline" yaml:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
	// Specification of the desired behavior of the OSUpgrade.
	Spec OSUpgradeSpec `json:"spec,omitempty" yaml:"spec,omitempty"`
	// Most recently observed status of the OSUpgrade. Nil until first set.
	Status *OSUpgradeStatus `json:"status,omitempty" yaml:"status,omitempty"`
}

// OSUpgradeList is the list type for OSUpgrade.
//
// +kubebuilder:object:root=true
type OSUpgradeList struct {
	metav1.TypeMeta `json:",inline" yaml:",inline"`
	metav1.ListMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
	Items           []OSUpgrade `json:"items" yaml:"items"`
}
// OSUpgradeSpec declares the target OS version, how aggressively to flash,
// where to find the version catalog, and which nodes are in scope.
type OSUpgradeSpec struct {
	// DesiredVersion is the OS version operators want nodes to reach.
	// +kubebuilder:validation:MinLength=1
	DesiredVersion string `json:"desiredVersion,omitempty" yaml:"desiredVersion,omitempty"`
	// +kubebuilder:validation:Enum=fast;balanced;safe
	// +kubebuilder:default=balanced
	// Profiles (TODO)
	// safe - api-server can be responsive most of the time
	// balanced - api-server can sometimes be unresponsive
	// fast - disable throttling. Good for worker node.
	FlashProfile string `json:"flashProfile,omitempty" yaml:"flashProfile,omitempty"`
	// Catalog overrides the default version-catalog source (see CatalogURL).
	Catalog *VersionCatalogSource `json:"catalog,omitempty" yaml:"catalog,omitempty"`
	// NodeSelector limits which nodes this upgrade applies to; nil semantics
	// (all nodes vs. none) are decided by the controller — confirm there.
	NodeSelector *metav1.LabelSelector `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty"`
}

// VersionCatalogSource points at a version catalog. The three fields are
// alternative sources (URL, inline document, or ConfigMap reference);
// precedence when several are set is decided by the consumer.
type VersionCatalogSource struct {
	URL       string `json:"url,omitempty" yaml:"url,omitempty"`
	Inline    string `json:"inline,omitempty" yaml:"inline,omitempty"`
	ConfigMap string `json:"configMapRef,omitempty" yaml:"configMapRef,omitempty"`
}

// OSUpgradeStatus records the controller's decision on an OSUpgrade.
type OSUpgradeStatus struct {
	// Phase is Pending/Accepted/Rejected (see OSUpgradePhase).
	Phase OSUpgradePhase `json:"phase,omitempty" yaml:"phase,omitempty"`
	// ResolvedVersion is the concrete version the catalog resolved
	// DesiredVersion to.
	ResolvedVersion string `json:"resolvedVersion,omitempty" yaml:"resolvedVersion,omitempty"`
	// ObservedGeneration is the metadata.generation last acted upon.
	ObservedGeneration int64 `json:"observedGeneration,omitempty" yaml:"observedGeneration,omitempty"`
	// Conditions follow the standard metav1.Condition conventions.
	Conditions []metav1.Condition `json:"conditions,omitempty" yaml:"conditions,omitempty"`
	// Reason/Message give a machine-readable and human-readable explanation
	// of the current phase.
	Reason  string `json:"reason,omitempty" yaml:"reason,omitempty"`
	Message string `json:"message,omitempty" yaml:"message,omitempty"`
}
// OSUpgradeProgress tracks one node's execution of an accepted OSUpgrade
// (referenced via Spec.SourceRef); the node agent reports per-step progress
// in Status.
//
// +genclient
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:resource:scope=Namespaced,shortName=osup
// +kubebuilder:printcolumn:name="Node",type=string,JSONPath=`.spec.nodeName`
// +kubebuilder:printcolumn:name="Source",type=string,JSONPath=`.spec.sourceRef.name`
// +kubebuilder:printcolumn:name="Current",type=string,JSONPath=`.status.currentVersion`
// +kubebuilder:printcolumn:name="Target",type=string,JSONPath=`.status.targetVersion`
// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase`
type OSUpgradeProgress struct {
	metav1.TypeMeta   `json:",inline" yaml:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
	// Specification of the desired behavior of the OSUpgradeProgress.
	Spec OSUpgradeProgressSpec `json:"spec,omitempty" yaml:"spec,omitempty"`
	// Most recently observed status of the OSUpgradeProgress. Nil until the
	// agent first reports.
	Status *OSUpgradeProgressStatus `json:"status,omitempty" yaml:"status,omitempty"`
}

// OSUpgradeProgressList is the list type for OSUpgradeProgress.
//
// +kubebuilder:object:root=true
type OSUpgradeProgressList struct {
	metav1.TypeMeta `json:",inline" yaml:",inline"`
	metav1.ListMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"`
	Items           []OSUpgradeProgress `json:"items" yaml:"items"`
}

// OSUpgradeProgressSpec binds a node to the OSUpgrade it executes.
type OSUpgradeProgressSpec struct {
	// SourceRef points at the OSUpgrade this progress object executes.
	SourceRef OSUpgradeSourceRef `json:"sourceRef,omitempty" yaml:"sourceRef,omitempty"`
	// RetryNonce triggers a retry when its value changes.
	// Users can update this field (for example, set it to the current time)
	// to request a retry of a failed OS upgrade.
	RetryNonce string `json:"retryNonce,omitempty" yaml:"retryNonce,omitempty"`
	// NodeName is the node this progress object belongs to.
	NodeName string `json:"nodeName,omitempty" yaml:"nodeName,omitempty"`
}

// OSUpgradeSourceRef is a namespaced reference to an OSUpgrade object.
type OSUpgradeSourceRef struct {
	Name      string `json:"name,omitempty" yaml:"name,omitempty"`
	Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"`
}
// OSUpgradeProgressStatus is the agent-reported state of one node's upgrade,
// including the multi-hop plan when an upgrade chains through intermediate
// versions.
type OSUpgradeProgressStatus struct {
	// CurrentVersion / TargetVersion are the node's OS version now and the
	// version this upgrade is driving toward.
	CurrentVersion string `json:"currentVersion,omitempty" yaml:"currentVersion,omitempty"`
	TargetVersion  string `json:"targetVersion,omitempty" yaml:"targetVersion,omitempty"`
	// Phase is one of the OSUpgradeProgressPhase values; see the Rebooting
	// const for the point-of-no-return contract.
	Phase OSUpgradeProgressPhase `json:"phase,omitempty" yaml:"phase,omitempty"`
	// Timestamps: start, completion, and last agent heartbeat/update.
	StartedAt     *metav1.Time `json:"startedAt,omitempty" yaml:"startedAt,omitempty"`
	CompletedAt   *metav1.Time `json:"completedAt,omitempty" yaml:"completedAt,omitempty"`
	LastUpdatedAt *metav1.Time `json:"lastUpdatedAt,omitempty" yaml:"lastUpdatedAt,omitempty"`
	// RetryCount counts retries performed so far.
	RetryCount int32 `json:"retryCount,omitempty" yaml:"retryCount,omitempty"`
	// InactivePartition names the partition being flashed — presumably the
	// inactive half of the A/B scheme; confirm with the agent code.
	InactivePartition string `json:"inactivePartition,omitempty" yaml:"inactivePartition,omitempty"`
	// FailureReason/Message explain a failed phase.
	FailureReason string `json:"failureReason,omitempty" yaml:"failureReason,omitempty"`
	Message       string `json:"message,omitempty" yaml:"message,omitempty"`
	// PlannedPath lists the version hops for a chained upgrade;
	// CurrentStep/CurrentFrom/CurrentTo describe the hop in flight.
	PlannedPath []string `json:"plannedPath,omitempty" yaml:"plannedPath,omitempty"`
	CurrentStep int32    `json:"currentStep,omitempty" yaml:"currentStep,omitempty"`
	CurrentFrom string   `json:"currentFrom,omitempty" yaml:"currentFrom,omitempty"`
	CurrentTo   string   `json:"currentTo,omitempty" yaml:"currentTo,omitempty"`
	// ObservedRetryNonce records the last retryNonce value the agent accepted.
	// When spec.retryNonce is changed by the user and differs from this value,
	// the agent may retry a failed upgrade.
	// +optional
	ObservedRetryNonce string `json:"observedRetryNonce,omitempty"`
}
// StatusPhase returns the current phase as a plain string, or the empty
// string when the OSUpgrade has no status yet.
func (osu OSUpgrade) StatusPhase() string {
	if osu.Status == nil {
		return ""
	}
	return string(osu.Status.Phase)
}
// StatusPhase returns the current phase as a plain string, or the empty
// string when the OSUpgradeProgress has no status yet.
func (osup OSUpgradeProgress) StatusPhase() string {
	if osup.Status == nil {
		return ""
	}
	return string(osup.Status.Phase)
}

View File

@@ -0,0 +1,395 @@
//go:build !ignore_autogenerated

/* MIT License */

// Code generated by controller-gen. DO NOT EDIT.
// NOTE(review): machine-generated deepcopy support for the v1alpha1 types.
// Regenerate with controller-gen (object:generate) after changing any type
// in this package; never edit these bodies by hand.

package v1alpha1

import (
	"k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MonoKSConfig) DeepCopyInto(out *MonoKSConfig) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	if in.Status != nil {
		in, out := &in.Status, &out.Status
		*out = new(MonoKSConfigStatus)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonoKSConfig.
func (in *MonoKSConfig) DeepCopy() *MonoKSConfig {
	if in == nil {
		return nil
	}
	out := new(MonoKSConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MonoKSConfigList) DeepCopyInto(out *MonoKSConfigList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]MonoKSConfig, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonoKSConfigList.
func (in *MonoKSConfigList) DeepCopy() *MonoKSConfigList {
	if in == nil {
		return nil
	}
	out := new(MonoKSConfigList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MonoKSConfigSpec) DeepCopyInto(out *MonoKSConfigSpec) {
	*out = *in
	if in.KubeProxyNodePortAddresses != nil {
		in, out := &in.KubeProxyNodePortAddresses, &out.KubeProxyNodePortAddresses
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.SubjectAltNames != nil {
		in, out := &in.SubjectAltNames, &out.SubjectAltNames
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.NodeLabels != nil {
		in, out := &in.NodeLabels, &out.NodeLabels
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	in.Network.DeepCopyInto(&out.Network)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonoKSConfigSpec.
func (in *MonoKSConfigSpec) DeepCopy() *MonoKSConfigSpec {
	if in == nil {
		return nil
	}
	out := new(MonoKSConfigSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MonoKSConfigStatus) DeepCopyInto(out *MonoKSConfigStatus) {
	*out = *in
	if in.Conditions != nil {
		in, out := &in.Conditions, &out.Conditions
		*out = make([]v1.Condition, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.AppliedSteps != nil {
		in, out := &in.AppliedSteps, &out.AppliedSteps
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonoKSConfigStatus.
func (in *MonoKSConfigStatus) DeepCopy() *MonoKSConfigStatus {
	if in == nil {
		return nil
	}
	out := new(MonoKSConfigStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) {
	*out = *in
	if in.DNSNameservers != nil {
		in, out := &in.DNSNameservers, &out.DNSNameservers
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.DNSSearchDomains != nil {
		in, out := &in.DNSSearchDomains, &out.DNSSearchDomains
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSpec.
func (in *NetworkSpec) DeepCopy() *NetworkSpec {
	if in == nil {
		return nil
	}
	out := new(NetworkSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OSUpgrade) DeepCopyInto(out *OSUpgrade) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	if in.Status != nil {
		in, out := &in.Status, &out.Status
		*out = new(OSUpgradeStatus)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSUpgrade.
func (in *OSUpgrade) DeepCopy() *OSUpgrade {
	if in == nil {
		return nil
	}
	out := new(OSUpgrade)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *OSUpgrade) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OSUpgradeList) DeepCopyInto(out *OSUpgradeList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]OSUpgrade, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSUpgradeList.
func (in *OSUpgradeList) DeepCopy() *OSUpgradeList {
	if in == nil {
		return nil
	}
	out := new(OSUpgradeList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *OSUpgradeList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OSUpgradeProgress) DeepCopyInto(out *OSUpgradeProgress) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	out.Spec = in.Spec
	if in.Status != nil {
		in, out := &in.Status, &out.Status
		*out = new(OSUpgradeProgressStatus)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSUpgradeProgress.
func (in *OSUpgradeProgress) DeepCopy() *OSUpgradeProgress {
	if in == nil {
		return nil
	}
	out := new(OSUpgradeProgress)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *OSUpgradeProgress) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OSUpgradeProgressList) DeepCopyInto(out *OSUpgradeProgressList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]OSUpgradeProgress, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSUpgradeProgressList.
func (in *OSUpgradeProgressList) DeepCopy() *OSUpgradeProgressList {
	if in == nil {
		return nil
	}
	out := new(OSUpgradeProgressList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *OSUpgradeProgressList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OSUpgradeProgressSpec) DeepCopyInto(out *OSUpgradeProgressSpec) {
	*out = *in
	out.SourceRef = in.SourceRef
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSUpgradeProgressSpec.
func (in *OSUpgradeProgressSpec) DeepCopy() *OSUpgradeProgressSpec {
	if in == nil {
		return nil
	}
	out := new(OSUpgradeProgressSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OSUpgradeProgressStatus) DeepCopyInto(out *OSUpgradeProgressStatus) {
	*out = *in
	if in.StartedAt != nil {
		in, out := &in.StartedAt, &out.StartedAt
		*out = (*in).DeepCopy()
	}
	if in.CompletedAt != nil {
		in, out := &in.CompletedAt, &out.CompletedAt
		*out = (*in).DeepCopy()
	}
	if in.LastUpdatedAt != nil {
		in, out := &in.LastUpdatedAt, &out.LastUpdatedAt
		*out = (*in).DeepCopy()
	}
	if in.PlannedPath != nil {
		in, out := &in.PlannedPath, &out.PlannedPath
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSUpgradeProgressStatus.
func (in *OSUpgradeProgressStatus) DeepCopy() *OSUpgradeProgressStatus {
	if in == nil {
		return nil
	}
	out := new(OSUpgradeProgressStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OSUpgradeSourceRef) DeepCopyInto(out *OSUpgradeSourceRef) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSUpgradeSourceRef.
func (in *OSUpgradeSourceRef) DeepCopy() *OSUpgradeSourceRef {
	if in == nil {
		return nil
	}
	out := new(OSUpgradeSourceRef)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OSUpgradeSpec) DeepCopyInto(out *OSUpgradeSpec) {
	*out = *in
	if in.Catalog != nil {
		in, out := &in.Catalog, &out.Catalog
		*out = new(VersionCatalogSource)
		**out = **in
	}
	if in.NodeSelector != nil {
		in, out := &in.NodeSelector, &out.NodeSelector
		*out = new(v1.LabelSelector)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSUpgradeSpec.
func (in *OSUpgradeSpec) DeepCopy() *OSUpgradeSpec {
	if in == nil {
		return nil
	}
	out := new(OSUpgradeSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OSUpgradeStatus) DeepCopyInto(out *OSUpgradeStatus) {
	*out = *in
	if in.Conditions != nil {
		in, out := &in.Conditions, &out.Conditions
		*out = make([]v1.Condition, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSUpgradeStatus.
func (in *OSUpgradeStatus) DeepCopy() *OSUpgradeStatus {
	if in == nil {
		return nil
	}
	out := new(OSUpgradeStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VersionCatalogSource) DeepCopyInto(out *VersionCatalogSource) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VersionCatalogSource.
func (in *VersionCatalogSource) DeepCopy() *VersionCatalogSource {
	if in == nil {
		return nil
	}
	out := new(VersionCatalogSource)
	in.DeepCopyInto(out)
	return out
}

View File

@@ -0,0 +1,178 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.20.1
name: monoksconfigs.monok8s.io
spec:
group: monok8s.io
names:
kind: MonoKSConfig
listKind: MonoKSConfigList
plural: monoksconfigs
singular: monoksconfig
scope: Namespaced
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
properties:
allowSchedulingOnControlPlane:
type: boolean
apiServerAdvertiseAddress:
type: string
apiServerEndpoint:
type: string
bootstrapToken:
type: string
clusterDomain:
type: string
clusterName:
type: string
clusterRole:
type: string
cniPlugin:
type: string
containerRuntimeEndpoint:
type: string
controlPlaneCertKey:
type: string
discoveryTokenCACertHash:
type: string
enableNodeControl:
type: boolean
initControlPlane:
type: boolean
kubeProxyNodePortAddresses:
items:
type: string
type: array
kubernetesVersion:
type: string
network:
properties:
dnsNameservers:
items:
type: string
type: array
dnsSearchDomains:
items:
type: string
type: array
hostname:
type: string
managementCIDR:
type: string
managementGateway:
type: string
managementIface:
type: string
type: object
nodeLabels:
additionalProperties:
type: string
type: object
nodeName:
type: string
podSubnet:
type: string
serviceSubnet:
type: string
skipImageCheck:
type: boolean
subjectAltNames:
items:
type: string
type: array
type: object
status:
properties:
appliedSteps:
items:
type: string
type: array
conditions:
items:
description: Condition contains details for one aspect of the current
state of this API Resource.
properties:
lastTransitionTime:
description: |-
lastTransitionTime is the last time the condition transitioned from one status to another.
This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: |-
message is a human readable message indicating details about the transition.
This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: |-
observedGeneration represents the .metadata.generation that the condition was set based upon.
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
with respect to the current state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: |-
reason contains a programmatic identifier indicating the reason for the condition's last transition.
Producers of specific condition types may define expected values and meanings for this field,
and whether the values are considered a guaranteed API.
The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
type: string
status:
description: status of the condition, one of True, False, Unknown.
enum:
- "True"
- "False"
- Unknown
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
required:
- lastTransitionTime
- message
- reason
- status
- type
type: object
type: array
observedGeneration:
format: int64
type: integer
phase:
type: string
type: object
type: object
served: true
storage: true

View File

@@ -0,0 +1,124 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.20.1
name: osupgradeprogresses.monok8s.io
spec:
group: monok8s.io
names:
kind: OSUpgradeProgress
listKind: OSUpgradeProgressList
plural: osupgradeprogresses
shortNames:
- osup
singular: osupgradeprogress
scope: Namespaced
versions:
- additionalPrinterColumns:
- jsonPath: .spec.nodeName
name: Node
type: string
- jsonPath: .spec.sourceRef.name
name: Source
type: string
- jsonPath: .status.currentVersion
name: Current
type: string
- jsonPath: .status.targetVersion
name: Target
type: string
- jsonPath: .status.phase
name: Phase
type: string
name: v1alpha1
schema:
openAPIV3Schema:
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
description: Specification of the desired behavior of the OSUpgradeProgress.
properties:
nodeName:
type: string
retryNonce:
description: |-
RetryNonce triggers a retry when its value changes.
Users can update this field (for example, set it to the current time)
to request a retry of a failed OS upgrade.
type: string
sourceRef:
properties:
name:
type: string
namespace:
type: string
type: object
type: object
status:
description: Most recently observed status of the OSUpgradeProgress.
properties:
completedAt:
format: date-time
type: string
currentFrom:
type: string
currentStep:
format: int32
type: integer
currentTo:
type: string
currentVersion:
type: string
failureReason:
type: string
inactivePartition:
type: string
lastUpdatedAt:
format: date-time
type: string
message:
type: string
observedRetryNonce:
description: |-
ObservedRetryNonce records the last retryNonce value the agent accepted.
When spec.retryNonce is changed by the user and differs from this value,
the agent may retry a failed upgrade.
type: string
phase:
type: string
plannedPath:
items:
type: string
type: array
retryCount:
format: int32
type: integer
startedAt:
format: date-time
type: string
targetVersion:
type: string
type: object
type: object
served: true
storage: true
subresources:
status: {}

View File

@@ -0,0 +1,202 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.20.1
name: osupgrades.monok8s.io
spec:
group: monok8s.io
names:
kind: OSUpgrade
listKind: OSUpgradeList
plural: osupgrades
shortNames:
- osu
singular: osupgrade
scope: Namespaced
versions:
- additionalPrinterColumns:
- jsonPath: .spec.desiredVersion
name: Desired
type: string
- jsonPath: .status.resolvedVersion
name: Resolved
type: string
- jsonPath: .status.phase
name: Phase
type: string
name: v1alpha1
schema:
openAPIV3Schema:
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
description: Specification of the desired behavior of the OSUpgrade.
properties:
catalog:
properties:
configMapRef:
type: string
inline:
type: string
url:
type: string
type: object
desiredVersion:
minLength: 1
type: string
flashProfile:
default: balanced
description: |-
Profiles (TODO)
safe - api-server can be responsive most of the time
balanced - api-server can sometimes be unresponsive
fast - disable throttling. Good for worker node.
enum:
- fast
- balanced
- safe
type: string
nodeSelector:
description: |-
A label selector is a label query over a set of resources. The result of matchLabels and
matchExpressions are ANDed. An empty label selector matches all objects. A null
label selector matches no objects.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements.
The requirements are ANDed.
items:
description: |-
A label selector requirement is a selector that contains values, a key, and an operator that
relates the key and values.
properties:
key:
description: key is the label key that the selector applies
to.
type: string
operator:
description: |-
operator represents a key's relationship to a set of values.
Valid operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
description: |-
values is an array of string values. If the operator is In or NotIn,
the values array must be non-empty. If the operator is Exists or DoesNotExist,
the values array must be empty. This array is replaced during a strategic
merge patch.
items:
type: string
type: array
x-kubernetes-list-type: atomic
required:
- key
- operator
type: object
type: array
x-kubernetes-list-type: atomic
matchLabels:
additionalProperties:
type: string
description: |-
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions, whose key field is "key", the
operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
type: object
status:
description: Most recently observed status of the OSUpgrade.
properties:
conditions:
items:
description: Condition contains details for one aspect of the current
state of this API Resource.
properties:
lastTransitionTime:
description: |-
lastTransitionTime is the last time the condition transitioned from one status to another.
This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: |-
message is a human readable message indicating details about the transition.
This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: |-
observedGeneration represents the .metadata.generation that the condition was set based upon.
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
with respect to the current state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: |-
reason contains a programmatic identifier indicating the reason for the condition's last transition.
Producers of specific condition types may define expected values and meanings for this field,
and whether the values are considered a guaranteed API.
The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
type: string
status:
description: status of the condition, one of True, False, Unknown.
enum:
- "True"
- "False"
- Unknown
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
required:
- lastTransitionTime
- message
- reason
- status
- type
type: object
type: array
message:
type: string
observedGeneration:
format: int64
type: integer
phase:
type: string
reason:
type: string
resolvedVersion:
type: string
type: object
type: object
served: true
storage: true
subresources:
status: {}

View File

@@ -0,0 +1,6 @@
// Package assets bundles static manifests that ship inside the binary.
package assets

import "embed"

// CRDs contains the CustomResourceDefinition manifests embedded from the
// crds/ directory at build time. The go:embed directive below must stay
// immediately above this declaration.
//
//go:embed crds/*.yaml
var CRDs embed.FS

View File

@@ -0,0 +1,49 @@
package assets
import (
"fmt"
"io"
"path/filepath"
"sort"
)
// PrintCRDs writes every bundled *.yaml CRD manifest to out as one
// multi-document YAML stream: documents are separated by "---", emitted in
// lexical filename order, and each document is guaranteed to end in a newline.
func PrintCRDs(out io.Writer) error {
    entries, err := CRDs.ReadDir("crds")
    if err != nil {
        return err
    }
    var files []string
    for _, e := range entries {
        if e.IsDir() || filepath.Ext(e.Name()) != ".yaml" {
            continue
        }
        files = append(files, e.Name())
    }
    sort.Strings(files)
    for _, f := range files {
        data, err := CRDs.ReadFile("crds/" + f)
        if err != nil {
            return err
        }
        if _, err := fmt.Fprintln(out, "---"); err != nil {
            return err
        }
        if _, err := out.Write(data); err != nil {
            return err
        }
        // Terminate the document with a newline if the file lacked one.
        if len(data) == 0 || data[len(data)-1] != '\n' {
            if _, err := fmt.Fprintln(out); err != nil {
                return err
            }
        }
    }
    return nil
}

View File

@@ -0,0 +1,65 @@
package bootstrap
import (
"fmt"
"example.com/monok8s/pkg/node"
"example.com/monok8s/pkg/node/uboot"
)
// Registry maps step names to their bootstrap step implementations.
type Registry struct {
    // steps indexes every known bootstrap/diagnostic step by its registry key.
    steps map[string]node.Step
}

// NewRegistry builds the step registry for ctx. Network-related steps are
// pre-bound to a NetworkConfig derived from the node's MonoKSConfig spec;
// all other steps are plain package functions.
func NewRegistry(ctx *node.NodeContext) *Registry {
    netCfg := node.NetworkConfig{
        Hostname:         ctx.Config.Spec.Network.Hostname,
        MgmtIface:        ctx.Config.Spec.Network.ManagementIface,
        MgmtAddress:      ctx.Config.Spec.Network.ManagementCIDR,
        MgmtGateway:      ctx.Config.Spec.Network.ManagementGW,
        DNSNameservers:   ctx.Config.Spec.Network.DNSNameservers,
        DNSSearchDomains: ctx.Config.Spec.Network.DNSSearchDomains,
    }
    return &Registry{
        steps: map[string]node.Step{
            "ApplyNodeControlDaemonSetResources": node.ApplyNodeControlDaemonSetResources,
            "ApplyLocalNodeMetadataIfPossible":   node.ApplyLocalNodeMetadataIfPossible,
            "CheckForVersionSkew":                node.CheckForVersionSkew,
            "ClassifyBootstrapAction":            node.ClassifyBootstrapAction,
            "ConfigureABBoot":                    uboot.ConfigureABBoot,
            "ConfigureDNS":                       node.ConfigureDNS(netCfg),
            "ConfigureDefaultCNI":                node.ConfigureDefaultCNI,
            "ConfigureHostname":                  node.ConfigureHostname(netCfg),
            "ConfigureMgmtInterface":             node.ConfigureMgmtInterface(netCfg),
            "ConfigureUBootCommands":             uboot.ConfigureUBootCommands,
            "DetectLocalClusterState":            node.DetectLocalClusterState,
            "EngageControlGate":                  node.EngageControlGate,
            "EnsureIPForward":                    node.EnsureIPForward,
            "MountAltImageStore":                 node.MountAltImageStore,
            "ReconcileControlPlane":              node.ReconcileControlPlane,
            "ReconcileWorker":                    node.ReconcileWorker,
            "ReleaseControlGate":                 node.ReleaseControlGate,
            "RunKubeadmInit":                     node.RunKubeadmInit,
            "RunKubeadmJoin":                     node.RunKubeadmJoin,
            "RunKubeadmUpgradeApply":             node.RunKubeadmUpgradeApply,
            "RunKubeadmUpgradeNode":              node.RunKubeadmUpgradeNode,
            "StartCRIO":                          node.StartCRIO,
            "UnmountAltImageStore":               node.UnmountAltImageStore,
            "ValidateNodeIPAndAPIServerReachability": node.ValidateNodeIPAndAPIServerReachability,
            "ValidateRequiredImagesPresent":          node.ValidateRequiredImagesPresent,
            "WaitForExistingClusterIfNeeded":         node.WaitForExistingClusterIfNeeded,
            // Diagnostics
            "DiagTestDiskWrite": node.DiagTestDiskWrite,
        },
    }
}
// Get looks up a bootstrap step by its registry key, returning an error for
// unknown names.
func (r *Registry) Get(name string) (node.Step, error) {
    if step, ok := r.steps[name]; ok {
        return step, nil
    }
    return nil, fmt.Errorf("unknown step %q", name)
}

View File

@@ -0,0 +1,204 @@
package bootstrap
import (
"context"
"fmt"
monov1alpha1 "example.com/monok8s/pkg/apis/monok8s/v1alpha1"
"example.com/monok8s/pkg/node"
"example.com/monok8s/pkg/system"
"k8s.io/klog/v2"
)
// Runner executes the node bootstrap sequence against a single node context.
type Runner struct {
    // NodeCtx carries the rendered config and system command runner shared
    // by every step.
    NodeCtx  *node.NodeContext
    // Registry resolves step registry keys to implementations.
    Registry *Registry
    // initSteps is the ordered bootstrap sequence run by Init.
    initSteps []StepInfo
}

// StepInfo describes one bootstrap step as shown by "init list".
type StepInfo struct {
    // RegKey is the registry key used to look the step up.
    RegKey string
    // Name is the short human-readable step title.
    Name   string
    // Desc is a one-line description of what the step does.
    Desc   string
}

// StepSelection is a set of steps to run, by position in the init sequence.
type StepSelection struct {
    Indices []int // 1-based
}
// NewRunner builds a bootstrap Runner for cfg, wiring the node context, the
// system command runner, the step registry, and the ordered init sequence.
// Step descriptions are user-facing (printed by "init list"); several typos
// in them are fixed here ("wether", "againts", "availablilty", "unmount").
func NewRunner(cfg *monov1alpha1.MonoKSConfig) *Runner {
    runnerCfg := system.RunnerConfig{}
    nctx := &node.NodeContext{
        Config:       cfg,
        SystemRunner: system.NewRunner(runnerCfg),
    }
    return &Runner{
        NodeCtx:  nctx,
        Registry: NewRegistry(nctx),
        // The canonical bootstrap order; "init list" numbers are 1-based
        // positions into this slice.
        initSteps: []StepInfo{
            {
                RegKey: "ConfigureHostname",
                Name:   "Configure hostname",
                Desc:   "Set system hostname according to cluster configuration",
            },
            {
                RegKey: "ConfigureMgmtInterface",
                Name:   "Configure management interface",
                Desc:   "Configure management network interface, IP address, and gateway",
            },
            {
                RegKey: "ConfigureDNS",
                Name:   "Configure DNS",
                Desc:   "Set system DNS resolver configuration for cluster and external access",
            },
            {
                RegKey: "EnsureIPForward",
                Name:   "Ensure IP forwarding",
                Desc:   "Enable kernel IP forwarding required for pod networking",
            },
            {
                RegKey: "ConfigureDefaultCNI",
                Name:   "Configure default CNI",
                Desc:   "Install or configure default container networking (CNI bridge, IPAM, etc.)",
            },
            {
                RegKey: "MountAltImageStore",
                Name:   "Mount alt image store for CRI-O. Needed for upgrade.",
                Desc:   "Will be unmounted after kubeadm upgrade apply",
            },
            {
                RegKey: "EngageControlGate",
                Name:   "Engage the control gate",
                Desc:   "Prevents agent watching resources prematurely",
            },
            {
                RegKey: "StartCRIO",
                Name:   "Start CRI-O runtime",
                Desc:   "Start container runtime and verify it is ready for Kubernetes workloads",
            },
            {
                RegKey: "ValidateRequiredImagesPresent",
                Name:   "Validate required images",
                Desc:   "Ensure all required Kubernetes images are present or available locally",
            },
            {
                RegKey: "ValidateNodeIPAndAPIServerReachability",
                Name:   "Validate Node IP and whether API Server is available",
                Desc:   "Verify the local ip address with the api server advertise address. Contact remote api server",
            },
            {
                RegKey: "DetectLocalClusterState",
                Name:   "Detect local cluster state",
                Desc:   "Inspect local node to determine existing Kubernetes membership and configuration",
            },
            {
                RegKey: "ClassifyBootstrapAction",
                Name:   "Classify bootstrap action",
                Desc:   "Decide whether to init, join, upgrade, or reconcile based on local state and desired version",
            },
            {
                RegKey: "RunKubeadmInit",
                Name:   "Run kubeadm init",
                Desc:   "Initialize a new Kubernetes control plane using kubeadm",
            },
            {
                RegKey: "RunKubeadmJoin",
                Name:   "Run kubeadm join",
                Desc:   "Join node to existing cluster as worker or control-plane",
            },
            {
                RegKey: "WaitForExistingClusterIfNeeded",
                Name:   "Wait for existing cluster",
                Desc:   "Block until control plane is reachable when joining or reconciling an existing cluster",
            },
            {
                RegKey: "CheckForVersionSkew",
                Name:   "Check for version skew",
                Desc:   "Validate whether version satisfies the requirements against current cluster if any",
            },
            {
                RegKey: "ReconcileControlPlane",
                Name:   "Reconcile control plane",
                Desc:   "Ensure control plane components match desired state without full reinitialization",
            },
            {
                RegKey: "ReconcileWorker",
                Name:   "Reconcile worker node",
                Desc:   "Reconcile the worker node",
            },
            {
                RegKey: "RunKubeadmUpgradeApply",
                Name:   "Run kubeadm upgrade apply",
                Desc:   "Upgrade control plane components using kubeadm",
            },
            {
                RegKey: "RunKubeadmUpgradeNode",
                Name:   "Run kubeadm upgrade node",
                Desc:   "Upgrade node components (kubelet, config) to match control plane",
            },
            {
                RegKey: "UnmountAltImageStore",
                Name:   "Unmount alt image store",
                Desc:   "Rewrite CRIO storage.conf. Then restart CRIO. Then unmount.",
            },
            {
                RegKey: "ApplyLocalNodeMetadataIfPossible",
                Name:   "Apply node metadata",
                Desc:   "Apply labels/annotations to the local node if API server is reachable",
            },
            {
                RegKey: "ConfigureUBootCommands",
                Name:   "Ensure fw_env config and u-boot-tools availability",
                Desc:   "Install or generate /etc/fw_env.config for U-Boot environment access",
            },
            {
                RegKey: "ConfigureABBoot",
                Name:   "Configure A/B booting environment",
                Desc:   "Make A/B booting possible",
            },
            {
                RegKey: "ApplyNodeControlDaemonSetResources",
                Name:   "Apply daemonset for control agent",
                Desc:   "Control agent handles OSUpgrade resources",
            },
            {
                RegKey: "ReleaseControlGate",
                Name:   "Release the control gate",
                Desc:   "Allow agent to start watching resources",
            },
        },
    }
}
// RunNamedStep resolves name in the registry and executes that step with the
// runner's node context. It returns the registry lookup error for unknown
// names, otherwise the step's own error.
func (r *Runner) RunNamedStep(ctx context.Context, name string) error {
    step, err := r.Registry.Get(name)
    if err != nil {
        return err
    }
    return step(ctx, r.NodeCtx)
}
// InitSteps returns the ordered bootstrap sequence. Note the slice is not
// copied; callers must treat it as read-only.
func (r *Runner) InitSteps() []StepInfo {
    return r.initSteps
}
// Init runs the full bootstrap sequence in order, aborting at the first step
// that fails. Errors are wrapped with the 1-based step number and name.
func (r *Runner) Init(ctx context.Context) error {
    for i, s := range r.initSteps {
        err := r.RunNamedStep(ctx, s.RegKey)
        if err != nil {
            return fmt.Errorf("step %d (%s): %w", i+1, s.Name, err)
        }
    }
    klog.Info("All steps completed successfully")
    return nil
}
// InitSelected runs only the steps named by sel, in the given order.
// Indices are 1-based positions into InitSteps. An out-of-range index is
// rejected with an error instead of panicking on the slice access (the
// previous implementation indexed r.initSteps[idx-1] unchecked).
func (r *Runner) InitSelected(ctx context.Context, sel StepSelection) error {
    for _, idx := range sel.Indices {
        if idx < 1 || idx > len(r.initSteps) {
            return fmt.Errorf("step index %d out of range 1-%d", idx, len(r.initSteps))
        }
        step := r.initSteps[idx-1]
        if err := r.RunNamedStep(ctx, step.RegKey); err != nil {
            return fmt.Errorf("step %d (%s): %w", idx, step.Name, err)
        }
    }
    return nil
}

View File

@@ -0,0 +1 @@
Use `make build` to generate the files. Do not modify.

View File

@@ -0,0 +1,30 @@
package catalog
import (
"encoding/hex"
"fmt"
"strings"
)
// SHA256 returns the hex digest portion of the image checksum. The checksum
// must be of the form "sha256:<64 hex characters>"; anything else is an error.
func (c *CatalogImage) SHA256() (string, error) {
    const prefix = "sha256:"
    switch {
    case c.Checksum == "":
        return "", fmt.Errorf("checksum is empty")
    case !strings.HasPrefix(c.Checksum, prefix):
        return "", fmt.Errorf("unsupported checksum format (expected sha256:...)")
    }
    digest := c.Checksum[len(prefix):]
    if len(digest) != 64 {
        return "", fmt.Errorf("invalid sha256 length: got %d, want 64", len(digest))
    }
    if _, err := hex.DecodeString(digest); err != nil {
        return "", fmt.Errorf("invalid sha256 hex: %w", err)
    }
    return digest, nil
}

View File

@@ -0,0 +1,111 @@
package catalog
import (
    "context"
    "fmt"
    "io"
    "net/http"
    "os"
    "time"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "sigs.k8s.io/yaml"

    monov1alpha1 "example.com/monok8s/pkg/apis/monok8s/v1alpha1"
)
const (
    // DefaultCachePath is where the last successfully fetched catalog is
    // cached on disk, used as an offline fallback by ResolveCatalog.
    DefaultCachePath = "/var/lib/monok8s/catalog.yaml"
)
// ResolveCatalog resolves the version catalog, trying sources in priority
// order: inline YAML > ConfigMap > remote URL (refreshing the on-disk cache
// on success, falling back to it on failure) > previously cached copy.
func ResolveCatalog(ctx context.Context,
    kubeClient kubernetes.Interface,
    namespace string, src *monov1alpha1.VersionCatalogSource,
) (*VersionCatalog, error) {
    if src != nil {
        // 1. Inline YAML embedded directly in the source spec.
        if src.Inline != "" {
            return parseCatalog([]byte(src.Inline))
        }
        // 2. ConfigMap carrying a catalog.yaml key.
        if src.ConfigMap != "" {
            cm, err := kubeClient.CoreV1().ConfigMaps(namespace).Get(ctx, src.ConfigMap, metav1.GetOptions{})
            if err != nil {
                return nil, fmt.Errorf("get catalog configmap: %w", err)
            }
            data, ok := cm.Data["catalog.yaml"]
            if !ok {
                return nil, fmt.Errorf("configmap %s missing key catalog.yaml", src.ConfigMap)
            }
            return parseCatalog([]byte(data))
        }
        // 3. Remote URL with cache fallback.
        if src.URL != "" {
            cat, fetchErr := fetchCatalog(src.URL)
            if fetchErr == nil {
                // Best-effort cache refresh; failure must not fail resolution.
                _ = os.WriteFile(DefaultCachePath, mustMarshal(cat), 0644)
                return cat, nil
            }
            if cached, err := loadCached(); err == nil {
                return cached, nil
            }
            return nil, fmt.Errorf("fetch catalog failed and no cache: %w", fetchErr)
        }
    }
    // 4. No source configured: last-known cached catalog, if any.
    if cached, err := loadCached(); err == nil {
        return cached, nil
    }
    return nil, fmt.Errorf("no catalog source available")
}
// fetchCatalog downloads and parses a VersionCatalog from url. A bounded
// client timeout prevents the resolver from hanging indefinitely on an
// unresponsive catalog server (the previous http.Get had no timeout).
func fetchCatalog(url string) (*VersionCatalog, error) {
    client := &http.Client{Timeout: 30 * time.Second}
    resp, err := client.Get(url)
    if err != nil {
        return nil, err
    }
    defer resp.Body.Close()
    if resp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("http %d", resp.StatusCode)
    }
    body, err := io.ReadAll(resp.Body)
    if err != nil {
        return nil, err
    }
    return parseCatalog(body)
}
// loadCached reads and parses the last catalog cached at DefaultCachePath.
func loadCached() (*VersionCatalog, error) {
    b, err := os.ReadFile(DefaultCachePath)
    if err != nil {
        return nil, err
    }
    return parseCatalog(b)
}
// parseCatalog unmarshals raw YAML bytes into a VersionCatalog.
func parseCatalog(data []byte) (*VersionCatalog, error) {
    c := new(VersionCatalog)
    if err := yaml.Unmarshal(data, c); err != nil {
        return nil, fmt.Errorf("parse catalog: %w", err)
    }
    return c, nil
}
// mustMarshal serializes c to YAML. Marshaling this struct can only fail on
// a programmer error, so per the Must* convention a failure panics instead
// of being silently dropped (the previous version ignored the error and
// could return nil, corrupting the on-disk cache with an empty file).
func mustMarshal(c *VersionCatalog) []byte {
    b, err := yaml.Marshal(c)
    if err != nil {
        panic(fmt.Sprintf("marshal catalog: %v", err))
    }
    return b
}

View File

@@ -0,0 +1,15 @@
package catalog
type VersionCatalog struct {
Stable string `json:"stable" yaml:"stable"`
Images []CatalogImage `json:"images" yaml:"images"`
Blocked []string `json:"blocked,omitempty" yaml:"blocked,omitempty"`
}
type CatalogImage struct {
Version string `json:"version" yaml:"version"`
Patch int `json:"patch" yaml:"version"`
URL string `json:"url" yaml:"url"`
Checksum string `json:"checksum,omitempty" yaml:"checksum,omitempty"`
Size int64 `json:"size,omitempty" yaml:"size,omitempty"`
}

View File

@@ -0,0 +1,252 @@
package agent
import (
"context"
"fmt"
"os"
"path/filepath"
"time"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/klog/v2"
monov1alpha1 "example.com/monok8s/pkg/apis/monok8s/v1alpha1"
mkscmd "example.com/monok8s/pkg/cmd"
osupgradeController "example.com/monok8s/pkg/controller/osupgrade"
"example.com/monok8s/pkg/kube"
"example.com/monok8s/pkg/templates"
)
// NewCmdAgent returns the "agent" subcommand. The agent renders its node
// configuration from an MKS_* env file, waits for the bootstrap control
// gate to be released, then watches OSUpgradeProgress resources targeting
// this node and processes them.
func NewCmdAgent(flags *genericclioptions.ConfigFlags) *cobra.Command {
    var envFile string
    cmd := &cobra.Command{
        Use:   "agent --env-file path",
        Short: "Watch OSUpgradeProgress resources for this node and process upgrades",
        RunE: func(cmd *cobra.Command, _ []string) error {
            // Namespace comes from the kubeconfig context / generic flags.
            ns, _, err := flags.ToRawKubeConfigLoader().Namespace()
            if err != nil {
                return err
            }
            if envFile == "" {
                return fmt.Errorf("--env-file is required")
            }
            // Export MKS_* variables, then render the effective node config
            // from the environment.
            if err := mkscmd.LoadEnvFile(envFile); err != nil {
                return fmt.Errorf("load env file %q: %w", envFile, err)
            }
            vals := templates.LoadTemplateValuesFromEnv()
            rendered := templates.DefaultMonoKSConfig(vals)
            cfg := &rendered
            if cfg.Spec.NodeName == "" {
                return fmt.Errorf("node name is empty in rendered config")
            }
            ctx := cmd.Context()
            // Block until bootstrap removes the control-gate marker so the
            // agent does not act while the node is still being set up.
            if err := waitForControlGate(ctx, envFile, 2*time.Second); err != nil {
                return fmt.Errorf("wait for control gate to release: %w", err)
            }
            klog.InfoS("starting agent",
                "node", cfg.Spec.NodeName,
                "namespace", ns,
                "envFile", envFile,
            )
            clients, err := kube.NewClients(flags)
            if err != nil {
                return fmt.Errorf("create kube clients: %w", err)
            }
            return runWatchLoop(ctx, clients, ns, cfg.Spec.NodeName)
        },
    }
    cmd.Flags().StringVar(&envFile, "env-file", "", "path to env file containing MKS_* variables")
    return cmd
}
// waitForControlGate blocks until the ".control-gate" marker file that sits
// next to envFile is removed, polling every pollInterval (defaulting to 2s
// for non-positive values). It returns nil once the marker is absent,
// ctx.Err() on cancellation, or an error if the marker cannot be stat'ed.
func waitForControlGate(ctx context.Context, envFile string, pollInterval time.Duration) error {
    if pollInterval <= 0 {
        pollInterval = 2 * time.Second
    }
    marker := filepath.Join(filepath.Dir(envFile), ".control-gate")
    ticker := time.NewTicker(pollInterval)
    defer ticker.Stop()
    for {
        switch _, err := os.Stat(marker); {
        case err == nil:
            klog.InfoS("Control gate is present; waiting before starting watch loop", "path", marker)
        case os.IsNotExist(err):
            klog.InfoS("Control gate not present; starting watch loop", "path", marker)
            return nil
        default:
            return fmt.Errorf("stat upgrade marker %s: %w", marker, err)
        }
        select {
        case <-ctx.Done():
            return ctx.Err()
        case <-ticker.C:
        }
    }
}
// runWatchLoop repeatedly (re)establishes a watch on OSUpgradeProgress
// resources until ctx is canceled, retrying after a short delay on failure.
// The resourceVersion is carried across attempts so a new watch resumes
// roughly where the previous one left off.
func runWatchLoop(ctx context.Context, clients *kube.Clients, namespace, nodeName string) error {
    var rv string
    for {
        if err := ctx.Err(); err != nil {
            return err
        }
        if err := watchOnce(ctx, clients, namespace, nodeName, &rv); err != nil {
            if ctx.Err() != nil {
                return ctx.Err()
            }
            klog.ErrorS(err, "watch failed; retrying",
                "namespace", namespace,
                "node", nodeName,
                "resourceVersion", rv,
            )
            select {
            case <-ctx.Done():
                return ctx.Err()
            case <-time.After(2 * time.Second):
            }
        }
    }
}
// watchOnce performs one list-then-watch cycle over OSUpgradeProgress
// resources in namespace. Existing items targeting nodeName are handled
// immediately; the watch then starts from the list's resourceVersion so no
// events are missed between list and watch. resourceVersion is updated from
// bookmarks and received objects so the caller can resume after a failure.
// It returns when the watch breaks or ctx is canceled.
func watchOnce(
    ctx context.Context,
    clients *kube.Clients,
    namespace string,
    nodeName string,
    resourceVersion *string,
) error {
    // Initial list: process anything already present for this node.
    list, err := clients.MonoKS.
        Monok8sV1alpha1().
        OSUpgradeProgresses(namespace).
        List(ctx, metav1.ListOptions{})
    if err != nil {
        return fmt.Errorf("list osupgradeprogresses: %w", err)
    }
    for i := range list.Items {
        item := &list.Items[i]
        if !targetsNode(item, nodeName) {
            continue
        }
        klog.InfoS("found existing osupgradeprogress",
            "name", item.Name,
            "node", nodeName,
            "phase", item.StatusPhase(),
            "resourceVersion", item.ResourceVersion,
        )
        // Handler errors are logged, not returned: one bad item must not
        // abort processing of the others or the watch itself.
        if err := osupgradeController.HandleOSUpgradeProgress(ctx, clients, namespace, nodeName, item); err != nil {
            klog.ErrorS(err, "failed to handle existing osupgradeprogress",
                "name", item.Name,
                "node", nodeName,
            )
        }
    }
    // Resume the watch exactly after the listed state.
    *resourceVersion = list.ResourceVersion
    w, err := clients.MonoKS.
        Monok8sV1alpha1().
        OSUpgradeProgresses(namespace).
        Watch(ctx, metav1.ListOptions{
            ResourceVersion: *resourceVersion,
        })
    if err != nil {
        return fmt.Errorf("watch osupgradeprogresses: %w", err)
    }
    defer w.Stop()
    klog.InfoS("watching osupgradeprogresses",
        "namespace", namespace,
        "node", nodeName,
        "resourceVersion", *resourceVersion,
    )
    for {
        select {
        case <-ctx.Done():
            return ctx.Err()
        case evt, ok := <-w.ResultChan():
            if !ok {
                // Server closed the watch; caller will re-list and re-watch.
                return fmt.Errorf("watch channel closed")
            }
            switch evt.Type {
            case watch.Bookmark:
                // Bookmarks only advance the resume point.
                obj, ok := evt.Object.(*monov1alpha1.OSUpgradeProgress)
                if ok && obj != nil && obj.ResourceVersion != "" {
                    *resourceVersion = obj.ResourceVersion
                }
                continue
            case watch.Error:
                return fmt.Errorf("watch returned error event")
            }
            osup, ok := evt.Object.(*monov1alpha1.OSUpgradeProgress)
            if !ok {
                klog.V(1).InfoS("skipping unexpected watch object type",
                    "type", fmt.Sprintf("%T", evt.Object),
                )
                continue
            }
            // Advance the resume point even for events we end up ignoring.
            if osup.ResourceVersion != "" {
                *resourceVersion = osup.ResourceVersion
            }
            if !targetsNode(osup, nodeName) {
                continue
            }
            klog.V(4).InfoS("received osupgradeprogress event",
                "name", osup.Name,
                "node", nodeName,
                "phase", osup.StatusPhase(),
                "eventType", evt.Type,
                "resourceVersion", osup.ResourceVersion,
            )
            if err := osupgradeController.HandleOSUpgradeProgress(ctx, clients, namespace, nodeName, osup); err != nil {
                klog.ErrorS(err, "failed to handle osupgradeprogress",
                    "name", osup.Name,
                    "node", nodeName,
                    "eventType", evt.Type,
                )
            }
        }
    }
}
// targetsNode reports whether the OSUpgradeProgress is addressed to
// nodeName. A nil object targets nothing.
func targetsNode(osup *monov1alpha1.OSUpgradeProgress, nodeName string) bool {
    return osup != nil && osup.Spec.NodeName == nodeName
}

View File

@@ -0,0 +1,30 @@
package checkconfig
import (
"fmt"
"example.com/monok8s/pkg/config"
"github.com/spf13/cobra"
)
// NewCmdCheckConfig returns the "checkconfig" subcommand, which resolves and
// loads a MonoKSConfig, printing a short OK line with the node name and
// Kubernetes version on success.
func NewCmdCheckConfig() *cobra.Command {
    var configPath string
    cmd := &cobra.Command{
        Use:   "checkconfig",
        Short: "Validate a MonoKSConfig",
        RunE: func(cmd *cobra.Command, _ []string) error {
            loader := config.Loader{}
            path, err := loader.ResolvePath(configPath)
            if err != nil {
                return err
            }
            cfg, err := loader.Load(path)
            if err != nil {
                return err
            }
            fmt.Fprintf(cmd.OutOrStdout(), "OK: %s (%s / %s)\n", path, cfg.Spec.NodeName, cfg.Spec.KubernetesVersion)
            return nil
        },
    }
    cmd.Flags().StringVarP(&configPath, "config", "c", "", "path to MonoKSConfig yaml")
    return cmd
}

View File

@@ -0,0 +1,217 @@
package controller
import (
"context"
"errors"
"fmt"
"net"
"net/http"
"os"
"time"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/klog/v2"
mkscontroller "example.com/monok8s/pkg/controller"
osupgradectrl "example.com/monok8s/pkg/controller/osupgrade"
"example.com/monok8s/pkg/kube"
)
// ServerConfig holds the controller's serving configuration.
type ServerConfig struct {
    // Namespace the controller operates in (resolved from kubeconfig flags).
    Namespace string `json:"namespace,omitempty"`
    // TLSCertFile enables HTTPS serving on the controller port when set.
    TLSCertFile string `json:"tlsCertFile,omitempty"`
    // TLSPrivateKeyFile is the private key matching TLSCertFile.
    TLSPrivateKeyFile string `json:"tlsPrivateKeyFile,omitempty"`
}
// NewCmdController returns the "controller" subcommand. It runs two
// goroutines — the OSUpgrade watch loop and the HTTP(S) servers — and exits
// when either one fails or the command context is canceled.
func NewCmdController(flags *genericclioptions.ConfigFlags) *cobra.Command {
    var conf ServerConfig
    cmd := &cobra.Command{
        Use:   "controller",
        Short: "Start a controller that handles OSUpgrade resources",
        RunE: func(cmd *cobra.Command, _ []string) error {
            ns, _, err := flags.ToRawKubeConfigLoader().Namespace()
            if err != nil {
                return err
            }
            conf.Namespace = ns
            ctx := cmd.Context()
            klog.InfoS("starting controller", "namespace", conf.Namespace)
            clients, err := kube.NewClients(flags)
            if err != nil {
                return err
            }
            // Cancel the sibling goroutine when either side finishes.
            ctx, cancel := context.WithCancel(ctx)
            defer cancel()
            httpErrCh := make(chan error, 1)
            watchErrCh := make(chan error, 1)
            go func() {
                klog.InfoS("starting OSUpgrade watch loop", "namespace", conf.Namespace)
                watchErrCh <- osupgradectrl.Watch(ctx, clients, conf.Namespace)
            }()
            go func() {
                httpErrCh <- listenAndServe(ctx, clients, conf)
            }()
            // Whichever path returns first decides the exit status; a
            // context.Canceled result is treated as a clean shutdown.
            select {
            case <-ctx.Done():
                klog.InfoS("controller context canceled")
                return ctx.Err()
            case err := <-watchErrCh:
                if err != nil && !errors.Is(err, context.Canceled) {
                    cancel()
                    return err
                }
                cancel()
                return nil
            case err := <-httpErrCh:
                if err != nil && !errors.Is(err, context.Canceled) {
                    cancel()
                    return err
                }
                cancel()
                return nil
            }
        },
    }
    cmd.Flags().StringVar(&conf.TLSCertFile, "tls-cert-file", conf.TLSCertFile,
        "File containing x509 Certificate used for serving HTTPS (with intermediate certs, if any, concatenated after server cert).")
    cmd.Flags().StringVar(&conf.TLSPrivateKeyFile, "tls-private-key-file", conf.TLSPrivateKeyFile,
        "File containing x509 private key matching --tls-cert-file.")
    return cmd
}
// listenAndServe runs two HTTP servers until ctx is canceled or a server
// fails: a plaintext health server on :8080 (/healthz, /readyz) and the
// controller API server on :8443 (HTTPS when conf.TLSCertFile is set,
// otherwise plain HTTP). On ctx cancellation both servers are shut down
// gracefully with a 10s budget and the function returns context.Canceled
// (or the joined shutdown errors).
func listenAndServe(ctx context.Context, clients *kube.Clients, conf ServerConfig) error {
    // NODE_NAME is injected by the DaemonSet/Deployment environment.
    nodeName := os.Getenv("NODE_NAME")
    controllerServer := mkscontroller.NewServer(ctx, clients, conf.Namespace, nodeName)
    healthMux := http.NewServeMux()
    healthMux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
        w.WriteHeader(http.StatusOK)
        _, _ = w.Write([]byte("ok\n"))
    })
    healthMux.HandleFunc("/readyz", func(w http.ResponseWriter, r *http.Request) {
        w.WriteHeader(http.StatusOK)
        _, _ = w.Write([]byte("ok\n"))
    })
    // Empty host = listen on all interfaces.
    healthAddr := net.JoinHostPort("", "8080")
    controllerAddr := net.JoinHostPort("", "8443")
    healthHTTPServer := &http.Server{
        Addr:           healthAddr,
        Handler:        healthMux,
        IdleTimeout:    90 * time.Second,
        ReadTimeout:    10 * time.Second,
        WriteTimeout:   10 * time.Second,
        MaxHeaderBytes: 1 << 20,
    }
    // Controller requests may drive long-running upgrade operations, hence
    // the much larger read/write timeouts.
    controllerHTTPServer := &http.Server{
        Addr:           controllerAddr,
        Handler:        controllerServer,
        IdleTimeout:    90 * time.Second,
        ReadTimeout:    4 * time.Minute,
        WriteTimeout:   4 * time.Minute,
        MaxHeaderBytes: 1 << 20,
    }
    // Buffered for both goroutines so neither blocks after the select exits.
    serverErrCh := make(chan error, 2)
    go func() {
        klog.InfoS("starting health HTTP server", "addr", healthAddr)
        err := healthHTTPServer.ListenAndServe()
        if err != nil && !errors.Is(err, http.ErrServerClosed) {
            serverErrCh <- fmt.Errorf("health HTTP server: %w", err)
            return
        }
        serverErrCh <- nil
    }()
    go func() {
        if conf.TLSCertFile != "" {
            klog.InfoS("starting controller HTTPS server",
                "addr", controllerAddr,
                "certFile", conf.TLSCertFile,
                "keyFile", conf.TLSPrivateKeyFile,
            )
            err := controllerHTTPServer.ListenAndServeTLS(conf.TLSCertFile, conf.TLSPrivateKeyFile)
            if err != nil && !errors.Is(err, http.ErrServerClosed) {
                serverErrCh <- fmt.Errorf("controller HTTPS server: %w", err)
                return
            }
            serverErrCh <- nil
            return
        }
        klog.InfoS("starting controller HTTP server", "addr", controllerAddr)
        err := controllerHTTPServer.ListenAndServe()
        if err != nil && !errors.Is(err, http.ErrServerClosed) {
            serverErrCh <- fmt.Errorf("controller HTTP server: %w", err)
            return
        }
        serverErrCh <- nil
    }()
    select {
    case <-ctx.Done():
        klog.InfoS("shutting down HTTP servers",
            "healthAddr", healthAddr,
            "controllerAddr", controllerAddr,
        )
        shutdownCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
        defer cancel()
        var errs []error
        if err := healthHTTPServer.Shutdown(shutdownCtx); err != nil {
            errs = append(errs, fmt.Errorf("shutdown health HTTP server: %w", err))
        }
        if err := controllerHTTPServer.Shutdown(shutdownCtx); err != nil {
            errs = append(errs, fmt.Errorf("shutdown controller HTTP server: %w", err))
        }
        // Drain both goroutines so the channel sends never leak.
        for i := 0; i < 2; i++ {
            if err := <-serverErrCh; err != nil {
                errs = append(errs, err)
            }
        }
        if len(errs) > 0 {
            return errors.Join(errs...)
        }
        return context.Canceled
    case err := <-serverErrCh:
        if err != nil {
            klog.ErrorS(err, "HTTP server failed")
            return err
        }
        // One server exited cleanly unexpectedly. Treat that as failure because
        // the process should keep both servers alive until ctx is canceled.
        return fmt.Errorf("HTTP server exited unexpectedly")
    }
}

View File

@@ -0,0 +1,186 @@
package create
import (
"bytes"
"fmt"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"os"
"strings"
assets "example.com/monok8s/pkg/assets"
render "example.com/monok8s/pkg/render"
)
// NewCmdCreate returns the "create" command group, which prints starter
// resources to stdout: config, osupgrade, crds, sshd, controller, and agent
// templates. Namespaced templates resolve the namespace from the kubeconfig
// flags at run time.
func NewCmdCreate(flags *genericclioptions.ConfigFlags) *cobra.Command {
    cmd := &cobra.Command{Use: "create", Short: "Create starter resources"}
    cmd.AddCommand(
        &cobra.Command{
            Use:   "config",
            Short: "Print a MonoKSConfig template",
            RunE: func(cmd *cobra.Command, _ []string) error {
                out, err := render.RenderMonoKSConfig()
                if err != nil {
                    return err
                }
                _, err = fmt.Fprint(cmd.OutOrStdout(), out)
                return err
            },
        },
        &cobra.Command{
            Use:   "osupgrade",
            Short: "Print an OSUpgrade template",
            RunE: func(cmd *cobra.Command, _ []string) error {
                ns, _, err := flags.ToRawKubeConfigLoader().Namespace()
                if err != nil {
                    return err
                }
                out, err := render.RenderOSUpgrade(ns)
                if err != nil {
                    return err
                }
                _, err = fmt.Fprint(cmd.OutOrStdout(), out)
                return err
            },
        },
        &cobra.Command{
            Use:   "crds",
            Short: "Print the bundled CRDs",
            RunE: func(cmd *cobra.Command, _ []string) error {
                return assets.PrintCRDs(cmd.OutOrStdout())
            },
        },
    )
    // "create sshd" requires an authorized_keys file to embed in the output.
    var authorizedKeysPath string
    sshdcmd := cobra.Command{
        Use:   "sshd",
        Short: "Print sshd deployments template",
        RunE: func(cmd *cobra.Command, _ []string) error {
            ns, _, err := flags.ToRawKubeConfigLoader().Namespace()
            if err != nil {
                return err
            }
            authorizedKeys, err := readAuthorizedKeysFile(authorizedKeysPath)
            if err != nil {
                return err
            }
            out, err := render.RenderSSHDDeployments(ns, authorizedKeys)
            if err != nil {
                return err
            }
            _, err = fmt.Fprint(cmd.OutOrStdout(), out)
            return err
        },
    }
    sshdcmd.Flags().StringVar(&authorizedKeysPath, "authkeys", "", "path to authorized_keys file")
    cmd.AddCommand(&sshdcmd)
    // "create controller": image pull secrets are only meaningful alongside
    // an explicit image reference.
    cconf := render.ControllerConf{}
    controllercmd := cobra.Command{
        Use:   "controller",
        Short: "Print controller deployments template",
        RunE: func(cmd *cobra.Command, _ []string) error {
            if len(cconf.ImagePullSecrets) > 0 && strings.TrimSpace(cconf.Image) == "" {
                return fmt.Errorf("--image-pull-secret requires --image")
            }
            ns, _, err := flags.ToRawKubeConfigLoader().Namespace()
            if err != nil {
                return err
            }
            cconf.Namespace = ns
            out, err := render.RenderControllerDeployments(cconf)
            if err != nil {
                return err
            }
            _, err = fmt.Fprint(cmd.OutOrStdout(), out)
            return err
        },
    }
    controllercmd.Flags().StringVar(
        &cconf.Image,
        "image",
        "",
        "Controller image, including optional registry and tag",
    )
    controllercmd.Flags().StringSliceVar(
        &cconf.ImagePullSecrets,
        "image-pull-secret",
        nil,
        "Image pull secret name for the agent image; may be specified multiple times or as a comma-separated list",
    )
    cmd.AddCommand(&controllercmd)
    // "create agent": same flag constraints as the controller template.
    aconf := render.AgentConf{}
    agentcmd := cobra.Command{
        Use:   "agent",
        Short: "Print agent daemonsets template",
        RunE: func(cmd *cobra.Command, _ []string) error {
            if len(aconf.ImagePullSecrets) > 0 && strings.TrimSpace(aconf.Image) == "" {
                return fmt.Errorf("--image-pull-secret requires --image")
            }
            ns, _, err := flags.ToRawKubeConfigLoader().Namespace()
            if err != nil {
                return err
            }
            aconf.Namespace = ns
            out, err := render.RenderAgentDaemonSets(aconf)
            if err != nil {
                return err
            }
            _, err = fmt.Fprint(cmd.OutOrStdout(), out)
            return err
        },
    }
    agentcmd.Flags().StringVar(
        &aconf.Image,
        "image",
        "",
        "Agent image, including optional registry and tag",
    )
    agentcmd.Flags().StringSliceVar(
        &aconf.ImagePullSecrets,
        "image-pull-secret",
        nil,
        "Image pull secret name for the agent image; may be specified multiple times or as a comma-separated list",
    )
    cmd.AddCommand(&agentcmd)
    return cmd
}
func readAuthorizedKeysFile(path string) (string, error) {
if path == "" {
return "", fmt.Errorf("--authkeys is required")
}
b, err := os.ReadFile(path)
if err != nil {
return "", fmt.Errorf("read authorized_keys file %q: %w", path, err)
}
if len(bytes.TrimSpace(b)) == 0 {
return "", fmt.Errorf("authorized_keys file %q is empty", path)
}
return string(b), nil
}

View File

@@ -0,0 +1,57 @@
package cmd
import (
"bufio"
"fmt"
"os"
"strings"
)
func LoadEnvFile(path string) error {
f, err := os.Open(path)
if err != nil {
return err
}
defer f.Close()
scanner := bufio.NewScanner(f)
lineNum := 0
for scanner.Scan() {
lineNum++
line := strings.TrimSpace(scanner.Text())
if line == "" || strings.HasPrefix(line, "#") {
continue
}
key, val, ok := strings.Cut(line, "=")
if !ok {
return fmt.Errorf("line %d: expected KEY=VALUE", lineNum)
}
key = strings.TrimSpace(key)
val = strings.TrimSpace(val)
if key == "" {
return fmt.Errorf("line %d: empty variable name", lineNum)
}
// Remove matching single or double quotes around the whole value.
if len(val) >= 2 {
if (val[0] == '"' && val[len(val)-1] == '"') || (val[0] == '\'' && val[len(val)-1] == '\'') {
val = val[1 : len(val)-1]
}
}
if err := os.Setenv(key, val); err != nil {
return fmt.Errorf("line %d: set %q: %w", lineNum, key, err)
}
}
if err := scanner.Err(); err != nil {
return err
}
return nil
}

View File

@@ -0,0 +1,229 @@
package initcmd
import (
"fmt"
"sort"
"strconv"
"strings"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/klog/v2"
"example.com/monok8s/pkg/bootstrap"
"example.com/monok8s/pkg/config"
monov1alpha1 "example.com/monok8s/pkg/apis/monok8s/v1alpha1"
mkscmd "example.com/monok8s/pkg/cmd"
"example.com/monok8s/pkg/templates"
)
// NewCmdInit returns the "init" subcommand that bootstraps this node.
// Configuration comes from either a MonoKSConfig YAML (--config) or an env
// file of MKS_* variables (--env-file); the two are mutually exclusive.
// An optional positional argument either lists the steps ("list") or
// selects which steps to run. Fix: the env file was previously loaded
// twice (once before and once inside the switch); it is now loaded once.
func NewCmdInit(_ *genericclioptions.ConfigFlags) *cobra.Command {
    var configPath string
    var envFile string
    cmd := &cobra.Command{
        Use:   "init [list|STEPSEL] [--config path | --env-file path]",
        Short: "Bootstrap this node (from config file or env file)",
        Long: `Run the node bootstrap process.
You can provide configuration in two ways:
--config PATH Load MonoKSConfig YAML
--env-file PATH Load MKS_* variables from env file and render config
STEPSEL allows running specific steps instead of the full sequence.
Supported formats:
3 Run step 3
1-3 Run steps 1 through 3
-3 Run steps from start through 3
3- Run steps from 3 to the end
1,3,5 Run specific steps
9-10,15 Combine ranges and individual steps
`,
        Example: `
# Run full bootstrap using config file
ctl init --config /etc/monok8s/config.yaml
# Run full bootstrap using env file
ctl init --env-file /opt/monok8s/config/cluster.env
# List steps
ctl init list
# Run selected steps
ctl init 1-3 --env-file /opt/monok8s/config/cluster.env
ctl init 3- --config /etc/monok8s/config.yaml
`,
        Args: cobra.MaximumNArgs(1),
        RunE: func(cmd *cobra.Command, args []string) error {
            if strings.TrimSpace(configPath) != "" && strings.TrimSpace(envFile) != "" {
                return fmt.Errorf("--config and --env-file are mutually exclusive")
            }
            var cfg *monov1alpha1.MonoKSConfig
            switch {
            case strings.TrimSpace(envFile) != "":
                // Export MKS_* variables once, then render the config from
                // the environment.
                if err := mkscmd.LoadEnvFile(envFile); err != nil {
                    return fmt.Errorf("load env file %q: %w", envFile, err)
                }
                vals := templates.LoadTemplateValuesFromEnv()
                rendered := templates.DefaultMonoKSConfig(vals)
                cfg = &rendered
                klog.InfoS("starting init", "node", cfg.Spec.NodeName, "envFile", envFile)
            default:
                path, err := (config.Loader{}).ResolvePath(configPath)
                if err != nil {
                    return err
                }
                loaded, err := (config.Loader{}).Load(path)
                if err != nil {
                    return err
                }
                cfg = loaded
                klog.InfoS("starting init", "node", cfg.Spec.NodeName, "config", path)
            }
            runner := bootstrap.NewRunner(cfg)
            // "list" prints the numbered step sequence and exits.
            if len(args) == 1 && strings.EqualFold(strings.TrimSpace(args[0]), "list") {
                steps := runner.InitSteps()
                fmt.Fprintln(cmd.OutOrStdout(), "Showing current bootstrap sequence")
                width := len(fmt.Sprintf("%d", len(steps)))
                for i, s := range steps {
                    fmt.Fprintf(cmd.OutOrStdout(), "\n %*d. %s\n", width, i+1, s.Name)
                    fmt.Fprintf(cmd.OutOrStdout(), "    %s\n", s.Desc)
                }
                return nil
            }
            // No selector: run the full sequence.
            if len(args) == 0 {
                return runner.Init(cmd.Context())
            }
            steps := runner.InitSteps()
            sel, err := parseStepSelection(args[0], len(steps))
            if err != nil {
                return err
            }
            klog.InfoS("Running selected init steps", "steps", sel.Indices)
            return runner.InitSelected(cmd.Context(), sel)
        },
    }
    cmd.Flags().StringVarP(&configPath, "config", "c", "", "path to MonoKSConfig yaml")
    cmd.Flags().StringVar(&envFile, "env-file", "", "path to env file containing MKS_* variables")
    return cmd
}
// parseStepSelection parses a step selector such as "3", "1-3", "-3", "3-",
// or "1,3,5-7" into a sorted, deduplicated list of 1-based step indices.
// max is the number of available steps; every index must lie in [1, max].
func parseStepSelection(raw string, max int) (bootstrap.StepSelection, error) {
    raw = strings.TrimSpace(raw)
    if raw == "" {
        return bootstrap.StepSelection{}, fmt.Errorf("empty step selection")
    }
    if max <= 0 {
        return bootstrap.StepSelection{}, fmt.Errorf("no steps available")
    }
    picked := make(map[int]struct{})
    for _, tok := range strings.Split(raw, ",") {
        tok = strings.TrimSpace(tok)
        if tok == "" {
            return bootstrap.StepSelection{}, fmt.Errorf("invalid empty selector in %q", raw)
        }
        if !strings.Contains(tok, "-") {
            // Plain single step number.
            n, err := parseStepNumber(tok, max)
            if err != nil {
                return bootstrap.StepSelection{}, err
            }
            picked[n] = struct{}{}
            continue
        }
        // Range or open-ended range; exactly one dash allowed.
        if strings.Count(tok, "-") != 1 {
            return bootstrap.StepSelection{}, fmt.Errorf("invalid range %q", tok)
        }
        lo, hi, _ := strings.Cut(tok, "-")
        lo, hi = strings.TrimSpace(lo), strings.TrimSpace(hi)
        start, end := 1, max
        switch {
        case lo == "" && hi == "":
            return bootstrap.StepSelection{}, fmt.Errorf("invalid range %q", tok)
        case lo == "":
            // "-N": from the first step through N.
            n, err := parseStepNumber(hi, max)
            if err != nil {
                return bootstrap.StepSelection{}, err
            }
            end = n
        case hi == "":
            // "N-": from N through the last step.
            n, err := parseStepNumber(lo, max)
            if err != nil {
                return bootstrap.StepSelection{}, err
            }
            start = n
        default:
            a, err := parseStepNumber(lo, max)
            if err != nil {
                return bootstrap.StepSelection{}, err
            }
            b, err := parseStepNumber(hi, max)
            if err != nil {
                return bootstrap.StepSelection{}, err
            }
            if a > b {
                return bootstrap.StepSelection{}, fmt.Errorf("invalid descending range %q", tok)
            }
            start, end = a, b
        }
        for i := start; i <= end; i++ {
            picked[i] = struct{}{}
        }
    }
    out := make([]int, 0, len(picked))
    for n := range picked {
        out = append(out, n)
    }
    sort.Ints(out)
    return bootstrap.StepSelection{Indices: out}, nil
}
// parseStepNumber converts raw into a 1-based step index, rejecting
// anything that is not an integer in [1, max]. Surrounding whitespace
// is tolerated.
func parseStepNumber(raw string, max int) (int, error) {
	n, err := strconv.Atoi(strings.TrimSpace(raw))
	if err != nil {
		return 0, fmt.Errorf("invalid step number %q", raw)
	}
	if n >= 1 && n <= max {
		return n, nil
	}
	return 0, fmt.Errorf("step number %d out of range 1-%d", n, max)
}

View File

@@ -0,0 +1,89 @@
package internal
import (
"fmt"
"os"
"strings"
"time"
"github.com/spf13/cobra"
"k8s.io/klog/v2"
"example.com/monok8s/pkg/system"
)
// newInternalFWPrintEnvCmd builds the hidden "fw-printenv" subcommand.
// It shells out to the fw_printenv binary against the given fw_env.config
// and prints the result, optionally restricted to one variable via --key.
func newInternalFWPrintEnvCmd() *cobra.Command {
	var key string
	var configPath string
	cmd := &cobra.Command{
		Use:   "fw-printenv",
		Short: "Run fw_printenv",
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := cmd.Context()
			key = strings.TrimSpace(key)
			configPath = strings.TrimSpace(configPath)
			if configPath == "" {
				configPath = defaultFWEnvConfigPath
			}
			// Fail fast with a clear error if the config file is absent.
			if _, err := os.Stat(configPath); err != nil {
				return fmt.Errorf("stat fw env config %q: %w", configPath, err)
			}
			runner := system.NewRunner(system.RunnerConfig{
				DefaultTimeout: 15 * time.Second,
				StreamOutput:   false,
				Logger:         &system.StdLogger{},
			})
			runArgs := []string{"-c", configPath}
			if key != "" {
				runArgs = append(runArgs, key)
			}
			res, err := runner.RunWithOptions(
				ctx,
				"fw_printenv",
				runArgs,
				system.RunOptions{
					Quiet: true,
				},
			)
			if err != nil {
				if res != nil {
					klog.ErrorS(err, "fw_printenv failed",
						"key", key,
						"stdout", strings.TrimSpace(res.Stdout),
						"stderr", strings.TrimSpace(res.Stderr),
					)
				}
				return err
			}
			stdout := strings.TrimSpace(res.Stdout)
			stderr := strings.TrimSpace(res.Stderr)
			if stdout != "" {
				// Write through the command's output stream rather than
				// os.Stdout directly, matching the other subcommands and
				// keeping the output capturable in tests.
				fmt.Fprintln(cmd.OutOrStdout(), stdout)
			}
			if stderr != "" {
				klog.InfoS("fw_printenv stderr", "output", stderr)
			}
			klog.InfoS("fw_printenv succeeded",
				"key", key,
				"configPath", configPath,
			)
			return nil
		},
	}
	cmd.Flags().StringVar(&key, "key", "", "U-Boot environment variable name to print")
	cmd.Flags().StringVar(&configPath, "config", defaultFWEnvConfigPath, "Path to fw_env.config")
	return cmd
}

View File

@@ -0,0 +1,115 @@
package internal
import (
"fmt"
"os"
"strings"
"time"
"github.com/spf13/cobra"
"k8s.io/klog/v2"
"example.com/monok8s/pkg/system"
)
const defaultFWEnvConfigPath = "/host/etc/fw_env.config"
// newInternalFWSetEnvCmd builds the hidden "fw-setenv" subcommand. It
// writes a single U-Boot environment variable through the fw_setenv
// binary, after a fw_printenv preflight confirms the environment is
// readable with the given fw_env.config.
func newInternalFWSetEnvCmd() *cobra.Command {
	var key string
	var value string
	var configPath string
	cmd := &cobra.Command{
		Use:   "fw-setenv",
		Short: "Run fw_setenv",
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := cmd.Context()
			// Normalize inputs; note the value is trimmed too, so a
			// whitespace-only --value is rejected below.
			key = strings.TrimSpace(key)
			value = strings.TrimSpace(value)
			configPath = strings.TrimSpace(configPath)
			if key == "" {
				return fmt.Errorf("--key is required")
			}
			if value == "" {
				return fmt.Errorf("--value is required")
			}
			if configPath == "" {
				configPath = defaultFWEnvConfigPath
			}
			// Fail fast if the fw_env.config file is missing.
			if _, err := os.Stat(configPath); err != nil {
				return fmt.Errorf("stat fw env config %q: %w", configPath, err)
			}
			runner := system.NewRunner(system.RunnerConfig{
				DefaultTimeout: 15 * time.Second,
				StreamOutput:   false,
				Logger:         &system.StdLogger{},
			})
			// Preflight first so failure is clearer than blindly writing.
			preflightRes, err := runner.RunWithOptions(
				ctx,
				"fw_printenv",
				[]string{"-c", configPath},
				system.RunOptions{
					Quiet: true,
				},
			)
			if err != nil {
				if preflightRes != nil {
					klog.ErrorS(err, "fw_printenv preflight failed",
						"stdout", strings.TrimSpace(preflightRes.Stdout),
						"stderr", strings.TrimSpace(preflightRes.Stderr),
					)
				}
				return fmt.Errorf("fw_printenv preflight: %w", err)
			}
			// Perform the actual write.
			res, err := runner.RunWithOptions(
				ctx,
				"fw_setenv",
				[]string{
					"-c", configPath,
					key, value,
				},
				system.RunOptions{
					Quiet: true,
				},
			)
			if err != nil {
				if res != nil {
					klog.ErrorS(err, "fw_setenv failed",
						"key", key,
						"value", value,
						"stdout", strings.TrimSpace(res.Stdout),
						"stderr", strings.TrimSpace(res.Stderr),
					)
				}
				return err
			}
			// fw_setenv is normally silent; surface any output for debugging.
			if strings.TrimSpace(res.Stdout) != "" {
				klog.InfoS("fw_setenv stdout", "output", strings.TrimSpace(res.Stdout))
			}
			if strings.TrimSpace(res.Stderr) != "" {
				klog.InfoS("fw_setenv stderr", "output", strings.TrimSpace(res.Stderr))
			}
			klog.InfoS("fw_setenv succeeded",
				"key", key,
				"value", value,
				"configPath", configPath,
			)
			return nil
		},
	}
	cmd.Flags().StringVar(&key, "key", "", "U-Boot environment variable name")
	cmd.Flags().StringVar(&value, "value", "", "U-Boot environment variable value")
	cmd.Flags().StringVar(&configPath, "config", defaultFWEnvConfigPath, "Path to fw_env.config")
	return cmd
}

View File

@@ -0,0 +1,41 @@
package internal
import (
"example.com/monok8s/pkg/bootstrap"
"example.com/monok8s/pkg/config"
"github.com/spf13/cobra"
)
// NewCmdInternal assembles the hidden "internal" command group used for
// debugging and step-by-step testing of the bootstrap machinery.
func NewCmdInternal() *cobra.Command {
	var configPath string
	cmd := &cobra.Command{Use: "internal", Hidden: true}
	runStep := &cobra.Command{
		Use:   "run-step STEP",
		Short: "Run one internal step for testing",
		Args:  cobra.ExactArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			// Resolve and load the MonoKSConfig, then execute exactly
			// one named bootstrap step against it.
			path, err := (config.Loader{}).ResolvePath(configPath)
			if err != nil {
				return err
			}
			cfg, err := (config.Loader{}).Load(path)
			if err != nil {
				return err
			}
			if err := bootstrap.NewRunner(cfg).RunNamedStep(cmd.Context(), args[0]); err != nil {
				return err
			}
			cmd.Println("OK")
			return nil
		},
	}
	cmd.AddCommand(runStep)
	cmd.AddCommand(newInternalFWSetEnvCmd())
	cmd.AddCommand(newInternalFWPrintEnvCmd())
	cmd.PersistentFlags().StringVarP(&configPath, "config", "c", "", "path to MonoKSConfig yaml")
	return cmd
}

View File

@@ -0,0 +1,60 @@
package root
import (
"flag"
"os"
agentcmd "example.com/monok8s/pkg/cmd/agent"
checkconfigcmd "example.com/monok8s/pkg/cmd/checkconfig"
controllercmd "example.com/monok8s/pkg/cmd/controller"
createcmd "example.com/monok8s/pkg/cmd/create"
initcmd "example.com/monok8s/pkg/cmd/initcmd"
internalcmd "example.com/monok8s/pkg/cmd/internal"
versioncmd "example.com/monok8s/pkg/cmd/version"
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/klog/v2"
)
// init registers klog's flags on the standard flag set, forces logging
// to stderr, and picks a default verbosity: 4 when DEBUG is set in the
// environment, 0 otherwise.
func init() {
	klog.InitFlags(nil)
	_ = flag.Set("logtostderr", "true")
	level := "0"
	if os.Getenv("DEBUG") != "" {
		level = "4"
	}
	_ = flag.Set("v", level)
}
// NewRootCmd constructs the top-level "ctl" command with every
// subcommand registered and klog/kubectl-style flags exposed through
// the persistent flag set.
func NewRootCmd() *cobra.Command {
	configFlags := genericclioptions.NewConfigFlags(true)
	root := &cobra.Command{
		Use:           "ctl",
		Short:         "MonoK8s control tool",
		SilenceUsage:  true,
		SilenceErrors: true,
		// Re-assert stderr logging before every run in case a command
		// line flag tried to redirect it.
		PersistentPreRun: func(*cobra.Command, []string) {
			_ = flag.Set("logtostderr", "true")
		},
	}
	// Expose klog stdlib flags through Cobra/pflag.
	root.PersistentFlags().AddGoFlagSet(flag.CommandLine)
	configFlags.AddFlags(root.PersistentFlags())
	root.AddCommand(
		versioncmd.NewCmdVersion(),
		initcmd.NewCmdInit(configFlags),
		checkconfigcmd.NewCmdCheckConfig(),
		createcmd.NewCmdCreate(configFlags),
		agentcmd.NewCmdAgent(configFlags),
		controllercmd.NewCmdController(configFlags),
		internalcmd.NewCmdInternal(),
	)
	return root
}

View File

@@ -0,0 +1,76 @@
package version
import (
"encoding/json"
"fmt"
"github.com/spf13/cobra"
buildinfo "example.com/monok8s/pkg/buildinfo"
)
// versionInfo is the JSON-serializable shape of the build metadata
// printed by the "version" subcommand.
type versionInfo struct {
	Version     string `json:"version"`           // application version string
	GitRevision string `json:"gitRevision"`       // source commit the binary was built from
	Timestamp   string `json:"timestamp"`         // build timestamp
	KubeVersion string `json:"kubernetesVersion"` // Kubernetes version this binary targets
}
// NewCmdVersion builds the "version" subcommand, which reports build
// metadata as human-readable text, JSON, or a single field depending on
// the mutually exclusive --short/--json/--kubernetes flags.
func NewCmdVersion() *cobra.Command {
	var (
		shortOutput      bool
		jsonOutput       bool
		kubernetesOutput bool
	)
	cmd := &cobra.Command{
		Use:   "version",
		Short: "Print version information",
		Args:  cobra.NoArgs,
		RunE: func(cmd *cobra.Command, _ []string) error {
			info := versionInfo{
				Version:     buildinfo.Version,
				GitRevision: buildinfo.GitRevision,
				Timestamp:   buildinfo.Timestamp,
				KubeVersion: buildinfo.KubeVersion,
			}
			w := cmd.OutOrStdout()
			if jsonOutput {
				enc := json.NewEncoder(w)
				enc.SetIndent("", " ")
				return enc.Encode(info)
			}
			if kubernetesOutput {
				_, err := fmt.Fprintln(w, info.KubeVersion)
				return err
			}
			if shortOutput {
				_, err := fmt.Fprintln(w, info.Version)
				return err
			}
			_, err := fmt.Fprintf(
				w,
				"Version: %s\nGit commit: %s\nBuilt at: %s\nKubernetes: %s\n",
				info.Version,
				info.GitRevision,
				info.Timestamp,
				info.KubeVersion,
			)
			return err
		},
	}
	fs := cmd.Flags()
	fs.BoolVar(&shortOutput, "short", false, "Show only the application version")
	fs.BoolVar(&jsonOutput, "json", false, "Show version information as JSON")
	fs.BoolVarP(&kubernetesOutput, "kubernetes", "k", false, "Show only the Kubernetes version this binary was built for")
	cmd.MarkFlagsMutuallyExclusive("short", "json", "kubernetes")
	return cmd
}

View File

@@ -0,0 +1,127 @@
package config
import (
"errors"
"fmt"
"os"
"strings"
monov1alpha1 "example.com/monok8s/pkg/apis/monok8s/v1alpha1"
"gopkg.in/yaml.v3"
)
// EnvVar names the environment variable consulted for the config file
// path when no explicit flag value is given.
const EnvVar = "MONOKSCONFIG"

// Loader resolves and reads MonoKSConfig files.
type Loader struct{}

// ResolvePath returns the config file path to use: the flag value when
// it is non-blank, otherwise the MONOKSCONFIG environment variable,
// otherwise an error telling the user how to supply one.
func (Loader) ResolvePath(flagValue string) (string, error) {
	if strings.TrimSpace(flagValue) != "" {
		return flagValue, nil
	}
	env := strings.TrimSpace(os.Getenv(EnvVar))
	if env != "" {
		return env, nil
	}
	return "", fmt.Errorf("config path not provided; pass -c or set %s", EnvVar)
}
// Load reads the MonoKSConfig YAML at path, back-fills kind/apiVersion
// when omitted, applies the package defaults, and validates the result.
// It returns an error if the file is unreadable, is not valid YAML, or
// fails Validate.
func (Loader) Load(path string) (*monov1alpha1.MonoKSConfig, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	var cfg monov1alpha1.MonoKSConfig
	if err := yaml.Unmarshal(data, &cfg); err != nil {
		return nil, err
	}
	// Tolerate omitted TypeMeta so hand-written configs stay short.
	if cfg.Kind == "" {
		cfg.Kind = "MonoKSConfig"
	}
	if cfg.APIVersion == "" {
		cfg.APIVersion = monov1alpha1.Group + "/" + monov1alpha1.Version
	}
	ApplyDefaults(&cfg)
	if err := Validate(&cfg); err != nil {
		return nil, err
	}
	return &cfg, nil
}
// ApplyDefaults fills the zero-valued optional fields of cfg with the
// project-wide defaults; fields already set by the user are untouched.
func ApplyDefaults(cfg *monov1alpha1.MonoKSConfig) {
	setDefault := func(field *string, value string) {
		if *field == "" {
			*field = value
		}
	}
	setDefault(&cfg.Spec.PodSubnet, "10.244.0.0/16")
	setDefault(&cfg.Spec.ServiceSubnet, "10.96.0.0/12")
	setDefault(&cfg.Spec.ClusterName, "monok8s")
	setDefault(&cfg.Spec.ClusterDomain, "cluster.local")
	setDefault(&cfg.Spec.ContainerRuntimeEndpoint, "unix:///var/run/crio/crio.sock")
	setDefault(&cfg.Spec.ClusterRole, "control-plane")
	setDefault(&cfg.Spec.CNIPlugin, "none")
	if len(cfg.Spec.KubeProxyNodePortAddresses) == 0 {
		cfg.Spec.KubeProxyNodePortAddresses = []string{"primary"}
	}
}
// Validate checks cfg for structural problems and returns one error
// joining every issue found, or nil when the config is acceptable.
func Validate(cfg *monov1alpha1.MonoKSConfig) error {
	var problems []string
	add := func(msg string) {
		problems = append(problems, msg)
	}
	if cfg.Kind != "MonoKSConfig" {
		add("kind must be MonoKSConfig")
	}
	wantAPIVersion := monov1alpha1.Group + "/" + monov1alpha1.Version
	if cfg.APIVersion != wantAPIVersion {
		add("apiVersion must be " + wantAPIVersion)
	}
	// Required scalar fields; whitespace-only values count as missing.
	if strings.TrimSpace(cfg.Spec.KubernetesVersion) == "" {
		add("spec.kubernetesVersion is required")
	}
	if strings.TrimSpace(cfg.Spec.NodeName) == "" {
		add("spec.nodeName is required")
	}
	if strings.TrimSpace(cfg.Spec.APIServerAdvertiseAddress) == "" {
		add("spec.apiServerAdvertiseAddress is required")
	}
	if strings.TrimSpace(cfg.Spec.Network.Hostname) == "" {
		add("spec.network.hostname is required")
	}
	if strings.TrimSpace(cfg.Spec.Network.ManagementIface) == "" {
		add("spec.network.managementIface is required")
	}
	if !strings.Contains(cfg.Spec.Network.ManagementCIDR, "/") {
		add("spec.network.managementCIDR must include a CIDR prefix")
	}
	if cfg.Spec.ClusterRole != "control-plane" && cfg.Spec.ClusterRole != "worker" {
		add("spec.clusterRole can either be control-plane or worker")
	}
	for _, ns := range cfg.Spec.Network.DNSNameservers {
		if ns == "10.96.0.10" {
			add("spec.network.dnsNameservers must not include cluster DNS service IP 10.96.0.10")
		}
	}
	switch {
	case cfg.Spec.ClusterRole == "worker":
		// Workers always join an existing cluster, so all three join
		// parameters are mandatory.
		if cfg.Spec.APIServerEndpoint == "" {
			add("spec.apiServerEndpoint is required to join a cluster")
		}
		if cfg.Spec.BootstrapToken == "" {
			add("spec.bootstrapToken is required to join a cluster")
		}
		if cfg.Spec.DiscoveryTokenCACertHash == "" {
			add("spec.discoveryTokenCACertHash is required to join a cluster")
		}
	case !cfg.Spec.InitControlPlane && cfg.Spec.ControlPlaneCertKey == "":
		// A control-plane node that is not the first one needs the
		// certificate key to join.
		add("spec.controlPlaneCertKey is required for control-plane join")
	}
	if len(problems) == 0 {
		return nil
	}
	return errors.New(strings.Join(problems, "; "))
}

View File

@@ -0,0 +1,42 @@
package osimage
import (
"context"
"fmt"
)
// ApplyImageStreamed downloads a zstd-compressed raw disk image from
// opts.URL, streams it decompressed onto opts.TargetPath, then re-reads
// the target to verify its SHA-256 against opts.ExpectedRawSHA256. The
// target is only touched after option validation and the safety checks
// in CheckTargetSafe pass.
func ApplyImageStreamed(ctx context.Context, opts ApplyOptions) (*ApplyResult, error) {
	if err := ValidateApplyOptions(opts); err != nil {
		return nil, err
	}
	if err := CheckTargetSafe(opts.TargetPath, opts.ExpectedRawSize); err != nil {
		return nil, fmt.Errorf("unsafe target %q: %w", opts.TargetPath, err)
	}
	src, closeFn, err := OpenDecompressedHTTPStream(ctx, opts.URL, opts.HTTPTimeout)
	if err != nil {
		return nil, fmt.Errorf("open source stream: %w", err)
	}
	defer closeFn()
	written, err := WriteStreamToTarget(ctx, src, opts.TargetPath, opts.ExpectedRawSize, opts.BufferSize, opts.Progress)
	if err != nil {
		return nil, fmt.Errorf("write target: %w", err)
	}
	// Hash what actually landed on disk (rather than the in-flight
	// stream) so on-media corruption is caught too.
	sum, err := VerifyTargetSHA256(ctx, opts.TargetPath, opts.ExpectedRawSize, opts.BufferSize, opts.Progress)
	if err != nil {
		return nil, fmt.Errorf("verify target: %w", err)
	}
	if err := VerifySHA256(sum, opts.ExpectedRawSHA256); err != nil {
		return nil, fmt.Errorf("final disk checksum mismatch: %w", err)
	}
	return &ApplyResult{
		BytesWritten:   written,
		VerifiedSHA256: sum,
		VerificationOK: true,
	}, nil
}

View File

@@ -0,0 +1,61 @@
package osimage
import (
"fmt"
"os"
"strings"
"sync"
monov1alpha1 "example.com/monok8s/pkg/apis/monok8s/v1alpha1"
)
var (
	// bootStateOnce guards the one-time lazy read of the boot state
	// file; both the parsed map and any error are cached for all
	// subsequent callers of ReadBootState.
	bootStateOnce sync.Once
	bootState     map[string]string
	bootStateErr  error
)
// PercentOf returns done as a whole-number percentage of total, clamped
// to the range [0, 100]. A non-positive total always yields 0.
func PercentOf(done, total int64) int64 {
	if total <= 0 {
		return 0
	}
	switch pct := done * 100 / total; {
	case pct < 0:
		return 0
	case pct > 100:
		return 100
	default:
		return pct
	}
}
// ReadBootState parses the boot state file (KEY=VALUE lines; blank lines
// and #-comments are ignored) into a map. The file is read at most once
// per process: the result — including a failure — is cached via
// sync.Once, so a transient read error is NOT retried on later calls.
func ReadBootState() (map[string]string, error) {
	bootStateOnce.Do(func() {
		data, err := os.ReadFile(monov1alpha1.BootStateFile)
		if err != nil {
			bootStateErr = err
			return
		}
		out := make(map[string]string)
		for _, line := range strings.Split(string(data), "\n") {
			line = strings.TrimSpace(line)
			if line == "" || strings.HasPrefix(line, "#") {
				continue
			}
			k, v, ok := strings.Cut(line, "=")
			if !ok {
				// A line without '=' poisons the whole read; bootState
				// stays nil in that case.
				bootStateErr = fmt.Errorf("invalid line: %q", line)
				return
			}
			out[strings.TrimSpace(k)] = strings.TrimSpace(v)
		}
		bootState = out
	})
	return bootState, bootStateErr
}

View File

@@ -0,0 +1,136 @@
package osimage
import (
"k8s.io/klog/v2"
"sync"
"time"
)
// progressState tracks what has already been logged for one stage so
// repeated progress callbacks do not flood the log.
type progressState struct {
	lastTime    time.Time // when the last line for this stage was emitted
	lastPercent int64     // last percentage logged; -1 means nothing yet
	lastBucket  int64     // last milestone bucket logged; -1 means nothing yet
}

// ProgressLogger throttles progress logging per stage: the first update,
// each crossed milestone bucket, and completion are always logged, and
// otherwise at most one line per minInterval. It holds a plain map with
// no locking, so it is not safe for concurrent use.
type ProgressLogger struct {
	minInterval time.Duration
	bucketSize  int64
	states      map[string]*progressState
}
// NewProgressLogger builds a ProgressLogger that emits at most one timed
// refresh per minSeconds (negative values are treated as 0) and one line
// per milestone bucket of bucketSize percent (non-positive values
// default to 10).
func NewProgressLogger(minSeconds int, bucketSize int64) *ProgressLogger {
	if minSeconds < 0 {
		minSeconds = 0
	}
	if bucketSize <= 0 {
		bucketSize = 10
	}
	return &ProgressLogger{
		minInterval: time.Duration(minSeconds) * time.Second,
		bucketSize:  bucketSize,
		states:      map[string]*progressState{},
	}
}
// state returns the per-stage progress record, lazily creating one whose
// -1 sentinels mark "nothing logged yet".
func (l *ProgressLogger) state(stage string) *progressState {
	if existing, ok := l.states[stage]; ok {
		return existing
	}
	fresh := &progressState{
		lastPercent: -1,
		lastBucket:  -1,
	}
	l.states[stage] = fresh
	return fresh
}
// Log emits a throttled progress line (at klog verbosity 4) for p.Stage.
// A line is always written for the first visible update and once for
// completion; otherwise one line per crossed bucketSize-percent
// milestone, plus a timed refresh at most once per minInterval when the
// percentage actually advanced. Updates with an unknown total are
// dropped.
func (l *ProgressLogger) Log(p Progress) {
	if p.BytesTotal <= 0 {
		return
	}
	percent := PercentOf(p.BytesComplete, p.BytesTotal)
	now := time.Now()
	bucket := percent / l.bucketSize
	s := l.state(p.Stage)
	// Always log first visible progress
	if s.lastPercent == -1 {
		s.lastPercent = percent
		s.lastBucket = bucket
		s.lastTime = now
		klog.V(4).InfoS(p.Stage, "progress", percent)
		return
	}
	// Always log completion once
	if percent == 100 && s.lastPercent < 100 {
		s.lastPercent = 100
		s.lastBucket = 100 / l.bucketSize
		s.lastTime = now
		klog.V(4).InfoS(p.Stage, "progress", 100)
		return
	}
	// Log if we crossed a new milestone bucket
	if bucket > s.lastBucket {
		s.lastPercent = percent
		s.lastBucket = bucket
		s.lastTime = now
		klog.V(4).InfoS(p.Stage, "progress", percent)
		return
	}
	// Otherwise allow a timed refresh if progress moved
	if now.Sub(s.lastTime) >= l.minInterval && percent > s.lastPercent {
		s.lastPercent = percent
		s.lastTime = now
		klog.V(4).InfoS(p.Stage, "progress", percent)
	}
}
// TimeBasedUpdater rate-limits a callback: Run executes it at most once
// per interval and never concurrently; skipped invocations return nil.
type TimeBasedUpdater struct {
	mu       sync.Mutex
	interval time.Duration
	lastRun  time.Time
	inFlight bool
}

// NewTimeBasedUpdater returns an updater that fires at most once every
// `seconds` seconds; non-positive values fall back to 15 seconds.
func NewTimeBasedUpdater(seconds int) *TimeBasedUpdater {
	if seconds <= 0 {
		seconds = 15
	}
	return &TimeBasedUpdater{
		interval: time.Duration(seconds) * time.Second,
	}
}

// Run invokes fn unless another invocation is still in flight or the
// previous run finished starting less than one interval ago; in either
// case the call is silently skipped and nil is returned.
func (u *TimeBasedUpdater) Run(fn func() error) error {
	now := time.Now()
	u.mu.Lock()
	skip := u.inFlight || (!u.lastRun.IsZero() && now.Sub(u.lastRun) < u.interval)
	if !skip {
		u.lastRun = now
		u.inFlight = true
	}
	u.mu.Unlock()
	if skip {
		return nil
	}
	defer func() {
		u.mu.Lock()
		u.inFlight = false
		u.mu.Unlock()
	}()
	return fn()
}

View File

@@ -0,0 +1,50 @@
package osimage
import (
"context"
"fmt"
"io"
"net/http"
"time"
"github.com/klauspost/compress/zstd"
)
// OpenDecompressedHTTPStream issues a GET for url and returns a reader
// yielding the response body decompressed with zstd, plus a close
// function that must be called to release both the decoder and the
// connection. timeout bounds the entire request, including streaming the
// body; non-positive values default to 30 minutes.
func OpenDecompressedHTTPStream(ctx context.Context, url string, timeout time.Duration) (io.Reader, func() error, error) {
	if url == "" {
		return nil, nil, fmt.Errorf("url is required")
	}
	if timeout <= 0 {
		timeout = 30 * time.Minute
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return nil, nil, fmt.Errorf("build request: %w", err)
	}
	client := &http.Client{Timeout: timeout}
	resp, err := client.Do(req)
	if err != nil {
		return nil, nil, fmt.Errorf("http get %q: %w", url, err)
	}
	// Anything but 200 means we will not receive the expected image bytes.
	if resp.StatusCode != http.StatusOK {
		resp.Body.Close()
		return nil, nil, fmt.Errorf("unexpected status: %s", resp.Status)
	}
	dec, err := zstd.NewReader(resp.Body)
	if err != nil {
		resp.Body.Close()
		return nil, nil, fmt.Errorf("create zstd decoder: %w", err)
	}
	closeFn := func() error {
		dec.Close()
		return resp.Body.Close()
	}
	return dec, closeFn, nil
}

View File

@@ -0,0 +1,29 @@
package osimage
import "time"
// ApplyOptions configures one streamed OS-image write.
type ApplyOptions struct {
	URL               string        // source of the zstd-compressed image
	TargetPath        string        // device node (or plain file in dev builds) to write
	ExpectedRawSHA256 string        // hex SHA-256 of the decompressed image
	ExpectedRawSize   int64         // decompressed size in bytes
	HTTPTimeout       time.Duration // overall HTTP timeout; <=0 uses the default
	BufferSize        int           // copy buffer size; <=0 uses the default
	Progress          ProgressFunc  // optional progress callback; may be nil
}

// Progress is a snapshot of how far one stage (e.g. "flash" or
// "verify") has advanced.
type Progress struct {
	Stage         string
	BytesComplete int64
	BytesTotal    int64
}

// ProgressFunc receives progress snapshots during long-running work.
type ProgressFunc func(Progress)

// ApplyResult summarizes a completed image apply.
type ApplyResult struct {
	BytesWritten   int64  // bytes written to the target
	VerifiedSHA256 string // digest computed from the target after writing
	VerificationOK bool   // true when verification ran and matched
}

View File

@@ -0,0 +1,104 @@
package osimage
import (
"context"
"crypto/sha256"
"encoding/hex"
"fmt"
"io"
"os"
"strings"
)
// VerifyTargetSHA256 re-reads exactly expectedSize bytes from targetPath
// and returns the hex-encoded SHA-256 of that prefix. It reports
// progress under the "verify" stage, honors ctx cancellation between
// chunks, and fails if fewer than expectedSize bytes could be read.
func VerifyTargetSHA256(ctx context.Context, targetPath string, expectedSize int64,
	bufferSize int, progress ProgressFunc) (string, error) {
	if targetPath == "" {
		return "", fmt.Errorf("target path is required")
	}
	if expectedSize <= 0 {
		return "", fmt.Errorf("expected raw size is required for verification")
	}
	if bufferSize <= 0 {
		bufferSize = 4 * 1024 * 1024
	}
	f, err := os.Open(targetPath)
	if err != nil {
		return "", fmt.Errorf("open target for verify: %w", err)
	}
	defer f.Close()
	h := sha256.New()
	buf := make([]byte, bufferSize)
	var readTotal int64
	// Cap reads at expectedSize: the device is typically larger than the
	// image that was written to it.
	limited := io.LimitReader(f, expectedSize)
	for {
		// Allow cancellation between chunks.
		select {
		case <-ctx.Done():
			return "", ctx.Err()
		default:
		}
		n, err := limited.Read(buf)
		if n > 0 {
			if _, werr := h.Write(buf[:n]); werr != nil {
				return "", fmt.Errorf("hash target: %w", werr)
			}
			readTotal += int64(n)
			if progress != nil {
				progress(Progress{
					Stage:         "verify",
					BytesComplete: readTotal,
					BytesTotal:    expectedSize,
				})
			}
		}
		if err == io.EOF {
			break
		}
		if err != nil {
			return "", fmt.Errorf("read target: %w", err)
		}
	}
	if readTotal != expectedSize {
		return "", fmt.Errorf("verify size mismatch: got %d want %d", readTotal, expectedSize)
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}
// ValidateApplyOptions checks that opts carries everything
// ApplyImageStreamed needs: a source URL, a target path, and the
// expected raw checksum and size.
func ValidateApplyOptions(opts ApplyOptions) error {
	switch {
	case opts.URL == "":
		return fmt.Errorf("url is required")
	case opts.TargetPath == "":
		return fmt.Errorf("target path is required")
	case opts.ExpectedRawSHA256 == "":
		return fmt.Errorf("expected raw sha256 is required")
	case opts.ExpectedRawSize <= 0:
		return fmt.Errorf("expected raw size must be > 0")
	}
	return nil
}
// VerifySHA256 compares two hex digests after normalization. An empty
// expected digest disables the check and always passes.
func VerifySHA256(got, expected string) error {
	want := NormalizeSHA256(expected)
	if want == "" {
		return nil
	}
	if have := NormalizeSHA256(got); have != want {
		return fmt.Errorf("sha256 mismatch: got %s want %s", have, want)
	}
	return nil
}

// NormalizeSHA256 canonicalizes a hex digest for comparison by stripping
// surrounding whitespace and lowercasing the hex letters.
func NormalizeSHA256(s string) string {
	return strings.ToLower(strings.TrimSpace(s))
}

View File

@@ -0,0 +1,35 @@
//go:build !dev
package osimage
import (
"fmt"
"os"
"strings"
)
// CheckTargetSafe performs basic sanity checks before a raw image is
// written to targetPath: the path must live under /dev and must stat as
// a device node. Deeper checks (mount state, root device, capacity,
// whole-disk detection) remain TODO.
func CheckTargetSafe(targetPath string, expectedRawSize int64) error {
	// expectedRawSize is reserved for a future capacity check.
	_ = expectedRawSize
	if !strings.HasPrefix(targetPath, "/dev/") {
		return fmt.Errorf("target must be a device path under /dev")
	}
	info, err := os.Stat(targetPath)
	if err != nil {
		return fmt.Errorf("stat target: %w", err)
	}
	if info.Mode()&os.ModeDevice == 0 {
		return fmt.Errorf("target is not a device")
	}
	// TODO: Add stronger checks
	//  - EnsureNotMounted(targetPath)
	//  - EnsureNotCurrentRoot(targetPath)
	//  - EnsurePartitionNotWholeDisk(targetPath)
	//  - EnsureCapacity(targetPath, expectedRawSize)
	return nil
}

View File

@@ -0,0 +1,7 @@
//go:build dev
package osimage
// CheckTargetSafe is the dev-build stub: every target is accepted so
// development images can be written to ordinary files.
func CheckTargetSafe(targetPath string, expectedRawSize int64) error {
	return nil
}

View File

@@ -0,0 +1,152 @@
package osimage
import (
"context"
"fmt"
"io"
"os"
"time"
)
// Tunables for the adaptive, rate-limited image write path.
const (
	defaultWriteBufferSize = 1 * 1024 * 1024        // copy buffer when the caller gives none
	defaultMinWriteBPS     = int64(2 * 1024 * 1024) // floor for the adaptive write rate
	defaultInitialWriteBPS = int64(4 * 1024 * 1024) // starting adaptive write rate
	defaultMaxWriteBPS     = int64(8 * 1024 * 1024) // ceiling for the adaptive write rate
	defaultBurstBytes      = int64(512 * 1024)      // token-bucket burst allowance
	defaultSampleInterval  = 250 * time.Millisecond // minimum spacing between disk-busy samples
	defaultSyncEveryBytes  = 0                      // periodic fsync threshold; 0 disables
	defaultBusyHighPct     = 80.0                   // utilization above this → back off
	defaultBusyLowPct      = 40.0                   // utilization below this → speed up
	defaultSlowAwait       = 20 * time.Millisecond  // per-write latency considered slow
	defaultFastAwait       = 5 * time.Millisecond   // per-write latency considered fast
)
// WriteStreamToTarget copies src onto the device/file at targetPath and
// fsyncs at the end. Writes are throttled by an adaptive controller when
// one can be built for the target; otherwise a no-op controller is used
// and writes run unthrottled. It returns the byte count written, and
// fails when that count differs from expectedSize (if expectedSize > 0).
func WriteStreamToTarget(ctx context.Context,
	src io.Reader, targetPath string,
	expectedSize int64, bufferSize int,
	progress ProgressFunc,
) (int64, error) {
	if targetPath == "" {
		return 0, fmt.Errorf("target path is required")
	}
	if bufferSize <= 0 {
		bufferSize = defaultWriteBufferSize
	}
	f, err := os.OpenFile(targetPath, os.O_WRONLY, 0)
	if err != nil {
		return 0, fmt.Errorf("open target: %w", err)
	}
	defer f.Close()
	// Best effort: fall back to the no-op controller when the disk-busy
	// monitor cannot be set up for this target.
	ctrl, err := newAdaptiveWriteController(targetPath)
	if err != nil {
		ctrl = newNoopAdaptiveWriteController()
	}
	written, err := copyWithProgressBuffer(
		ctx,
		f,
		src,
		expectedSize,
		"flash",
		progress,
		make([]byte, bufferSize),
		ctrl,
		defaultSyncEveryBytes,
	)
	if err != nil {
		return written, err
	}
	if expectedSize > 0 && written != expectedSize {
		return written, fmt.Errorf("written size mismatch: got %d want %d", written, expectedSize)
	}
	// Flush the page cache to the device before declaring success.
	if err := f.Sync(); err != nil {
		return written, fmt.Errorf("sync target: %w", err)
	}
	return written, nil
}
// copyWithProgressBuffer is an io.Copy-style loop with four extras:
// context cancellation between chunks, optional rate limiting via ctrl,
// per-chunk progress callbacks, and an optional periodic fsync every
// syncEvery bytes (0 disables). It returns the bytes written so far
// together with any error.
func copyWithProgressBuffer(
	ctx context.Context,
	dst *os.File,
	src io.Reader,
	total int64,
	stage string,
	progress ProgressFunc,
	buf []byte,
	ctrl *adaptiveWriteController,
	syncEvery int64,
) (int64, error) {
	var written int64
	var sinceSync int64
	for {
		// Honor cancellation before starting the next read/write cycle.
		select {
		case <-ctx.Done():
			return written, ctx.Err()
		default:
		}
		nr, er := src.Read(buf)
		if nr > 0 {
			// Pay the limiter for this chunk before writing, so the
			// controller shapes the actual device write rate.
			if ctrl != nil {
				if err := ctrl.Wait(ctx, nr); err != nil {
					return written, err
				}
			}
			nw, ew := dst.Write(buf[:nr])
			if nw > 0 {
				written += int64(nw)
				sinceSync += int64(nw)
				if ctrl != nil {
					ctrl.ObserveWrite(nw)
				}
				if progress != nil {
					progress(Progress{
						Stage:         stage,
						BytesComplete: written,
						BytesTotal:    total,
					})
				}
				if syncEvery > 0 && sinceSync >= syncEvery {
					if err := dst.Sync(); err != nil {
						return written, fmt.Errorf("periodic sync target: %w", err)
					}
					sinceSync = 0
					if ctrl != nil {
						ctrl.ObserveSync()
					}
				}
			}
			if ew != nil {
				return written, ew
			}
			// A short write without an error still aborts the copy.
			if nw != nr {
				return written, io.ErrShortWrite
			}
		}
		if er != nil {
			// EOF is the normal end of the stream, not an error.
			if er == io.EOF {
				return written, nil
			}
			return written, fmt.Errorf("copy %s: %w", stage, er)
		}
	}
}

View File

@@ -0,0 +1,106 @@
package osimage
import (
"context"
"fmt"
"io"
"time"
"k8s.io/klog/v2"
)
type repeatPatternReader struct {
pattern []byte
remain int64
off int
}
func newRepeatPatternReader(total int64, pattern []byte) *repeatPatternReader {
if len(pattern) == 0 {
pattern = []byte("monok8s-test-pattern-0123456789abcdef")
}
return &repeatPatternReader{
pattern: pattern,
remain: total,
}
}
func (r *repeatPatternReader) Read(p []byte) (int, error) {
if r.remain <= 0 {
return 0, io.EOF
}
if int64(len(p)) > r.remain {
p = p[:r.remain]
}
n := 0
for n < len(p) {
copied := copy(p[n:], r.pattern[r.off:])
n += copied
r.off += copied
if r.off == len(r.pattern) {
r.off = 0
}
}
r.remain -= int64(n)
return n, nil
}
// TestStreamToTarget exercises the write path by streaming 512 MiB of a
// synthetic repeating pattern to targetPath using a conservative
// 128 KiB buffer, logging throughput roughly once per second. Intended
// for manual burn-in of a device; it OVERWRITES the target's contents.
func TestStreamToTarget(ctx context.Context, targetPath string) error {
	const (
		totalSize  = int64(512 * 1024 * 1024) // 512 MiB
		bufferSize = 128 * 1024               // test the conservative setting
	)
	src := newRepeatPatternReader(totalSize, nil)
	start := time.Now()
	lastLog := start
	progress := func(p Progress) {
		now := time.Now()
		// Throttle to one line per second, but always log the final update.
		if now.Sub(lastLog) < 1*time.Second && p.BytesComplete != p.BytesTotal {
			return
		}
		lastLog = now
		var mbps float64
		elapsed := now.Sub(start).Seconds()
		if elapsed > 0 {
			mbps = float64(p.BytesComplete) / 1024.0 / 1024.0 / elapsed
		}
		klog.InfoS("test write progress",
			"stage", p.Stage,
			"bytesComplete", p.BytesComplete,
			"bytesTotal", p.BytesTotal,
			"mbpsAvg", fmt.Sprintf("%.2f", mbps),
		)
	}
	written, err := WriteStreamToTarget(
		ctx,
		src,
		targetPath,
		totalSize,
		bufferSize,
		progress,
	)
	if err != nil {
		return fmt.Errorf("write stream to target: %w", err)
	}
	elapsed := time.Since(start)
	mbps := float64(written) / 1024.0 / 1024.0 / elapsed.Seconds()
	klog.InfoS("test write complete",
		"targetPath", targetPath,
		"written", written,
		"elapsed", elapsed.String(),
		"mbpsAvg", fmt.Sprintf("%.2f", mbps),
	)
	return nil
}

View File

@@ -0,0 +1,400 @@
//go:build linux
package osimage
import (
"bufio"
"context"
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
"golang.org/x/sys/unix"
)
// adaptiveWriteController throttles image writes with a token bucket
// whose rate is adjusted from periodic /proc/diskstats samples: the rate
// is halved (down to minBPS) when the disk looks busy and grown by 20%
// (up to maxBPS) when it looks idle. See observe for the policy.
type adaptiveWriteController struct {
	mu             sync.Mutex
	limiter        *rateLimiter     // token bucket consulted before each write
	monitor        *diskBusyMonitor // nil in the no-op controller
	sampleInterval time.Duration    // minimum spacing between samples
	nextSampleAt   time.Time        // earliest time the next sample may be taken
	minBPS         int64            // lower bound for the adaptive rate
	maxBPS         int64            // upper bound for the adaptive rate
	busyHighPct    float64          // utilization threshold for backing off
	busyLowPct     float64          // utilization threshold for speeding up
}
// newAdaptiveWriteController builds a controller that samples the disk
// backing targetPath. It fails when the disk-busy monitor cannot be set
// up; the caller is expected to fall back to the no-op variant then.
func newAdaptiveWriteController(targetPath string) (*adaptiveWriteController, error) {
	mon, err := newDiskBusyMonitor(targetPath)
	if err != nil {
		return nil, err
	}
	now := time.Now()
	return &adaptiveWriteController{
		limiter:        newRateLimiter(defaultInitialWriteBPS, defaultBurstBytes),
		monitor:        mon,
		sampleInterval: defaultSampleInterval,
		nextSampleAt:   now.Add(defaultSampleInterval),
		minBPS:         defaultMinWriteBPS,
		maxBPS:         defaultMaxWriteBPS,
		busyHighPct:    defaultBusyHighPct,
		busyLowPct:     defaultBusyLowPct,
	}, nil
}

// newNoopAdaptiveWriteController returns a controller with no monitor
// and a zero-rate limiter; a zero rate disables limiting, so it never
// throttles.
func newNoopAdaptiveWriteController() *adaptiveWriteController {
	return &adaptiveWriteController{
		limiter:        newRateLimiter(0, 0),
		sampleInterval: defaultSampleInterval,
	}
}
// Wait blocks until the limiter grants n bytes or ctx is cancelled; a
// nil controller or limiter never blocks.
func (c *adaptiveWriteController) Wait(ctx context.Context, n int) error {
	if c == nil || c.limiter == nil {
		return nil
	}
	return c.limiter.Wait(ctx, n)
}

// ObserveWrite notes that n bytes were written; it only serves to
// trigger a periodic re-sample (n itself is currently unused).
func (c *adaptiveWriteController) ObserveWrite(n int) {
	c.observe(false)
}

// ObserveSync notes that an fsync just completed, which observe treats
// as a back-off signal.
func (c *adaptiveWriteController) ObserveSync() {
	c.observe(true)
}
// observe re-samples disk busyness at most once per sampleInterval and
// adapts the limiter: halve the rate (floored at minBPS) when the disk
// is busy, latency is high, or a sync just happened; grow it by 20%
// (capped at maxBPS) when the disk is clearly idle; otherwise leave the
// rate alone.
func (c *adaptiveWriteController) observe(afterSync bool) {
	if c == nil {
		return
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	now := time.Now()
	if c.monitor == nil || now.Before(c.nextSampleAt) {
		return
	}
	c.nextSampleAt = now.Add(c.sampleInterval)
	s, err := c.monitor.Sample(now)
	if err != nil {
		// Sampling problems are non-fatal; keep the current rate.
		return
	}
	cur := c.limiter.Rate()
	if cur <= 0 {
		cur = c.minBPS
	}
	switch {
	case s.UtilPct >= c.busyHighPct || s.Await >= defaultSlowAwait || afterSync:
		// Back off aggressively when the disk is obviously suffering.
		next := cur / 2
		if next < c.minBPS {
			next = c.minBPS
		}
		c.limiter.SetRate(next)
	case s.UtilPct <= c.busyLowPct && s.Await <= defaultFastAwait:
		// Recover slowly.
		next := cur + (cur / 5) // +20%
		if next > c.maxBPS {
			next = c.maxBPS
		}
		c.limiter.SetRate(next)
	}
}
// rateLimiter is a token bucket over bytes: tokens refill at rateBPS per
// second up to burst, and Wait blocks until enough tokens exist for the
// requested chunk. A non-positive rate disables limiting entirely.
type rateLimiter struct {
	mu      sync.Mutex
	rateBPS int64   // refill rate in bytes/second; <=0 means unlimited
	burst   int64   // maximum tokens the bucket can hold
	tokens  float64 // current token balance, in bytes
	last    time.Time
}

// newRateLimiter builds a limiter starting with a full bucket.
func newRateLimiter(rateBPS, burst int64) *rateLimiter {
	now := time.Now()
	if burst < 0 {
		burst = 0
	}
	return &rateLimiter{
		rateBPS: rateBPS,
		burst:   burst,
		tokens:  float64(burst),
		last:    now,
	}
}

// Rate returns the current refill rate in bytes/second.
func (r *rateLimiter) Rate() int64 {
	r.mu.Lock()
	defer r.mu.Unlock()
	return r.rateBPS
}

// SetRate changes the refill rate, settling accrued tokens first so the
// old rate applies up to this instant. Rates <= 0 disable limiting.
func (r *rateLimiter) SetRate(rateBPS int64) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.refillLocked(time.Now())
	r.rateBPS = rateBPS
	if rateBPS <= 0 {
		r.tokens = 0
		r.burst = 0
		return
	}
	// Keep burst small and fixed. Do not let burst scale with rate.
	r.burst = defaultBurstBytes
	if r.tokens > float64(r.burst) {
		r.tokens = float64(r.burst)
	}
}

// Wait blocks until n bytes' worth of tokens have been consumed (in
// at-most-burst-sized installments, so mid-wait SetRate calls take
// effect) or ctx is cancelled. It returns immediately when limiting is
// disabled.
func (r *rateLimiter) Wait(ctx context.Context, n int) error {
	if n <= 0 {
		return nil
	}
	remaining := n
	for remaining > 0 {
		r.mu.Lock()
		if r.rateBPS <= 0 {
			r.mu.Unlock()
			return nil
		}
		now := time.Now()
		r.refillLocked(now)
		allowed := remaining
		if int64(allowed) > r.burst && r.burst > 0 {
			allowed = int(r.burst)
		}
		if allowed <= 0 {
			allowed = remaining
		}
		if r.tokens >= float64(allowed) {
			r.tokens -= float64(allowed)
			r.mu.Unlock()
			remaining -= allowed
			continue
		}
		// Not enough tokens: sleep roughly until the shortfall refills
		// (with a 5ms floor to avoid busy spinning), then retry.
		missing := float64(allowed) - r.tokens
		waitDur := time.Duration(missing / float64(r.rateBPS) * float64(time.Second))
		if waitDur < 5*time.Millisecond {
			waitDur = 5 * time.Millisecond
		}
		r.mu.Unlock()
		timer := time.NewTimer(waitDur)
		select {
		case <-ctx.Done():
			timer.Stop()
			return ctx.Err()
		case <-timer.C:
		}
	}
	return nil
}

// refillLocked accrues tokens for the time elapsed since the last
// refill, capped at burst. Callers must hold r.mu.
func (r *rateLimiter) refillLocked(now time.Time) {
	if r.rateBPS <= 0 {
		r.last = now
		return
	}
	elapsed := now.Sub(r.last)
	if elapsed <= 0 {
		return
	}
	r.tokens += elapsed.Seconds() * float64(r.rateBPS)
	if r.tokens > float64(r.burst) {
		r.tokens = float64(r.burst)
	}
	r.last = now
}
// diskBusySample is one utilization/latency reading derived from two
// consecutive /proc/diskstats snapshots.
type diskBusySample struct {
	UtilPct float64       // % of wall time the disk spent doing I/O (0-100)
	Await   time.Duration // rough average I/O time per completed write
}

// diskBusyMonitor keeps the previous counter snapshot for one device so
// Sample can compute deltas against it.
type diskBusyMonitor struct {
	major      int
	minor      int
	lastAt     time.Time
	lastIOMs   uint64 // cumulative "time doing I/O" (ms) at the last sample
	lastWrites uint64 // cumulative writes completed at the last sample
}
// newDiskBusyMonitor resolves targetPath to a whole-disk major:minor
// pair and primes the monitor with an initial counter snapshot, so the
// first Sample call already has a baseline to diff against.
func newDiskBusyMonitor(targetPath string) (*diskBusyMonitor, error) {
	major, minor, err := resolveWholeDiskMajorMinor(targetPath)
	if err != nil {
		return nil, err
	}
	ioMs, writes, err := readDiskStats(major, minor)
	if err != nil {
		return nil, err
	}
	return &diskBusyMonitor{
		major:      major,
		minor:      minor,
		lastAt:     time.Now(),
		lastIOMs:   ioMs,
		lastWrites: writes,
	}, nil
}
// Sample reads the current disk counters and returns the utilization
// percentage and an approximate average per-write I/O time for the
// window since the previous call. A zero-length window yields an empty
// sample; the internal snapshot is advanced on every successful read.
func (m *diskBusyMonitor) Sample(now time.Time) (diskBusySample, error) {
	ioMs, writes, err := readDiskStats(m.major, m.minor)
	if err != nil {
		return diskBusySample{}, err
	}
	elapsedMs := now.Sub(m.lastAt).Milliseconds()
	if elapsedMs <= 0 {
		return diskBusySample{}, nil
	}
	deltaIOMs := int64(ioMs - m.lastIOMs)
	deltaWrites := int64(writes - m.lastWrites)
	m.lastAt = now
	m.lastIOMs = ioMs
	m.lastWrites = writes
	// /proc/diskstats counters can wrap or reset; a negative delta would
	// make both util and await meaningless, so clamp it to zero.
	if deltaIOMs < 0 {
		deltaIOMs = 0
	}
	util := float64(deltaIOMs) * 100 / float64(elapsedMs)
	if util > 100 {
		util = 100
	}
	var await time.Duration
	if deltaWrites > 0 {
		// Multiply before dividing to keep sub-millisecond precision;
		// the previous integer division truncated e.g. 5ms over 3
		// writes down to 1ms.
		await = time.Duration(deltaIOMs) * time.Millisecond / time.Duration(deltaWrites)
	}
	return diskBusySample{
		UtilPct: util,
		Await:   await,
	}, nil
}
// resolveWholeDiskMajorMinor maps targetPath (typically a partition
// device node) to the major:minor numbers of its parent whole disk via
// sysfs, which is where the interesting I/O counters live. Every sysfs
// lookup failure falls back to the partition's own numbers instead of
// returning an error; only a failed stat or a non-block-device target
// is fatal.
func resolveWholeDiskMajorMinor(targetPath string) (int, int, error) {
	var st unix.Stat_t
	if err := unix.Stat(targetPath, &st); err != nil {
		return 0, 0, fmt.Errorf("stat target %q: %w", targetPath, err)
	}
	if st.Mode&unix.S_IFMT != unix.S_IFBLK {
		return 0, 0, fmt.Errorf("target %q is not a block device", targetPath)
	}
	major := int(unix.Major(uint64(st.Rdev)))
	minor := int(unix.Minor(uint64(st.Rdev)))
	sysfsPath := fmt.Sprintf("/sys/dev/block/%d:%d", major, minor)
	resolved, err := filepath.EvalSymlinks(sysfsPath)
	if err != nil {
		return major, minor, nil
	}
	// Partition path usually looks like .../block/sda/sda3
	// Parent whole disk is .../block/sda
	parent := filepath.Dir(resolved)
	devName := filepath.Base(parent)
	// NOTE(review): despite the variable name, this reads the parent's
	// "dev" file (which contains "MAJOR:MINOR"), not its "uevent" file.
	ueventPath := filepath.Join(parent, "dev")
	data, err := os.ReadFile(ueventPath)
	if err != nil {
		return major, minor, nil
	}
	parts := strings.Split(strings.TrimSpace(string(data)), ":")
	if len(parts) != 2 {
		return major, minor, nil
	}
	parentMajor, err1 := strconv.Atoi(parts[0])
	parentMinor, err2 := strconv.Atoi(parts[1])
	if err1 != nil || err2 != nil || devName == "" {
		return major, minor, nil
	}
	return parentMajor, parentMinor, nil
}
// readDiskStats returns the cumulative "time spent doing I/Os" (ms) and the
// cumulative count of completed writes for device major:minor, parsed from
// /proc/diskstats.
//
// Token layout per line (kernel Documentation/admin-guide/iostats.rst; its
// "Field N" numbering starts after the three major/minor/name tokens):
//
//	idx 0 major, 1 minor, 2 name, 3 reads completed, 4 reads merged,
//	... 7 writes completed ... 12 time spent doing I/Os (ms)
//
// BUGFIX: writes completed is token index 7, not 4 — index 4 is "reads
// merged", which previously inflated the write count used for the await
// estimate.
func readDiskStats(major, minor int) (ioMs uint64, writesCompleted uint64, err error) {
	f, err := os.Open("/proc/diskstats")
	if err != nil {
		return 0, 0, fmt.Errorf("open /proc/diskstats: %w", err)
	}
	defer f.Close()
	sc := bufio.NewScanner(f)
	for sc.Scan() {
		fields := strings.Fields(sc.Text())
		if len(fields) < 14 {
			continue
		}
		devMajor, convErr := strconv.Atoi(fields[0])
		if convErr != nil {
			continue
		}
		devMinor, convErr := strconv.Atoi(fields[1])
		if convErr != nil {
			continue
		}
		if devMajor != major || devMinor != minor {
			continue
		}
		// Writes completed successfully (kernel doc Field 5): token index 7.
		writesCompleted, err = strconv.ParseUint(fields[7], 10, 64)
		if err != nil {
			return 0, 0, fmt.Errorf("parse writes completed for %d:%d: %w", major, minor, err)
		}
		// Time spent doing I/Os in ms (kernel doc Field 10): token index 12.
		ioMs, err = strconv.ParseUint(fields[12], 10, 64)
		if err != nil {
			return 0, 0, fmt.Errorf("parse io_ms for %d:%d: %w", major, minor, err)
		}
		return ioMs, writesCompleted, nil
	}
	if err := sc.Err(); err != nil {
		return 0, 0, fmt.Errorf("scan /proc/diskstats: %w", err)
	}
	return 0, 0, fmt.Errorf("device %d:%d not found in /proc/diskstats", major, minor)
}

View File

@@ -0,0 +1,19 @@
//go:build !linux
package osimage
import "context"
// adaptiveWriteController is the no-op stub compiled on non-Linux platforms,
// where /proc- and sysfs-based disk-busy feedback is unavailable; every
// method does nothing.
type adaptiveWriteController struct{}

// newAdaptiveWriteController mirrors the Linux constructor's signature; the
// target-path argument is ignored and the call never fails.
func newAdaptiveWriteController(string) (*adaptiveWriteController, error) {
	return &adaptiveWriteController{}, nil
}

// newNoopAdaptiveWriteController returns a controller that never throttles.
func newNoopAdaptiveWriteController() *adaptiveWriteController {
	return &adaptiveWriteController{}
}

// Wait never blocks in the stub; ObserveWrite and ObserveSync are no-ops.
func (c *adaptiveWriteController) Wait(ctx context.Context, n int) error { return nil }
func (c *adaptiveWriteController) ObserveWrite(n int)                    {}
func (c *adaptiveWriteController) ObserveSync()                          {}

View File

@@ -0,0 +1,275 @@
package osupgrade
import (
"context"
"fmt"
"os"
"sync/atomic"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog/v2"
monov1alpha1 "example.com/monok8s/pkg/apis/monok8s/v1alpha1"
"example.com/monok8s/pkg/buildinfo"
"example.com/monok8s/pkg/catalog"
"example.com/monok8s/pkg/controller/osimage"
"example.com/monok8s/pkg/kube"
"example.com/monok8s/pkg/node/uboot"
)
// UpgradeRunner serializes upgrade handling for this process: at most one
// upgrade runs at a time, and once a reboot has been initiated no further
// upgrades are started.
type UpgradeRunner struct {
	running   atomic.Bool // true while a handler is executing
	rebooting atomic.Bool // latched true once a reboot has been requested
}

// r is the package-level singleton runner shared by all watch events.
// NOTE(review): package-level mutable state; kept because callers depend on
// the package-scoped entry points below.
var r UpgradeRunner

// Run executes fn unless an upgrade is already running or a reboot is in
// progress, in which case it returns nil without doing anything (the event
// is simply skipped, not queued).
func (r *UpgradeRunner) Run(fn func() error) error {
	// Fast path: once rebooting, never start new work.
	if r.rebooting.Load() {
		return nil
	}
	// Single-flight: only the CAS winner proceeds; losers skip.
	if !r.running.CompareAndSwap(false, true) {
		return nil
	}
	defer r.running.Store(false)
	// Re-check: a reboot may have been triggered while acquiring the slot.
	if r.rebooting.Load() {
		return nil
	}
	return fn()
}
// HandleOSUpgradeProgress processes a single OSUpgradeProgress event for this
// node. All work is funneled through the package-level UpgradeRunner so that
// at most one upgrade handler runs at a time and none start after a reboot
// has been initiated.
func HandleOSUpgradeProgress(
	ctx context.Context,
	clients *kube.Clients,
	namespace string,
	nodeName string,
	osup *monov1alpha1.OSUpgradeProgress,
) error {
	handle := func() error {
		return handleOSUpgradeProgressLocked(ctx, clients, namespace, nodeName, osup)
	}
	return r.Run(handle)
}
// handleOSUpgradeProgressLocked drives one upgrade attempt for this node: it
// plans the hop chain from the running version to the OSUpgrade's desired
// version, applies ONLY the first hop (download, flash to the alternate
// partition, verify), points the next boot at the new image, and reboots.
// Remaining hops are handled by later reconciles after the node comes back
// up on the new version. Must only be invoked via UpgradeRunner.Run (hence
// the "Locked" suffix).
func handleOSUpgradeProgressLocked(
	ctx context.Context,
	clients *kube.Clients,
	namespace string,
	nodeName string,
	osup *monov1alpha1.OSUpgradeProgress,
) error {
	if osup == nil {
		return fmt.Errorf("osupgradeprogress is nil")
	}
	// Progress objects for other nodes are not ours to act on.
	if osup.Spec.NodeName != nodeName {
		klog.V(4).InfoS("skipping osupgradeprogress due to nodeName mismatch",
			"name", osup.Name,
			"node", nodeName,
			"target", osup.Spec.NodeName,
		)
		return nil
	}
	// Only pending/rebooting/retryable-failed phases are actionable.
	if !shouldProcessProgress(osup) {
		klog.V(2).InfoS("skipping osupgradeprogress due to phase",
			"name", osup.Name,
			"node", nodeName,
			"phase", osup.StatusPhase(),
		)
		return nil
	}
	// Resolve the parent OSUpgrade that this progress object fans out from.
	parentName := osup.Spec.SourceRef.Name
	if parentName == "" {
		return failProgress(ctx, clients, osup, "resolve parent osupgrade", fmt.Errorf("missing spec.osUpgradeName"))
	}
	osu, err := clients.MonoKS.Monok8sV1alpha1().
		OSUpgrades(namespace).
		Get(ctx, parentName, metav1.GetOptions{})
	if err != nil {
		return failProgress(ctx, clients, osup, "resolve parent osupgrade", err)
	}
	klog.InfoS("handling osupgradeprogress",
		"name", osup.Name,
		"osupgrade", osu.Name,
		"node", nodeName,
		"desiredVersion", osu.Spec.DesiredVersion,
	)
	kata, err := catalog.ResolveCatalog(ctx, clients.Kubernetes, namespace, osu.Spec.Catalog)
	if err != nil {
		return failProgress(ctx, clients, osup, "resolve catalog", err)
	}
	// Plan the hop chain starting from the version baked into this build.
	plan, err := PlanUpgrade(buildinfo.KubeVersion, osu, kata)
	if err != nil {
		return failProgress(ctx, clients, osup, "plan upgrade", err)
	}
	// Empty path: this node already runs the resolved target.
	if len(plan.Path) == 0 {
		osup.Status.CurrentVersion = buildinfo.KubeVersion
		osup.Status.TargetVersion = buildinfo.KubeVersion
		if err := markProgressCompleted(ctx, clients, osup, "already at target version"); err != nil {
			return err
		}
		klog.InfoS("osupgrade already satisfied",
			"name", osu.Name,
			"node", nodeName,
			"target", plan.ResolvedTarget,
		)
		return nil
	}
	// Only the first hop is applied in this invocation.
	first := plan.Path[0]
	updated, err := updateProgressRobust(ctx, clients, osup.Namespace, osup.Name, func(cur *monov1alpha1.OSUpgradeProgress) {
		now := metav1.Now()
		cur.Status.CurrentVersion = buildinfo.KubeVersion
		cur.Status.TargetVersion = plan.ResolvedTarget
		cur.Status.PlannedPath = plannedPath(plan)
		cur.Status.Phase = monov1alpha1.OSUpgradeProgressPhaseDownloading
		cur.Status.ObservedRetryNonce = cur.Spec.RetryNonce
		cur.Status.Message = fmt.Sprintf("downloading image: %s", first.URL)
		cur.Status.LastUpdatedAt = &now
	})
	// updateProgressRobust may return a usable object together with an
	// error (unknown-result writes), so adopt it before checking err.
	if updated != nil {
		osup = updated
	}
	if err != nil {
		return fmt.Errorf("update progress status: %w", err)
	}
	klog.InfoS("planned osupgrade",
		"name", osu.Name,
		"node", nodeName,
		"resolvedTarget", plan.ResolvedTarget,
		"steps", len(plan.Path),
		"currentVersion", buildinfo.KubeVersion,
		"firstVersion", first.Version,
		"firstURL", first.URL,
		"size", first.Size,
	)
	imageSHA, err := first.SHA256()
	if err != nil {
		return failProgress(ctx, clients, osup, "apply image", err)
	}
	// Progress reporting is rate-limited: pLogger throttles log lines and
	// statusUpdater throttles API status writes.
	pLogger := osimage.NewProgressLogger(2, 25)
	statusUpdater := osimage.NewTimeBasedUpdater(15)
	imageOptions := osimage.ApplyOptions{
		URL:               first.URL,
		TargetPath:        monov1alpha1.AltPartDeviceLink,
		ExpectedRawSHA256: imageSHA,
		ExpectedRawSize:   first.Size,
		BufferSize:        6 * 1024 * 1024,
		Progress: func(p osimage.Progress) {
			pLogger.Log(p)
			if err := statusUpdater.Run(func() error {
				klog.Infof("%s: %d%%", p.Stage, osimage.PercentOf(p.BytesComplete, p.BytesTotal))
				updated, err := updateProgressRobust(ctx, clients, osup.Namespace, osup.Name, func(cur *monov1alpha1.OSUpgradeProgress) {
					now := metav1.Now()
					// Map the streaming stage onto the CRD phase.
					switch p.Stage {
					case "flash":
						cur.Status.Phase = monov1alpha1.OSUpgradeProgressPhaseWriting
					case "verify":
						cur.Status.Phase = monov1alpha1.OSUpgradeProgressPhaseVerifying
					}
					cur.Status.TargetVersion = plan.ResolvedTarget
					cur.Status.LastUpdatedAt = &now
					cur.Status.Message = fmt.Sprintf(
						"%s: %d%%",
						p.Stage,
						osimage.PercentOf(p.BytesComplete, p.BytesTotal),
					)
				})
				if updated != nil {
					osup = updated
				}
				if err != nil {
					return fmt.Errorf("update progress status: %w", err)
				}
				return nil
			}); err != nil {
				// A failed status write must not abort the flash itself.
				klog.ErrorS(err, "throttled progress update failed")
			}
		},
	}
	result, err := osimage.ApplyImageStreamed(ctx, imageOptions)
	if err != nil {
		return failProgress(ctx, clients, osup, "apply image", err)
	}
	klog.Info(result)
	// Point the bootloader at the freshly written alternate partition.
	cfgPath := os.Getenv("FW_ENV_CONFIG_FILE")
	if err := uboot.ConfigureNextBoot(ctx, cfgPath); err != nil {
		return failProgress(ctx, clients, osup, "set boot env", err)
	}
	updated, err = updateProgressRobust(ctx, clients, osup.Namespace, osup.Name, func(cur *monov1alpha1.OSUpgradeProgress) {
		now := metav1.Now()
		cur.Status.TargetVersion = plan.ResolvedTarget
		cur.Status.Message = "image applied, verified, and next boot environment updated"
		cur.Status.Phase = monov1alpha1.OSUpgradeProgressPhaseRebooting
		cur.Status.LastUpdatedAt = &now
	})
	if updated != nil {
		osup = updated
	}
	if err != nil {
		return fmt.Errorf("update progress status: %w", err)
	}
	// TODO: Drain the node here
	// Get all running pods outta here!
	// kubectl.Run()
	// Wait for the node to be drained
	// kubectl.Wait()
	// Latch the runner so no further upgrades start while we go down.
	r.rebooting.Store(true)
	if err := triggerReboot(); err != nil {
		r.rebooting.Store(false)
		return fmt.Errorf("trigger reboot: %w", err)
	}
	// Block forever: the sysrq reboot initiated above terminates the
	// process; returning here would let the caller continue pointlessly.
	select {}
}
// shouldProcessProgress reports whether this OSUpgradeProgress is actionable:
// unset/pending/rebooting phases are processed, a failed progress is retried
// only when its retry nonce changed, and everything else is skipped.
func shouldProcessProgress(osup *monov1alpha1.OSUpgradeProgress) bool {
	if osup == nil || osup.Status == nil {
		return false
	}
	phase := osup.Status.Phase
	if phase == monov1alpha1.OSUpgradeProgressPhaseFailed {
		// Re-run a failed upgrade only when the user bumped the nonce.
		return osup.Spec.RetryNonce != osup.Status.ObservedRetryNonce
	}
	return phase == "" ||
		phase == monov1alpha1.OSUpgradeProgressPhasePending ||
		phase == monov1alpha1.OSUpgradeProgressPhaseRebooting
}
// triggerReboot forces an immediate reboot through the kernel sysrq
// interface: "s" (emergency sync), "u" (emergency remount read-only), then
// "b" (reboot immediately, without further syncing). The first two writes
// are deliberately best-effort — their errors are ignored because they are
// only preparation; only the final reboot trigger's error is reported.
func triggerReboot() error {
	_ = os.WriteFile("/proc/sysrq-trigger", []byte("s\n"), 0)
	_ = os.WriteFile("/proc/sysrq-trigger", []byte("u\n"), 0)
	return os.WriteFile("/proc/sysrq-trigger", []byte("b\n"), 0)
}

View File

@@ -0,0 +1,352 @@
package osupgrade
import (
"fmt"
"sort"
"strconv"
"strings"
monov1alpha1 "example.com/monok8s/pkg/apis/monok8s/v1alpha1"
"example.com/monok8s/pkg/catalog"
)
// Version is a parsed semantic version of the form vMAJOR.MINOR.PATCH.
// Raw holds the canonical "vX.Y.Z" rendering of the parsed components.
type Version struct {
	Major int
	Minor int
	Patch int
	Raw   string
}

// ParseVersion parses "vMAJOR.MINOR.PATCH" (leading "v" optional,
// surrounding whitespace ignored) into a Version whose Raw field is the
// canonical rendering.
func ParseVersion(s string) (Version, error) {
	trimmed := strings.TrimSpace(s)
	if trimmed == "" {
		return Version{}, fmt.Errorf("empty version")
	}
	pieces := strings.Split(strings.TrimPrefix(trimmed, "v"), ".")
	if len(pieces) != 3 {
		return Version{}, fmt.Errorf("invalid version %q: expected vMAJOR.MINOR.PATCH", s)
	}
	major, err := strconv.Atoi(pieces[0])
	if err != nil {
		return Version{}, fmt.Errorf("parse major from %q: %w", s, err)
	}
	minor, err := strconv.Atoi(pieces[1])
	if err != nil {
		return Version{}, fmt.Errorf("parse minor from %q: %w", s, err)
	}
	patch, err := strconv.Atoi(pieces[2])
	if err != nil {
		return Version{}, fmt.Errorf("parse patch from %q: %w", s, err)
	}
	return Version{
		Major: major,
		Minor: minor,
		Patch: patch,
		Raw:   fmt.Sprintf("v%d.%d.%d", major, minor, patch),
	}, nil
}

// String returns the canonical rendering produced at parse time.
func (v Version) String() string { return v.Raw }

// Compare orders versions numerically by major, then minor, then patch:
// -1 when v < o, 0 when equal, 1 when v > o.
func (v Version) Compare(o Version) int {
	pairs := [3][2]int{
		{v.Major, o.Major},
		{v.Minor, o.Minor},
		{v.Patch, o.Patch},
	}
	for _, p := range pairs {
		switch {
		case p[0] < p[1]:
			return -1
		case p[0] > p[1]:
			return 1
		}
	}
	return 0
}

// SameMinor reports whether v and o belong to the same major.minor series.
func (v Version) SameMinor(o Version) bool {
	return v.Major == o.Major && v.Minor == o.Minor
}
// Plan is the result of PlanUpgrade: the concrete version the upgrade
// resolved to, and the ordered catalog images to apply to reach it.
// An empty Path means the node already runs the resolved target.
type Plan struct {
	ResolvedTarget string
	Path           []catalog.CatalogImage
}
// PlanUpgrade resolves the OSUpgrade's desired version against the catalog
// and computes the ordered hop chain of catalog images from the currently
// running version. Blocked versions are rejected as targets and excluded as
// intermediate hops; duplicate catalog entries are an error.
func PlanUpgrade(
	current string,
	osu *monov1alpha1.OSUpgrade,
	cat *catalog.VersionCatalog,
) (*Plan, error) {
	target, err := resolveTarget(osu.Spec.DesiredVersion, cat)
	if err != nil {
		return nil, err
	}
	if isBlocked(target, cat.Blocked) {
		return nil, fmt.Errorf("target %s is blocked", target)
	}
	// Index usable images by version, rejecting duplicates.
	byVersion := make(map[string]catalog.CatalogImage, len(cat.Images))
	candidates := make([]string, 0, len(cat.Images))
	for _, img := range cat.Images {
		if img.Version == "" || isBlocked(img.Version, cat.Blocked) {
			continue
		}
		if _, dup := byVersion[img.Version]; dup {
			return nil, fmt.Errorf("duplicate image entry for version %s", img.Version)
		}
		byVersion[img.Version] = img
		candidates = append(candidates, img.Version)
	}
	hops, err := calculatePath(current, target, candidates)
	if err != nil {
		return nil, err
	}
	// Translate the version hops back into their catalog images.
	images := make([]catalog.CatalogImage, 0, len(hops))
	for _, v := range hops {
		img, ok := byVersion[v]
		if !ok {
			return nil, fmt.Errorf("internal error: no image for planned version %s", v)
		}
		images = append(images, img)
	}
	return &Plan{
		ResolvedTarget: target,
		Path:           images,
	}, nil
}
// installableVersions returns every non-empty, non-blocked version string in
// the catalog, preserving catalog order.
func installableVersions(cat *catalog.VersionCatalog) []string {
	versions := make([]string, 0, len(cat.Images))
	for _, img := range cat.Images {
		if img.Version == "" || isBlocked(img.Version, cat.Blocked) {
			continue
		}
		versions = append(versions, img.Version)
	}
	return versions
}
// resolveTarget maps the requested version to a concrete catalog version:
// "stable" resolves through the catalog's stable pointer; any other value
// must match a catalog image version verbatim.
func resolveTarget(desired string, cat *catalog.VersionCatalog) (string, error) {
	if desired == "stable" {
		if cat.Stable == "" {
			return "", fmt.Errorf("catalog missing stable")
		}
		return cat.Stable, nil
	}
	for i := range cat.Images {
		if cat.Images[i].Version == desired {
			return desired, nil
		}
	}
	return "", fmt.Errorf("desired version %s not in catalog", desired)
}
// calculatePath computes the ordered version hops from current to target
// using the available versions. Rules: no-op when equal; downgrades and
// cross-major jumps are errors; within one minor the node jumps straight to
// the target patch; otherwise it first finishes the current minor on its
// latest patch, bridges every intermediate minor via that minor's latest
// patch, and finally lands on the target. A nil slice (with nil error) means
// nothing to do.
func calculatePath(current, target string, available []string) ([]string, error) {
	from, err := ParseVersion(current)
	if err != nil {
		return nil, fmt.Errorf("parse current version: %w", err)
	}
	to, err := ParseVersion(target)
	if err != nil {
		return nil, fmt.Errorf("parse target version: %w", err)
	}
	switch order := from.Compare(to); {
	case order == 0:
		return nil, nil
	case order > 0:
		return nil, fmt.Errorf("downgrade not supported: current=%s target=%s", from, to)
	}
	if from.Major != to.Major {
		return nil, fmt.Errorf("cross-major upgrade not supported: %s -> %s", from, to)
	}
	sorted, err := parseAndSortVersions(available)
	if err != nil {
		return nil, err
	}
	if !containsVersion(sorted, to) {
		return nil, fmt.Errorf("target version %s not found in available versions", to)
	}
	var hops []Version
	picked := map[string]struct{}{}
	// push appends a hop unless it is not strictly newer than the current
	// version or was already planned.
	push := func(v Version) {
		if v.Compare(from) <= 0 {
			return
		}
		if _, dup := picked[v.String()]; dup {
			return
		}
		picked[v.String()] = struct{}{}
		hops = append(hops, v)
	}
	// Same minor series: a single hop straight to the target patch.
	if from.SameMinor(to) {
		push(to)
		return versionsToStrings(hops), nil
	}
	// 1) Finish the current minor on its latest available newer patch.
	if last, ok := latestPatchInMinor(sorted, from.Major, from.Minor, from); ok {
		push(last)
	}
	// 2) Bridge every intermediate minor via its latest available patch.
	for minor := from.Minor + 1; minor < to.Minor; minor++ {
		bridge, ok := latestAnyPatchInMinor(sorted, from.Major, minor)
		if !ok {
			return nil, fmt.Errorf("no available bridge version for v%d.%d.x", from.Major, minor)
		}
		push(bridge)
	}
	// 3) Land on the target itself.
	push(to)
	return versionsToStrings(hops), nil
}
// latestAnyPatchInMinor returns the highest version inside the major.minor
// series, and whether any such version exists.
func latestAnyPatchInMinor(versions []Version, major, minor int) (Version, bool) {
	var best Version
	found := false
	for _, v := range versions {
		if v.Major != major || v.Minor != minor {
			continue
		}
		if !found || best.Compare(v) < 0 {
			best, found = v, true
		}
	}
	return best, found
}
// parseAndSortVersions parses every raw version string, drops duplicates
// (compared by canonical rendering), and returns the result sorted in
// ascending order. Any unparsable entry fails the whole call.
func parseAndSortVersions(raw []string) ([]Version, error) {
	parsed := make([]Version, 0, len(raw))
	dedup := map[string]struct{}{}
	for _, s := range raw {
		v, err := ParseVersion(s)
		if err != nil {
			return nil, fmt.Errorf("parse catalog version %q: %w", s, err)
		}
		if _, dup := dedup[v.String()]; dup {
			continue
		}
		dedup[v.String()] = struct{}{}
		parsed = append(parsed, v)
	}
	sort.Slice(parsed, func(i, j int) bool { return parsed[i].Compare(parsed[j]) < 0 })
	return parsed, nil
}
// containsRawVersion reports whether want appears in versions, comparing
// with surrounding whitespace stripped from both sides.
func containsRawVersion(versions []string, want string) bool {
	// want's trimming is loop-invariant; hoist it out of the loop.
	target := strings.TrimSpace(want)
	for _, v := range versions {
		if strings.TrimSpace(v) == target {
			return true
		}
	}
	return false
}
// containsVersion reports whether want (compared numerically, not by Raw)
// is present in versions.
func containsVersion(versions []Version, want Version) bool {
	for i := range versions {
		if versions[i].Compare(want) == 0 {
			return true
		}
	}
	return false
}
// isBlocked reports whether version appears in the blocked list, comparing
// with surrounding whitespace stripped from both sides.
func isBlocked(version string, blocked []string) bool {
	// version's trimming is loop-invariant; hoist it out of the loop.
	target := strings.TrimSpace(version)
	for _, b := range blocked {
		if strings.TrimSpace(b) == target {
			return true
		}
	}
	return false
}
// latestPatchInMinor returns the highest version inside the major.minor
// series that is strictly newer than gt, and whether one exists.
func latestPatchInMinor(versions []Version, major, minor int, gt Version) (Version, bool) {
	var best Version
	found := false
	for _, v := range versions {
		if v.Major != major || v.Minor != minor || v.Compare(gt) <= 0 {
			continue
		}
		if !found || best.Compare(v) < 0 {
			best, found = v, true
		}
	}
	return best, found
}
// lowestPatchInMinor returns the smallest version inside the major.minor
// series, and whether one exists. The previous implementation returned the
// first match, which is the lowest only when the input is already sorted;
// this version computes the true minimum so it is correct for any ordering
// (identical behavior for sorted input).
func lowestPatchInMinor(versions []Version, major, minor int) (Version, bool) {
	var best Version
	found := false
	for _, v := range versions {
		if v.Major != major || v.Minor != minor {
			continue
		}
		if !found || v.Compare(best) < 0 {
			best, found = v, true
		}
	}
	return best, found
}
// plannedPath flattens the plan's image hops into their version strings.
// The result is always non-nil (an empty plan yields an empty slice, not
// nil, so it serializes as [] rather than null), and the slice is now
// pre-sized to avoid append-driven growth.
func plannedPath(plan *Plan) []string {
	ppath := make([]string, 0, len(plan.Path))
	for _, img := range plan.Path {
		ppath = append(ppath, img.Version)
	}
	return ppath
}
// versionsToStrings renders each version canonically, preserving order.
func versionsToStrings(vs []Version) []string {
	rendered := make([]string, 0, len(vs))
	for i := range vs {
		rendered = append(rendered, vs[i].String())
	}
	return rendered
}

View File

@@ -0,0 +1,149 @@
package osupgrade
import (
"reflect"
"testing"
)
// TestCalculatePath exercises the hop-planning rules table-style: same-minor
// direct jumps, finishing the current minor, multi-minor bridging via the
// latest patch, duplicate tolerance, and the error cases (missing target or
// bridge, downgrade, cross-major, unparsable versions).
func TestCalculatePath(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name      string
		current   string
		target    string
		available []string
		want      []string
		wantErr   bool
	}{
		{
			// Equal versions plan no hops at all (nil, not empty slice).
			name:      "same version returns nil path",
			current:   "v1.34.6",
			target:    "v1.34.6",
			available: []string{"v1.34.6"},
			want:      nil,
			wantErr:   false,
		},
		{
			name:      "same minor jumps directly to target",
			current:   "v1.34.1",
			target:    "v1.34.6",
			available: []string{"v1.34.1", "v1.34.3", "v1.34.6"},
			want:      []string{"v1.34.6"},
			wantErr:   false,
		},
		{
			name:      "next minor direct jump when no current minor patch available",
			current:   "v1.34.6",
			target:    "v1.35.3",
			available: []string{"v1.34.6", "v1.35.1", "v1.35.3"},
			want:      []string{"v1.35.3"},
			wantErr:   false,
		},
		{
			name:      "finish current minor then target",
			current:   "v1.34.1",
			target:    "v1.35.3",
			available: []string{"v1.34.1", "v1.34.6", "v1.35.1", "v1.35.3"},
			want:      []string{"v1.34.6", "v1.35.3"},
			wantErr:   false,
		},
		{
			name:      "multi minor path uses latest bridge patch",
			current:   "v1.33.10",
			target:    "v1.35.3",
			available: []string{"v1.34.1", "v1.34.6", "v1.35.3"},
			want:      []string{"v1.34.6", "v1.35.3"},
			wantErr:   false,
		},
		{
			name:      "multi minor path finishes current minor and latest bridge patch",
			current:   "v1.33.1",
			target:    "v1.35.3",
			available: []string{"v1.33.5", "v1.33.9", "v1.34.1", "v1.34.6", "v1.35.3"},
			want:      []string{"v1.33.9", "v1.34.6", "v1.35.3"},
			wantErr:   false,
		},
		{
			name:      "duplicates in available are ignored",
			current:   "v1.33.10",
			target:    "v1.35.3",
			available: []string{"v1.34.6", "v1.34.6", "v1.35.3", "v1.35.3"},
			want:      []string{"v1.34.6", "v1.35.3"},
			wantErr:   false,
		},
		{
			name:      "target missing returns error",
			current:   "v1.34.6",
			target:    "v1.35.3",
			available: []string{"v1.34.6", "v1.35.1"},
			wantErr:   true,
		},
		{
			name:      "missing bridge minor returns error",
			current:   "v1.33.10",
			target:    "v1.35.3",
			available: []string{"v1.35.3"},
			wantErr:   true,
		},
		{
			name:      "downgrade not supported",
			current:   "v1.35.3",
			target:    "v1.34.6",
			available: []string{"v1.34.6", "v1.35.3"},
			wantErr:   true,
		},
		{
			name:      "cross major not supported",
			current:   "v1.35.3",
			target:    "v2.0.0",
			available: []string{"v1.35.3", "v2.0.0"},
			wantErr:   true,
		},
		{
			name:      "invalid current version returns error",
			current:   "garbage",
			target:    "v1.35.3",
			available: []string{"v1.35.3"},
			wantErr:   true,
		},
		{
			name:      "invalid target version returns error",
			current:   "v1.34.6",
			target:    "wat",
			available: []string{"v1.34.6", "v1.35.3"},
			wantErr:   true,
		},
		{
			name:      "invalid available version returns error",
			current:   "v1.34.6",
			target:    "v1.35.3",
			available: []string{"v1.34.6", "broken", "v1.35.3"},
			wantErr:   true,
		},
	}
	for _, tt := range tests {
		tt := tt // capture for parallel subtests (pre-Go-1.22 semantics)
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			got, err := calculatePath(tt.current, tt.target, tt.available)
			if tt.wantErr {
				if err == nil {
					t.Fatalf("expected error, got nil; path=%v", got)
				}
				return
			}
			if err != nil {
				t.Fatalf("unexpected error: %v", err)
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Fatalf("calculatePath(%q, %q, %v)\n got: %v\n want: %v",
					tt.current, tt.target, tt.available, got, tt.want)
			}
		})
	}
}

View File

@@ -0,0 +1,254 @@
package osupgrade
import (
"context"
"fmt"
"strings"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/util/retry"
"k8s.io/klog/v2"
monov1alpha1 "example.com/monok8s/pkg/apis/monok8s/v1alpha1"
"example.com/monok8s/pkg/kube"
)
// EnsureOSUpgradeProgressForNode creates the per-node OSUpgradeProgress for
// the given OSUpgrade (named "<osupgrade>-<node>") if it does not exist. An
// already-existing object is accepted only when it points at the same node
// and the same parent OSUpgrade; anything else is reported as a conflict.
func EnsureOSUpgradeProgressForNode(
	ctx context.Context,
	clients *kube.Clients,
	namespace string,
	nodeName string,
	osu *monov1alpha1.OSUpgrade,
) error {
	if osu == nil {
		return fmt.Errorf("osupgrade is nil")
	}
	name := fmt.Sprintf("%s-%s", osu.Name, nodeName)
	now := metav1.Now()
	desired := &monov1alpha1.OSUpgradeProgress{
		TypeMeta: metav1.TypeMeta{
			APIVersion: monov1alpha1.APIVersion,
			Kind:       "OSUpgradeProgress",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
		Spec: monov1alpha1.OSUpgradeProgressSpec{
			NodeName: nodeName,
			SourceRef: monov1alpha1.OSUpgradeSourceRef{
				Name: osu.Name,
			},
		},
		Status: &monov1alpha1.OSUpgradeProgressStatus{
			Phase:         monov1alpha1.OSUpgradeProgressPhasePending,
			LastUpdatedAt: &now,
		},
	}
	created, err := createProgress(ctx, clients, desired)
	switch {
	case err == nil:
		klog.InfoS("created osupgradeprogress", "name", created.Name, "namespace", created.Namespace)
		return nil
	case !apierrors.IsAlreadyExists(err):
		return fmt.Errorf("create OSUpgradeProgress %s/%s: %w", namespace, name, err)
	}
	// Already exists: verify it belongs to this node and this OSUpgrade.
	existing, err := getProgress(ctx, clients, namespace, name)
	if err != nil {
		return fmt.Errorf("get existing OSUpgradeProgress %s/%s: %w", namespace, name, err)
	}
	if existing.Spec.NodeName == nodeName && existing.Spec.SourceRef.Name == osu.Name {
		return nil
	}
	return fmt.Errorf(
		"conflicting OSUpgradeProgress %s/%s already exists: got spec.nodeName=%q spec.sourceRef.name=%q, want nodeName=%q sourceRef.name=%q",
		namespace,
		name,
		existing.Spec.NodeName,
		existing.Spec.SourceRef.Name,
		nodeName,
		osu.Name,
	)
}
// updateProgressRobust applies mutate to a freshly fetched copy of the named
// OSUpgradeProgress and writes it through the status subresource, retrying
// on conflicts. On ambiguous write failures (timeouts etc.) it re-fetches so
// the caller receives the latest known server state ALONGSIDE the error —
// callers must therefore check the returned object before the error.
//
// The original final "if err != nil && out != nil { return out, err }" was a
// dead duplicate of the unconditional return below it; collapsed into one
// return with the explanation kept as a comment.
func updateProgressRobust(
	ctx context.Context,
	clients *kube.Clients,
	namespace string,
	name string,
	mutate func(*monov1alpha1.OSUpgradeProgress),
) (*monov1alpha1.OSUpgradeProgress, error) {
	var out *monov1alpha1.OSUpgradeProgress
	err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
		current, err := getProgress(ctx, clients, namespace, name)
		if err != nil {
			return err
		}
		// Guarantee mutate always sees a non-nil status to fill in.
		if current.Status == nil {
			current.Status = &monov1alpha1.OSUpgradeProgressStatus{}
		}
		mutate(current)
		updated, err := updateProgressStatus(ctx, clients, current)
		if err != nil {
			// The write may or may not have landed; surface the latest
			// server state so the caller keeps a usable copy.
			if isUnknownUpdateResult(err) {
				if latest, getErr := getProgress(ctx, clients, namespace, name); getErr == nil {
					out = latest
				}
			}
			return err
		}
		out = updated
		return nil
	})
	// Unknown-result case: out may be non-nil even when err != nil.
	return out, err
}
// isUnknownUpdateResult reports whether an update error leaves the
// server-side outcome ambiguous (the write may or may not have been
// applied): API timeouts/throttling, plus a set of transport-level failures
// recognized by message substring.
func isUnknownUpdateResult(err error) bool {
	if err == nil {
		return false
	}
	if apierrors.IsTimeout(err) ||
		apierrors.IsServerTimeout(err) ||
		apierrors.IsTooManyRequests(err) {
		return true
	}
	ambiguous := []string{
		"request timed out",
		"context deadline exceeded",
		"etcdserver: request timed out",
		"connection reset by peer",
		"http2: client connection lost",
	}
	msg := strings.ToLower(err.Error())
	for _, needle := range ambiguous {
		if strings.Contains(msg, needle) {
			return true
		}
	}
	return false
}
// createProgress creates the object (Create ignores the status subresource,
// so status is stripped first) and, when the caller supplied an initial
// status, writes it afterwards via UpdateStatus and returns that result.
func createProgress(
	ctx context.Context,
	clients *kube.Clients,
	progress *monov1alpha1.OSUpgradeProgress,
) (*monov1alpha1.OSUpgradeProgress, error) {
	spec := progress.DeepCopy()
	spec.Status = nil
	created, err := clients.MonoKS.
		Monok8sV1alpha1().
		OSUpgradeProgresses(spec.Namespace).
		Create(ctx, spec, metav1.CreateOptions{})
	if err != nil {
		return nil, err
	}
	if progress.Status == nil {
		return created, nil
	}
	withStatus := created.DeepCopy()
	withStatus.Status = progress.Status
	return updateProgressStatus(ctx, clients, withStatus)
}
// updateProgressStatus writes progress.Status through the status subresource
// and returns the server's updated copy.
func updateProgressStatus(
	ctx context.Context,
	clients *kube.Clients,
	progress *monov1alpha1.OSUpgradeProgress,
) (*monov1alpha1.OSUpgradeProgress, error) {
	api := clients.MonoKS.Monok8sV1alpha1().OSUpgradeProgresses(progress.Namespace)
	updated, err := api.UpdateStatus(ctx, progress, metav1.UpdateOptions{})
	if err == nil {
		return updated, nil
	}
	return nil, fmt.Errorf(
		"update status for OSUpgradeProgress %s/%s: %w",
		progress.Namespace, progress.Name, err,
	)
}
// getProgress fetches the named OSUpgradeProgress from the API server.
func getProgress(
	ctx context.Context,
	clients *kube.Clients,
	namespace, name string,
) (*monov1alpha1.OSUpgradeProgress, error) {
	api := clients.MonoKS.Monok8sV1alpha1().OSUpgradeProgresses(namespace)
	return api.Get(ctx, name, metav1.GetOptions{})
}
// failProgress records a failure on the progress object (phase=Failed,
// message "<action>: <cause>", retry nonce observed) and returns the wrapped
// cause. The status write is best-effort: if it fails, the write error is
// logged rather than returned so the original cause is never masked.
func failProgress(
	ctx context.Context,
	clients *kube.Clients,
	osup *monov1alpha1.OSUpgradeProgress,
	action string,
	cause error,
) error {
	_, updateErr := updateProgressRobust(ctx, clients, osup.Namespace, osup.Name, func(cur *monov1alpha1.OSUpgradeProgress) {
		now := metav1.Now()
		if cur.Status == nil {
			cur.Status = &monov1alpha1.OSUpgradeProgressStatus{}
		}
		cur.Status.Phase = monov1alpha1.OSUpgradeProgressPhaseFailed
		cur.Status.Message = fmt.Sprintf("%s: %v", action, cause)
		cur.Status.ObservedRetryNonce = cur.Spec.RetryNonce
		cur.Status.LastUpdatedAt = &now
	})
	if updateErr != nil {
		klog.ErrorS(updateErr, "failed to update osupgradeprogress status after error",
			"action", action,
			"name", osup.Name,
			"namespace", osup.Namespace,
		)
	}
	return fmt.Errorf("%s: %w", action, cause)
}
// markProgressCompleted stamps the progress object Completed, carrying the
// caller's current/target versions over into the fresh server copy and
// recording both the completion and last-update timestamps.
func markProgressCompleted(
	ctx context.Context,
	clients *kube.Clients,
	osup *monov1alpha1.OSUpgradeProgress,
	message string,
) error {
	_, err := updateProgressRobust(ctx, clients, osup.Namespace, osup.Name, func(cur *monov1alpha1.OSUpgradeProgress) {
		now := metav1.Now()
		if cur.Status == nil {
			cur.Status = &monov1alpha1.OSUpgradeProgressStatus{}
		}
		cur.Status.Phase = monov1alpha1.OSUpgradeProgressPhaseCompleted
		cur.Status.Message = message
		cur.Status.ObservedRetryNonce = cur.Spec.RetryNonce
		cur.Status.CurrentVersion = osup.Status.CurrentVersion
		cur.Status.TargetVersion = osup.Status.TargetVersion
		cur.Status.CompletedAt = &now
		cur.Status.LastUpdatedAt = &now
	})
	if err != nil {
		return fmt.Errorf("mark progress completed: %w", err)
	}
	return nil
}

View File

@@ -0,0 +1,345 @@
package osupgrade
import (
"context"
"fmt"
"time"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/klog/v2"
monov1alpha1 "example.com/monok8s/pkg/apis/monok8s/v1alpha1"
"example.com/monok8s/pkg/kube"
)
// Watch runs the OSUpgrade reconcile loop until ctx is cancelled, wrapping
// watchOnce with retry: an expired resourceVersion clears the bookmark so
// the next iteration relists from scratch; any other failure is logged and
// retried after a short, cancellation-aware backoff.
func Watch(ctx context.Context, clients *kube.Clients, namespace string) error {
	var resourceVersion string
	for {
		if ctx.Err() != nil {
			return ctx.Err()
		}
		err := watchOnce(ctx, clients, namespace, &resourceVersion)
		if err != nil {
			// Prefer reporting cancellation over the watch's own error.
			if ctx.Err() != nil {
				return ctx.Err()
			}
			// Expired RV is normal enough; clear it and relist.
			if apierrors.IsResourceExpired(err) {
				klog.InfoS("OSUpgrade watch resourceVersion expired; resetting",
					"namespace", namespace,
					"resourceVersion", resourceVersion,
				)
				resourceVersion = ""
			} else {
				klog.ErrorS(err, "OSUpgrade watch failed; retrying",
					"namespace", namespace,
					"resourceVersion", resourceVersion,
				)
			}
			// Backoff before the next attempt, honoring cancellation.
			select {
			case <-ctx.Done():
				return ctx.Err()
			case <-time.After(2 * time.Second):
			}
			continue
		}
	}
}
// watchOnce performs one list+watch cycle over OSUpgrades. With an empty
// *resourceVersion it first lists and reconciles every existing object
// (cold start), then watches from the list's RV, advancing *resourceVersion
// on bookmarks and handled events so the caller can resume without a full
// relist. It returns only on error or channel close; per-object reconcile
// failures are logged and skipped, never fatal to the watch.
func watchOnce(
	ctx context.Context,
	clients *kube.Clients,
	namespace string,
	resourceVersion *string,
) error {
	// Cold start: list existing objects once, handle them, then watch from list RV.
	if *resourceVersion == "" {
		list, err := clients.MonoKS.
			Monok8sV1alpha1().
			OSUpgrades(namespace).
			List(ctx, metav1.ListOptions{})
		if err != nil {
			return fmt.Errorf("list OSUpgrades: %w", err)
		}
		for i := range list.Items {
			osu := &list.Items[i]
			handled, err := handleOSUpgrade(ctx, clients, namespace, osu)
			if err != nil {
				// Keep syncing the rest; this object will be retried on
				// its next event or the next relist.
				klog.ErrorS(err, "reconcile existing OSUpgrade failed",
					"name", osu.Name,
					"resourceVersion", osu.ResourceVersion,
				)
				continue
			}
			if !handled {
				klog.V(2).InfoS("skipping existing OSUpgrade",
					"name", osu.Name,
					"phase", osu.StatusPhase(),
				)
			}
		}
		// Resume the watch from the state the list represented.
		*resourceVersion = list.ResourceVersion
		klog.InfoS("initial OSUpgrade sync complete",
			"namespace", namespace,
			"resourceVersion", *resourceVersion,
			"count", len(list.Items),
		)
	}
	w, err := clients.MonoKS.
		Monok8sV1alpha1().
		OSUpgrades(namespace).
		Watch(ctx, metav1.ListOptions{
			ResourceVersion:     *resourceVersion,
			AllowWatchBookmarks: true,
		})
	if err != nil {
		return fmt.Errorf("watch OSUpgrades: %w", err)
	}
	defer w.Stop()
	klog.InfoS("watching OSUpgrades",
		"namespace", namespace,
		"resourceVersion", *resourceVersion,
	)
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case evt, ok := <-w.ResultChan():
			if !ok {
				// Server closed the stream; let the caller reconnect.
				return fmt.Errorf("watch channel closed")
			}
			switch evt.Type {
			case watch.Bookmark:
				// Bookmarks advance our resume point without carrying data.
				if rv := extractResourceVersion(evt.Object); rv != "" {
					*resourceVersion = rv
				}
				continue
			case watch.Error:
				// Let outer loop retry / relist.
				return fmt.Errorf("watch returned error event")
			case watch.Deleted:
				// Top-level delete does not require action here.
				continue
			case watch.Added, watch.Modified:
				// handled below
			default:
				klog.V(1).InfoS("skipping unexpected watch event type",
					"eventType", evt.Type,
				)
				continue
			}
			osu, ok := evt.Object.(*monov1alpha1.OSUpgrade)
			if !ok {
				klog.V(1).InfoS("skipping unexpected watch object type",
					"type", fmt.Sprintf("%T", evt.Object),
				)
				continue
			}
			// Advance the resume point past this event.
			if osu.ResourceVersion != "" {
				*resourceVersion = osu.ResourceVersion
			}
			handled, err := handleOSUpgrade(ctx, clients, namespace, osu)
			if err != nil {
				klog.ErrorS(err, "reconcile OSUpgrade failed",
					"name", osu.Name,
					"eventType", evt.Type,
					"resourceVersion", osu.ResourceVersion,
				)
				continue
			}
			if !handled {
				klog.V(2).InfoS("skipping OSUpgrade",
					"name", osu.Name,
					"eventType", evt.Type,
					"phase", osu.StatusPhase(),
				)
			}
		}
	}
}
// handleOSUpgrade dispatches one OSUpgrade to the appropriate reconcile
// stage; it returns (false, nil) when the object needs no action.
func handleOSUpgrade(
	ctx context.Context,
	clients *kube.Clients,
	namespace string,
	osu *monov1alpha1.OSUpgrade,
) (bool, error) {
	switch {
	case !shouldHandle(osu):
		return false, nil
	case osu.Status == nil || osu.Status.ObservedGeneration != osu.Generation:
		// Spec changed or never processed: accept it first.
		return true, reconcileSpec(ctx, clients, namespace, osu)
	case osu.Status.Phase == monov1alpha1.OSUpgradePhaseAccepted:
		// Accepted: fan out per-node progress objects.
		return true, reconcileFanout(ctx, clients, namespace, osu)
	}
	return false, nil
}
// reconcileSpec stamps the OSUpgrade as Accepted for its current generation,
// recording the desired version as resolved. It works on a deep copy so the
// caller's object is left untouched.
func reconcileSpec(
	ctx context.Context,
	clients *kube.Clients,
	namespace string,
	osu *monov1alpha1.OSUpgrade,
) error {
	accepted := osu.DeepCopy()
	accepted.Status = &monov1alpha1.OSUpgradeStatus{
		Phase:              monov1alpha1.OSUpgradePhaseAccepted,
		ResolvedVersion:    accepted.Spec.DesiredVersion,
		ObservedGeneration: accepted.Generation,
	}
	_, err := clients.MonoKS.
		Monok8sV1alpha1().
		OSUpgrades(namespace).
		UpdateStatus(ctx, accepted, metav1.UpdateOptions{})
	return err
}
// reconcileFanout ensures an OSUpgradeProgress exists for every target node.
// Per-node failures are logged and skipped so a single bad node cannot stall
// the fanout for the rest.
func reconcileFanout(
	ctx context.Context,
	clients *kube.Clients,
	namespace string,
	osu *monov1alpha1.OSUpgrade,
) error {
	targets, err := listTargetNodeNames(ctx, clients, osu)
	if err != nil {
		return fmt.Errorf("list target nodes for %s: %w", osu.Name, err)
	}
	if len(targets) == 0 {
		klog.InfoS("no targets", "osupgrade", osu.Name)
		return nil
	}
	klog.InfoS("ensuring OSUpgradeProgress for target nodes",
		"osupgrade", osu.Name,
		"targets", len(targets),
	)
	for _, target := range targets {
		err := EnsureOSUpgradeProgressForNode(ctx, clients, namespace, target, osu)
		if err != nil {
			klog.ErrorS(err, "ensure OSUpgradeProgress for node failed",
				"osupgrade", osu.Name,
				"node", target,
			)
		}
	}
	return nil
}
// listTargetNodeNames lists nodes carrying the NodeControl label, optionally
// narrowed by the OSUpgrade's nodeSelector, and returns their names.
func listTargetNodeNames(
	ctx context.Context,
	clients *kube.Clients,
	osu *monov1alpha1.OSUpgrade,
) ([]string, error) {
	// Base selector: only NodeControl-managed nodes are upgrade targets.
	selector := labels.SelectorFromSet(labels.Set{
		monov1alpha1.NodeControlKey: "true",
	})
	if osu.Spec.NodeSelector != nil {
		userSelector, err := metav1.LabelSelectorAsSelector(osu.Spec.NodeSelector)
		if err != nil {
			return nil, fmt.Errorf("invalid nodeSelector: %w", err)
		}
		// An unselectable selector collapses the target set to nothing
		// instead of silently dropping the user's restriction.
		if reqs, selectable := userSelector.Requirements(); selectable {
			selector = selector.Add(reqs...)
		} else {
			selector = labels.Nothing()
		}
	}
	nodeList, err := clients.Kubernetes.CoreV1().
		Nodes().
		List(ctx, metav1.ListOptions{
			LabelSelector: selector.String(),
		})
	if err != nil {
		return nil, fmt.Errorf("list nodes: %w", err)
	}
	names := make([]string, 0, len(nodeList.Items))
	for i := range nodeList.Items {
		if node := &nodeList.Items[i]; shouldUseNode(node) {
			names = append(names, node.Name)
		}
	}
	return names, nil
}
// shouldUseNode filters out nil entries and nodes with an empty name; any
// other node is considered a valid upgrade target.
func shouldUseNode(node *corev1.Node) bool {
	return node != nil && node.Name != ""
}
// shouldHandle reports whether an OSUpgrade warrants any reconcile work: it
// must not be deleted, must name a desired version, and must be either
// unprocessed, observed at a stale generation, or in the Accepted (fanout)
// phase.
func shouldHandle(osu *monov1alpha1.OSUpgrade) bool {
	if osu == nil || osu.DeletionTimestamp != nil || osu.Spec.DesiredVersion == "" {
		return false
	}
	switch {
	case osu.Status == nil:
		// Never processed: run the initial stage.
		return true
	case osu.Status.ObservedGeneration != osu.Generation:
		// Spec changed since the last reconcile.
		return true
	default:
		// Otherwise only the fanout stage remains actionable.
		return osu.Status.Phase == monov1alpha1.OSUpgradePhaseAccepted
	}
}
// extractResourceVersion pulls the resourceVersion off any object exposing
// GetResourceVersion (metav1 objects and bookmark payloads do); it returns
// the empty string for anything else.
func extractResourceVersion(obj interface{}) string {
	type versioned interface {
		GetResourceVersion() string
	}
	v, ok := obj.(versioned)
	if !ok {
		return ""
	}
	return v.GetResourceVersion()
}

View File

@@ -0,0 +1,92 @@
package controller
import (
"context"
"net/http"
"time"
"example.com/monok8s/pkg/kube"
"github.com/emicklei/go-restful/v3"
"k8s.io/apiserver/pkg/server/httplog"
)
// statusesNoTracePred suppresses httplog stack traces for routine status
// codes; responses with any other status (e.g. 5xx) are logged in full.
var statusesNoTracePred = httplog.StatusIsNot(
	http.StatusOK,
	http.StatusFound,
	http.StatusMovedPermanently,
	http.StatusTemporaryRedirect,
	http.StatusBadRequest,
	http.StatusNotFound,
	http.StatusSwitchingProtocols,
)
// Server exposes the controller's small HTTP status endpoint. It is an
// http.Handler (see ServeHTTP) backed by a go-restful container.
type Server struct {
	restfulCont *restful.Container // routes; built by Initialize
	ctx         context.Context    // base context supplied at construction
	clients     *kube.Clients      // cluster clients; /status itself does not read them
	namespace   string             // reported verbatim in StatusResponse
	nodeName    string             // reported verbatim in StatusResponse
	startedAt   time.Time          // construction time; basis for UptimeSec
}
// StatusResponse is the JSON body returned by GET /status.
type StatusResponse struct {
	OK        bool   `json:"ok"`                  // always true when the handler runs
	Service   string `json:"service"`             // fixed service identifier
	Namespace string `json:"namespace,omitempty"` // controller namespace, if set
	NodeName  string `json:"nodeName,omitempty"`  // node this controller runs on, if set
	UptimeSec int64  `json:"uptimeSec"`           // whole seconds since the server was constructed
}
// NewServer builds a controller status Server and registers its HTTP
// routes immediately, so the returned value is ready to serve.
func NewServer(ctx context.Context, clients *kube.Clients, namespace, nodeName string) *Server {
	srv := new(Server)
	srv.ctx = ctx
	srv.clients = clients
	srv.namespace = namespace
	srv.nodeName = nodeName
	srv.startedAt = time.Now()
	srv.Initialize()
	return srv
}
// ServeHTTP satisfies http.Handler. Requests are routed through the
// restful container, wrapped with apiserver-style request logging; an
// uninitialized receiver yields a 500 instead of a panic.
func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	switch {
	case s == nil:
		http.Error(w, "server is nil", http.StatusInternalServerError)
	case s.restfulCont == nil:
		http.Error(w, "server not initialized", http.StatusInternalServerError)
	default:
		httplog.WithLogging(s.restfulCont, statusesNoTracePred).ServeHTTP(w, req)
	}
}
// Initialize (re)builds the restful container and mounts the /status
// endpoint. NewServer calls it automatically.
func (s *Server) Initialize() {
	cont := restful.NewContainer()
	s.restfulCont = cont
	ws := new(restful.WebService)
	ws.Path("/").
		Consumes(restful.MIME_JSON).
		Produces(restful.MIME_JSON)
	ws.Route(ws.GET("/status").To(s.queryStatus).
		Doc("Return basic controller status"))
	cont.Add(ws)
}
// queryStatus handles GET /status, reporting liveness plus the identity
// and uptime of this controller instance.
func (s *Server) queryStatus(request *restful.Request, response *restful.Response) {
	uptime := int64(time.Since(s.startedAt).Seconds())
	// Best effort: a write/encode failure here is not actionable.
	_ = response.WriteHeaderAndEntity(http.StatusOK, StatusResponse{
		OK:        true,
		Service:   "monok8s-controller",
		Namespace: s.namespace,
		NodeName:  s.nodeName,
		UptimeSec: uptime,
	})
}

View File

@@ -0,0 +1,106 @@
/* MIT License */
// Code generated by client-gen. DO NOT EDIT.
package versioned
import (
fmt "fmt"
http "net/http"
monok8sv1alpha1 "example.com/monok8s/pkg/generated/clientset/versioned/typed/monok8s/v1alpha1"
discovery "k8s.io/client-go/discovery"
rest "k8s.io/client-go/rest"
flowcontrol "k8s.io/client-go/util/flowcontrol"
)
// Interface is the aggregate client surface generated for this project:
// discovery plus the typed monok8s/v1alpha1 group client.
// NOTE(review): generated file — change the generator inputs, not this file.
type Interface interface {
	Discovery() discovery.DiscoveryInterface
	Monok8sV1alpha1() monok8sv1alpha1.Monok8sV1alpha1Interface
}

// Clientset contains the clients for groups.
type Clientset struct {
	*discovery.DiscoveryClient
	monok8sV1alpha1 *monok8sv1alpha1.Monok8sV1alpha1Client
}

// Monok8sV1alpha1 retrieves the Monok8sV1alpha1Client
func (c *Clientset) Monok8sV1alpha1() monok8sv1alpha1.Monok8sV1alpha1Interface {
	return c.monok8sV1alpha1
}

// Discovery retrieves the DiscoveryClient
func (c *Clientset) Discovery() discovery.DiscoveryInterface {
	if c == nil {
		return nil
	}
	return c.DiscoveryClient
}

// NewForConfig creates a new Clientset for the given config.
// If config's RateLimiter is not set and QPS and Burst are acceptable,
// NewForConfig will generate a rate-limiter in configShallowCopy.
// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
// where httpClient was generated with rest.HTTPClientFor(c).
func NewForConfig(c *rest.Config) (*Clientset, error) {
	configShallowCopy := *c
	if configShallowCopy.UserAgent == "" {
		configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent()
	}
	// share the transport between all clients
	httpClient, err := rest.HTTPClientFor(&configShallowCopy)
	if err != nil {
		return nil, err
	}
	return NewForConfigAndClient(&configShallowCopy, httpClient)
}

// NewForConfigAndClient creates a new Clientset for the given config and http client.
// Note the http client provided takes precedence over the configured transport values.
// If config's RateLimiter is not set and QPS and Burst are acceptable,
// NewForConfigAndClient will generate a rate-limiter in configShallowCopy.
func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) {
	configShallowCopy := *c
	if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
		if configShallowCopy.Burst <= 0 {
			return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0")
		}
		configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
	}
	var cs Clientset
	var err error
	cs.monok8sV1alpha1, err = monok8sv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient)
	if err != nil {
		return nil, err
	}
	cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient)
	if err != nil {
		return nil, err
	}
	return &cs, nil
}

// NewForConfigOrDie creates a new Clientset for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *Clientset {
	cs, err := NewForConfig(c)
	if err != nil {
		panic(err)
	}
	return cs
}

// New creates a new Clientset for the given RESTClient.
func New(c rest.Interface) *Clientset {
	var cs Clientset
	cs.monok8sV1alpha1 = monok8sv1alpha1.New(c)
	cs.DiscoveryClient = discovery.NewDiscoveryClient(c)
	return &cs
}

View File

@@ -0,0 +1,91 @@
/* MIT License */
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
clientset "example.com/monok8s/pkg/generated/clientset/versioned"
monok8sv1alpha1 "example.com/monok8s/pkg/generated/clientset/versioned/typed/monok8s/v1alpha1"
fakemonok8sv1alpha1 "example.com/monok8s/pkg/generated/clientset/versioned/typed/monok8s/v1alpha1/fake"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/discovery"
fakediscovery "k8s.io/client-go/discovery/fake"
"k8s.io/client-go/testing"
)
// NewSimpleClientset returns a clientset that will respond with the provided objects.
// It's backed by a very simple object tracker that processes creates, updates and deletions as-is,
// without applying any field management, validations and/or defaults. It shouldn't be considered a replacement
// for a real clientset and is mostly useful in simple unit tests.
//
// Deprecated: NewClientset replaces this with support for field management, which significantly improves
// server side apply testing. NewClientset is only available when apply configurations are generated (e.g.
// via --with-applyconfig).
func NewSimpleClientset(objects ...runtime.Object) *Clientset {
	o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder())
	for _, obj := range objects {
		if err := o.Add(obj); err != nil {
			panic(err)
		}
	}
	cs := &Clientset{tracker: o}
	cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake}
	cs.AddReactor("*", "*", testing.ObjectReaction(o))
	cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) {
		var opts metav1.ListOptions
		if watchAction, ok := action.(testing.WatchActionImpl); ok {
			opts = watchAction.ListOptions
		}
		gvr := action.GetResource()
		ns := action.GetNamespace()
		watch, err := o.Watch(gvr, ns, opts)
		if err != nil {
			return false, nil, err
		}
		return true, watch, nil
	})
	return cs
}

// Clientset implements clientset.Interface. Meant to be embedded into a
// struct to get a default implementation. This makes faking out just the method
// you want to test easier.
type Clientset struct {
	testing.Fake
	discovery *fakediscovery.FakeDiscovery
	tracker   testing.ObjectTracker
}

// Discovery returns the fake discovery client.
func (c *Clientset) Discovery() discovery.DiscoveryInterface {
	return c.discovery
}

// Tracker returns the underlying object tracker for direct fixture access.
func (c *Clientset) Tracker() testing.ObjectTracker {
	return c.tracker
}

// IsWatchListSemanticsUnSupported informs the reflector that this client
// doesn't support WatchList semantics.
//
// This is a synthetic method whose sole purpose is to satisfy the optional
// interface check performed by the reflector.
// Returning true signals that WatchList can NOT be used.
// No additional logic is implemented here.
func (c *Clientset) IsWatchListSemanticsUnSupported() bool {
	return true
}

// Compile-time interface conformance checks.
var (
	_ clientset.Interface = &Clientset{}
	_ testing.FakeClient  = &Clientset{}
)

// Monok8sV1alpha1 retrieves the Monok8sV1alpha1Client
func (c *Clientset) Monok8sV1alpha1() monok8sv1alpha1.Monok8sV1alpha1Interface {
	return &fakemonok8sv1alpha1.FakeMonok8sV1alpha1{Fake: &c.Fake}
}

View File

@@ -0,0 +1,6 @@
/* MIT License */
// Code generated by client-gen. DO NOT EDIT.
// This package has the automatically generated fake clientset.
package fake

View File

@@ -0,0 +1,42 @@
/* MIT License */
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
monok8sv1alpha1 "example.com/monok8s/pkg/apis/monok8s/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
serializer "k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)
// scheme and codecs back the fake clientset's object tracker.
var scheme = runtime.NewScheme()
var codecs = serializer.NewCodecFactory(scheme)

var localSchemeBuilder = runtime.SchemeBuilder{
	monok8sv1alpha1.AddToScheme,
}

// AddToScheme adds all types of this clientset into the given scheme. This allows composition
// of clientsets, like in:
//
//	import (
//	  "k8s.io/client-go/kubernetes"
//	  clientsetscheme "k8s.io/client-go/kubernetes/scheme"
//	  aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
//	)
//
//	kclientset, _ := kubernetes.NewForConfig(c)
//	_ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
var AddToScheme = localSchemeBuilder.AddToScheme

// init registers meta/v1 kinds and all monok8s types into scheme.
func init() {
	v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
	utilruntime.Must(AddToScheme(scheme))
}

View File

@@ -0,0 +1,6 @@
/* MIT License */
// Code generated by client-gen. DO NOT EDIT.
// This package contains the scheme of the automatically generated clientset.
package scheme

View File

@@ -0,0 +1,42 @@
/* MIT License */
// Code generated by client-gen. DO NOT EDIT.
package scheme
import (
monok8sv1alpha1 "example.com/monok8s/pkg/apis/monok8s/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
serializer "k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)
// Scheme, Codecs and ParameterCodec are shared by all typed clients in
// this clientset.
var Scheme = runtime.NewScheme()
var Codecs = serializer.NewCodecFactory(Scheme)
var ParameterCodec = runtime.NewParameterCodec(Scheme)

var localSchemeBuilder = runtime.SchemeBuilder{
	monok8sv1alpha1.AddToScheme,
}

// AddToScheme adds all types of this clientset into the given scheme. This allows composition
// of clientsets, like in:
//
//	import (
//	  "k8s.io/client-go/kubernetes"
//	  clientsetscheme "k8s.io/client-go/kubernetes/scheme"
//	  aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
//	)
//
//	kclientset, _ := kubernetes.NewForConfig(c)
//	_ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
var AddToScheme = localSchemeBuilder.AddToScheme

// init registers meta/v1 kinds and all monok8s types into Scheme.
func init() {
	v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
	utilruntime.Must(AddToScheme(Scheme))
}

View File

@@ -0,0 +1,6 @@
/* MIT License */
// Code generated by client-gen. DO NOT EDIT.
// This package has the automatically generated typed clients.
package v1alpha1

View File

@@ -0,0 +1,6 @@
/* MIT License */
// Code generated by client-gen. DO NOT EDIT.
// Package fake has the automatically generated clients.
package fake

View File

@@ -0,0 +1,30 @@
/* MIT License */
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
v1alpha1 "example.com/monok8s/pkg/generated/clientset/versioned/typed/monok8s/v1alpha1"
rest "k8s.io/client-go/rest"
testing "k8s.io/client-go/testing"
)
// FakeMonok8sV1alpha1 is a fake typed client for the monok8s/v1alpha1
// group; all actions are recorded on the embedded testing.Fake.
type FakeMonok8sV1alpha1 struct {
	*testing.Fake
}

// OSUpgrades returns a fake OSUpgrade client scoped to namespace.
func (c *FakeMonok8sV1alpha1) OSUpgrades(namespace string) v1alpha1.OSUpgradeInterface {
	return newFakeOSUpgrades(c, namespace)
}

// OSUpgradeProgresses returns a fake OSUpgradeProgress client scoped to namespace.
func (c *FakeMonok8sV1alpha1) OSUpgradeProgresses(namespace string) v1alpha1.OSUpgradeProgressInterface {
	return newFakeOSUpgradeProgresses(c, namespace)
}

// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
// Note: the returned interface wraps a typed nil *rest.RESTClient.
func (c *FakeMonok8sV1alpha1) RESTClient() rest.Interface {
	var ret *rest.RESTClient
	return ret
}

View File

@@ -0,0 +1,36 @@
/* MIT License */
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
v1alpha1 "example.com/monok8s/pkg/apis/monok8s/v1alpha1"
monok8sv1alpha1 "example.com/monok8s/pkg/generated/clientset/versioned/typed/monok8s/v1alpha1"
gentype "k8s.io/client-go/gentype"
)
// fakeOSUpgrades implements OSUpgradeInterface
type fakeOSUpgrades struct {
	*gentype.FakeClientWithList[*v1alpha1.OSUpgrade, *v1alpha1.OSUpgradeList]
	Fake *FakeMonok8sV1alpha1
}

// newFakeOSUpgrades wires the generic fake list client to the shared
// action recorder for the "osupgrades" resource in the given namespace.
func newFakeOSUpgrades(fake *FakeMonok8sV1alpha1, namespace string) monok8sv1alpha1.OSUpgradeInterface {
	return &fakeOSUpgrades{
		gentype.NewFakeClientWithList[*v1alpha1.OSUpgrade, *v1alpha1.OSUpgradeList](
			fake.Fake,
			namespace,
			v1alpha1.SchemeGroupVersion.WithResource("osupgrades"),
			v1alpha1.SchemeGroupVersion.WithKind("OSUpgrade"),
			func() *v1alpha1.OSUpgrade { return &v1alpha1.OSUpgrade{} },
			func() *v1alpha1.OSUpgradeList { return &v1alpha1.OSUpgradeList{} },
			func(dst, src *v1alpha1.OSUpgradeList) { dst.ListMeta = src.ListMeta },
			func(list *v1alpha1.OSUpgradeList) []*v1alpha1.OSUpgrade { return gentype.ToPointerSlice(list.Items) },
			func(list *v1alpha1.OSUpgradeList, items []*v1alpha1.OSUpgrade) {
				list.Items = gentype.FromPointerSlice(items)
			},
		),
		fake,
	}
}

View File

@@ -0,0 +1,38 @@
/* MIT License */
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
v1alpha1 "example.com/monok8s/pkg/apis/monok8s/v1alpha1"
monok8sv1alpha1 "example.com/monok8s/pkg/generated/clientset/versioned/typed/monok8s/v1alpha1"
gentype "k8s.io/client-go/gentype"
)
// fakeOSUpgradeProgresses implements OSUpgradeProgressInterface
type fakeOSUpgradeProgresses struct {
	*gentype.FakeClientWithList[*v1alpha1.OSUpgradeProgress, *v1alpha1.OSUpgradeProgressList]
	Fake *FakeMonok8sV1alpha1
}

// newFakeOSUpgradeProgresses wires the generic fake list client to the
// shared action recorder for the "osupgradeprogresses" resource.
func newFakeOSUpgradeProgresses(fake *FakeMonok8sV1alpha1, namespace string) monok8sv1alpha1.OSUpgradeProgressInterface {
	return &fakeOSUpgradeProgresses{
		gentype.NewFakeClientWithList[*v1alpha1.OSUpgradeProgress, *v1alpha1.OSUpgradeProgressList](
			fake.Fake,
			namespace,
			v1alpha1.SchemeGroupVersion.WithResource("osupgradeprogresses"),
			v1alpha1.SchemeGroupVersion.WithKind("OSUpgradeProgress"),
			func() *v1alpha1.OSUpgradeProgress { return &v1alpha1.OSUpgradeProgress{} },
			func() *v1alpha1.OSUpgradeProgressList { return &v1alpha1.OSUpgradeProgressList{} },
			func(dst, src *v1alpha1.OSUpgradeProgressList) { dst.ListMeta = src.ListMeta },
			func(list *v1alpha1.OSUpgradeProgressList) []*v1alpha1.OSUpgradeProgress {
				return gentype.ToPointerSlice(list.Items)
			},
			func(list *v1alpha1.OSUpgradeProgressList, items []*v1alpha1.OSUpgradeProgress) {
				list.Items = gentype.FromPointerSlice(items)
			},
		),
		fake,
	}
}

View File

@@ -0,0 +1,9 @@
/* MIT License */
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
// OSUpgradeExpansion is the extension point for hand-written OSUpgrade
// client methods; empty by default.
type OSUpgradeExpansion interface{}

// OSUpgradeProgressExpansion is the analogous extension point for
// OSUpgradeProgress client methods.
type OSUpgradeProgressExpansion interface{}

View File

@@ -0,0 +1,92 @@
/* MIT License */
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
import (
http "net/http"
monok8sv1alpha1 "example.com/monok8s/pkg/apis/monok8s/v1alpha1"
scheme "example.com/monok8s/pkg/generated/clientset/versioned/scheme"
rest "k8s.io/client-go/rest"
)
// Monok8sV1alpha1Interface is the typed client surface for the
// monok8s/v1alpha1 API group.
type Monok8sV1alpha1Interface interface {
	RESTClient() rest.Interface
	OSUpgradesGetter
	OSUpgradeProgressesGetter
}

// Monok8sV1alpha1Client is used to interact with features provided by the monok8s group.
type Monok8sV1alpha1Client struct {
	restClient rest.Interface
}

// OSUpgrades returns a namespaced OSUpgrade client.
func (c *Monok8sV1alpha1Client) OSUpgrades(namespace string) OSUpgradeInterface {
	return newOSUpgrades(c, namespace)
}

// OSUpgradeProgresses returns a namespaced OSUpgradeProgress client.
func (c *Monok8sV1alpha1Client) OSUpgradeProgresses(namespace string) OSUpgradeProgressInterface {
	return newOSUpgradeProgresses(c, namespace)
}

// NewForConfig creates a new Monok8sV1alpha1Client for the given config.
// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
// where httpClient was generated with rest.HTTPClientFor(c).
func NewForConfig(c *rest.Config) (*Monok8sV1alpha1Client, error) {
	config := *c
	setConfigDefaults(&config)
	httpClient, err := rest.HTTPClientFor(&config)
	if err != nil {
		return nil, err
	}
	return NewForConfigAndClient(&config, httpClient)
}

// NewForConfigAndClient creates a new Monok8sV1alpha1Client for the given config and http client.
// Note the http client provided takes precedence over the configured transport values.
func NewForConfigAndClient(c *rest.Config, h *http.Client) (*Monok8sV1alpha1Client, error) {
	config := *c
	setConfigDefaults(&config)
	client, err := rest.RESTClientForConfigAndClient(&config, h)
	if err != nil {
		return nil, err
	}
	return &Monok8sV1alpha1Client{client}, nil
}

// NewForConfigOrDie creates a new Monok8sV1alpha1Client for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *Monok8sV1alpha1Client {
	client, err := NewForConfig(c)
	if err != nil {
		panic(err)
	}
	return client
}

// New creates a new Monok8sV1alpha1Client for the given RESTClient.
func New(c rest.Interface) *Monok8sV1alpha1Client {
	return &Monok8sV1alpha1Client{c}
}

// setConfigDefaults fills in the group/version, API path, serializer and
// user agent for this group's REST client.
func setConfigDefaults(config *rest.Config) {
	gv := monok8sv1alpha1.SchemeGroupVersion
	config.GroupVersion = &gv
	config.APIPath = "/apis"
	config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion()
	if config.UserAgent == "" {
		config.UserAgent = rest.DefaultKubernetesUserAgent()
	}
}

// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *Monok8sV1alpha1Client) RESTClient() rest.Interface {
	if c == nil {
		return nil
	}
	return c.restClient
}

View File

@@ -0,0 +1,56 @@
/* MIT License */
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
import (
context "context"
monok8sv1alpha1 "example.com/monok8s/pkg/apis/monok8s/v1alpha1"
scheme "example.com/monok8s/pkg/generated/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// OSUpgradesGetter has a method to return a OSUpgradeInterface.
// A group's client should implement this interface.
type OSUpgradesGetter interface {
	OSUpgrades(namespace string) OSUpgradeInterface
}

// OSUpgradeInterface has methods to work with OSUpgrade resources.
type OSUpgradeInterface interface {
	Create(ctx context.Context, oSUpgrade *monok8sv1alpha1.OSUpgrade, opts v1.CreateOptions) (*monok8sv1alpha1.OSUpgrade, error)
	Update(ctx context.Context, oSUpgrade *monok8sv1alpha1.OSUpgrade, opts v1.UpdateOptions) (*monok8sv1alpha1.OSUpgrade, error)
	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
	UpdateStatus(ctx context.Context, oSUpgrade *monok8sv1alpha1.OSUpgrade, opts v1.UpdateOptions) (*monok8sv1alpha1.OSUpgrade, error)
	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
	Get(ctx context.Context, name string, opts v1.GetOptions) (*monok8sv1alpha1.OSUpgrade, error)
	List(ctx context.Context, opts v1.ListOptions) (*monok8sv1alpha1.OSUpgradeList, error)
	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *monok8sv1alpha1.OSUpgrade, err error)
	OSUpgradeExpansion
}

// oSUpgrades implements OSUpgradeInterface
type oSUpgrades struct {
	*gentype.ClientWithList[*monok8sv1alpha1.OSUpgrade, *monok8sv1alpha1.OSUpgradeList]
}

// newOSUpgrades returns a OSUpgrades
func newOSUpgrades(c *Monok8sV1alpha1Client, namespace string) *oSUpgrades {
	return &oSUpgrades{
		gentype.NewClientWithList[*monok8sv1alpha1.OSUpgrade, *monok8sv1alpha1.OSUpgradeList](
			"osupgrades",
			c.RESTClient(),
			scheme.ParameterCodec,
			namespace,
			func() *monok8sv1alpha1.OSUpgrade { return &monok8sv1alpha1.OSUpgrade{} },
			func() *monok8sv1alpha1.OSUpgradeList { return &monok8sv1alpha1.OSUpgradeList{} },
		),
	}
}

View File

@@ -0,0 +1,56 @@
/* MIT License */
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
import (
context "context"
monok8sv1alpha1 "example.com/monok8s/pkg/apis/monok8s/v1alpha1"
scheme "example.com/monok8s/pkg/generated/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
gentype "k8s.io/client-go/gentype"
)
// OSUpgradeProgressesGetter has a method to return a OSUpgradeProgressInterface.
// A group's client should implement this interface.
type OSUpgradeProgressesGetter interface {
	OSUpgradeProgresses(namespace string) OSUpgradeProgressInterface
}

// OSUpgradeProgressInterface has methods to work with OSUpgradeProgress resources.
type OSUpgradeProgressInterface interface {
	Create(ctx context.Context, oSUpgradeProgress *monok8sv1alpha1.OSUpgradeProgress, opts v1.CreateOptions) (*monok8sv1alpha1.OSUpgradeProgress, error)
	Update(ctx context.Context, oSUpgradeProgress *monok8sv1alpha1.OSUpgradeProgress, opts v1.UpdateOptions) (*monok8sv1alpha1.OSUpgradeProgress, error)
	// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
	UpdateStatus(ctx context.Context, oSUpgradeProgress *monok8sv1alpha1.OSUpgradeProgress, opts v1.UpdateOptions) (*monok8sv1alpha1.OSUpgradeProgress, error)
	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
	Get(ctx context.Context, name string, opts v1.GetOptions) (*monok8sv1alpha1.OSUpgradeProgress, error)
	List(ctx context.Context, opts v1.ListOptions) (*monok8sv1alpha1.OSUpgradeProgressList, error)
	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *monok8sv1alpha1.OSUpgradeProgress, err error)
	OSUpgradeProgressExpansion
}

// oSUpgradeProgresses implements OSUpgradeProgressInterface
type oSUpgradeProgresses struct {
	*gentype.ClientWithList[*monok8sv1alpha1.OSUpgradeProgress, *monok8sv1alpha1.OSUpgradeProgressList]
}

// newOSUpgradeProgresses returns a OSUpgradeProgresses
func newOSUpgradeProgresses(c *Monok8sV1alpha1Client, namespace string) *oSUpgradeProgresses {
	return &oSUpgradeProgresses{
		gentype.NewClientWithList[*monok8sv1alpha1.OSUpgradeProgress, *monok8sv1alpha1.OSUpgradeProgressList](
			"osupgradeprogresses",
			c.RESTClient(),
			scheme.ParameterCodec,
			namespace,
			func() *monok8sv1alpha1.OSUpgradeProgress { return &monok8sv1alpha1.OSUpgradeProgress{} },
			func() *monok8sv1alpha1.OSUpgradeProgressList { return &monok8sv1alpha1.OSUpgradeProgressList{} },
		),
	}
}

View File

@@ -0,0 +1,249 @@
/* MIT License */
// Code generated by informer-gen. DO NOT EDIT.
package externalversions
import (
reflect "reflect"
sync "sync"
time "time"
versioned "example.com/monok8s/pkg/generated/clientset/versioned"
internalinterfaces "example.com/monok8s/pkg/generated/informers/externalversions/internalinterfaces"
monok8s "example.com/monok8s/pkg/generated/informers/externalversions/monok8s"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
cache "k8s.io/client-go/tools/cache"
)
// SharedInformerOption defines the functional option type for SharedInformerFactory.
type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory

// sharedInformerFactory caches one informer per object type and hands
// them out via InformerFor; lock guards all maps and the started state.
type sharedInformerFactory struct {
	client           versioned.Interface
	namespace        string
	tweakListOptions internalinterfaces.TweakListOptionsFunc
	lock             sync.Mutex
	defaultResync    time.Duration
	customResync     map[reflect.Type]time.Duration
	transform        cache.TransformFunc
	informers        map[reflect.Type]cache.SharedIndexInformer
	// startedInformers is used for tracking which informers have been started.
	// This allows Start() to be called multiple times safely.
	startedInformers map[reflect.Type]bool
	// wg tracks how many goroutines were started.
	wg sync.WaitGroup
	// shuttingDown is true when Shutdown has been called. It may still be running
	// because it needs to wait for goroutines.
	shuttingDown bool
}

// WithCustomResyncConfig sets a custom resync period for the specified informer types.
func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption {
	return func(factory *sharedInformerFactory) *sharedInformerFactory {
		for k, v := range resyncConfig {
			factory.customResync[reflect.TypeOf(k)] = v
		}
		return factory
	}
}

// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory.
func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption {
	return func(factory *sharedInformerFactory) *sharedInformerFactory {
		factory.tweakListOptions = tweakListOptions
		return factory
	}
}

// WithNamespace limits the SharedInformerFactory to the specified namespace.
func WithNamespace(namespace string) SharedInformerOption {
	return func(factory *sharedInformerFactory) *sharedInformerFactory {
		factory.namespace = namespace
		return factory
	}
}

// WithTransform sets a transform on all informers.
func WithTransform(transform cache.TransformFunc) SharedInformerOption {
	return func(factory *sharedInformerFactory) *sharedInformerFactory {
		factory.transform = transform
		return factory
	}
}

// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces.
func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory {
	return NewSharedInformerFactoryWithOptions(client, defaultResync)
}

// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory.
// Listers obtained via this SharedInformerFactory will be subject to the same filters
// as specified here.
//
// Deprecated: Please use NewSharedInformerFactoryWithOptions instead
func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory {
	return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions))
}

// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options.
func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory {
	factory := &sharedInformerFactory{
		client:           client,
		namespace:        v1.NamespaceAll,
		defaultResync:    defaultResync,
		informers:        make(map[reflect.Type]cache.SharedIndexInformer),
		startedInformers: make(map[reflect.Type]bool),
		customResync:     make(map[reflect.Type]time.Duration),
	}
	// Apply all options
	for _, opt := range options {
		factory = opt(factory)
	}
	return factory
}
// Start launches every informer that has not been started yet, each in its
// own goroutine running until stopCh closes. Safe to call repeatedly; a
// no-op once shutdown has begun.
func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) {
	f.lock.Lock()
	defer f.lock.Unlock()
	if f.shuttingDown {
		return
	}
	for informerType, informer := range f.informers {
		if !f.startedInformers[informerType] {
			f.wg.Add(1)
			// We need a new variable in each loop iteration,
			// otherwise the goroutine would use the loop variable
			// and that keeps changing.
			informer := informer
			go func() {
				defer f.wg.Done()
				informer.Run(stopCh)
			}()
			f.startedInformers[informerType] = true
		}
	}
}

// Shutdown marks the factory as shutting down, then blocks until all
// informer goroutines have exited.
func (f *sharedInformerFactory) Shutdown() {
	f.lock.Lock()
	f.shuttingDown = true
	f.lock.Unlock()
	// Will return immediately if there is nothing to wait for.
	f.wg.Wait()
}

// WaitForCacheSync waits for the caches of all started informers and
// reports sync success per informer type.
func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool {
	informers := func() map[reflect.Type]cache.SharedIndexInformer {
		f.lock.Lock()
		defer f.lock.Unlock()
		informers := map[reflect.Type]cache.SharedIndexInformer{}
		for informerType, informer := range f.informers {
			if f.startedInformers[informerType] {
				informers[informerType] = informer
			}
		}
		return informers
	}()
	res := map[reflect.Type]bool{}
	for informType, informer := range informers {
		res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced)
	}
	return res
}

// InformerFor returns the SharedIndexInformer for obj using an internal
// client.
func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer {
	f.lock.Lock()
	defer f.lock.Unlock()
	informerType := reflect.TypeOf(obj)
	informer, exists := f.informers[informerType]
	if exists {
		return informer
	}
	// Per-type resync overrides fall back to the factory default.
	resyncPeriod, exists := f.customResync[informerType]
	if !exists {
		resyncPeriod = f.defaultResync
	}
	informer = newFunc(f.client, resyncPeriod)
	informer.SetTransform(f.transform)
	f.informers[informerType] = informer
	return informer
}
// SharedInformerFactory provides shared informers for resources in all known
// API group versions.
//
// It is typically used like this:
//
//	ctx, cancel := context.WithCancel(context.Background())
//	defer cancel()
//	factory := NewSharedInformerFactory(client, resyncPeriod)
//	defer factory.Shutdown() // Returns immediately if nothing was started.
//	genericInformer := factory.ForResource(resource)
//	typedInformer := factory.SomeAPIGroup().V1().SomeType()
//	factory.Start(ctx.Done()) // Start processing these informers.
//	synced := factory.WaitForCacheSync(ctx.Done())
//	for v, ok := range synced {
//		if !ok {
//			fmt.Fprintf(os.Stderr, "caches failed to sync: %v", v)
//			return
//		}
//	}
//
//	// Creating informers can also be created after Start, but then
//	// Start must be called again:
//	anotherGenericInformer := factory.ForResource(resource)
//	factory.Start(ctx.Done())
type SharedInformerFactory interface {
	internalinterfaces.SharedInformerFactory
	// Start initializes all requested informers. They are handled in goroutines
	// which run until the stop channel gets closed.
	// Warning: Start does not block. When run in a go-routine, it will race with a later WaitForCacheSync.
	Start(stopCh <-chan struct{})
	// Shutdown marks a factory as shutting down. At that point no new
	// informers can be started anymore and Start will return without
	// doing anything.
	//
	// In addition, Shutdown blocks until all goroutines have terminated. For that
	// to happen, the close channel(s) that they were started with must be closed,
	// either before Shutdown gets called or while it is waiting.
	//
	// Shutdown may be called multiple times, even concurrently. All such calls will
	// block until all goroutines have terminated.
	Shutdown()
	// WaitForCacheSync blocks until all started informers' caches were synced
	// or the stop channel gets closed.
	WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool
	// ForResource gives generic access to a shared informer of the matching type.
	ForResource(resource schema.GroupVersionResource) (GenericInformer, error)
	// InformerFor returns the SharedIndexInformer for obj using an internal
	// client.
	InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer
	Monok8s() monok8s.Interface
}

// Monok8s returns the group-level informer accessor for monok8s types.
func (f *sharedInformerFactory) Monok8s() monok8s.Interface {
	return monok8s.New(f, f.namespace, f.tweakListOptions)
}

View File

@@ -0,0 +1,50 @@
/* MIT License */
// Code generated by informer-gen. DO NOT EDIT.
package externalversions
import (
fmt "fmt"
v1alpha1 "example.com/monok8s/pkg/apis/monok8s/v1alpha1"
schema "k8s.io/apimachinery/pkg/runtime/schema"
cache "k8s.io/client-go/tools/cache"
)
// GenericInformer is type of SharedIndexInformer which will locate and delegate to other
// sharedInformers based on type
type GenericInformer interface {
Informer() cache.SharedIndexInformer
Lister() cache.GenericLister
}
type genericInformer struct {
informer cache.SharedIndexInformer
resource schema.GroupResource
}
// Informer returns the SharedIndexInformer.
func (f *genericInformer) Informer() cache.SharedIndexInformer {
return f.informer
}
// Lister returns the GenericLister.
func (f *genericInformer) Lister() cache.GenericLister {
return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource)
}
// ForResource gives generic access to a shared informer of the matching type.
// It returns an error for resources the factory does not know about.
// TODO extend this to unknown resources with a client pool
func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) {
	// Resolve the shared informer for the requested group/version/resource.
	var informer cache.SharedIndexInformer
	switch resource {
	// Group=monok8s, Version=v1alpha1
	case v1alpha1.SchemeGroupVersion.WithResource("osupgrades"):
		informer = f.Monok8s().V1alpha1().OSUpgrades().Informer()
	case v1alpha1.SchemeGroupVersion.WithResource("osupgradeprogresses"):
		informer = f.Monok8s().V1alpha1().OSUpgradeProgresses().Informer()
	default:
		return nil, fmt.Errorf("no informer found for %v", resource)
	}
	return &genericInformer{resource: resource.GroupResource(), informer: informer}, nil
}

View File

@@ -0,0 +1,26 @@
/* MIT License */
// Code generated by informer-gen. DO NOT EDIT.
package internalinterfaces
import (
time "time"
versioned "example.com/monok8s/pkg/generated/clientset/versioned"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
cache "k8s.io/client-go/tools/cache"
)
// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer.
type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer

// SharedInformerFactory is a small interface to allow for adding an informer
// without an import cycle.
type SharedInformerFactory interface {
	// Start begins the factory's informers; stopCh signals shutdown.
	Start(stopCh <-chan struct{})
	// InformerFor returns the SharedIndexInformer for obj, using newFunc to
	// construct it when needed.
	InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer
}

// TweakListOptionsFunc is a function that transforms a v1.ListOptions.
type TweakListOptionsFunc func(*v1.ListOptions)

View File

@@ -0,0 +1,32 @@
/* MIT License */
// Code generated by informer-gen. DO NOT EDIT.
package monok8s
import (
internalinterfaces "example.com/monok8s/pkg/generated/informers/externalversions/internalinterfaces"
v1alpha1 "example.com/monok8s/pkg/generated/informers/externalversions/monok8s/v1alpha1"
)
// Interface provides access to each of this group's versions.
type Interface interface {
	// V1alpha1 provides access to shared informers for resources in V1alpha1.
	V1alpha1() v1alpha1.Interface
}
// group is the concrete Interface implementation; it carries the factory and
// per-namespace settings forward to each version-specific informer package.
type group struct {
	factory internalinterfaces.SharedInformerFactory
	// namespace scopes the informers created through this group.
	namespace string
	// tweakListOptions mutates list options before each list/watch call.
	tweakListOptions internalinterfaces.TweakListOptionsFunc
}
// New returns a new Interface backed by the given factory, namespace and
// list-option tweak function.
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
	g := &group{
		factory:          f,
		namespace:        namespace,
		tweakListOptions: tweakListOptions,
	}
	return g
}
// V1alpha1 returns a new v1alpha1.Interface, forwarding the group's factory,
// namespace and list-option tweaks.
func (g *group) V1alpha1() v1alpha1.Interface {
	return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions)
}

View File

@@ -0,0 +1,38 @@
/* MIT License */
// Code generated by informer-gen. DO NOT EDIT.
package v1alpha1
import (
internalinterfaces "example.com/monok8s/pkg/generated/informers/externalversions/internalinterfaces"
)
// Interface provides access to all the informers in this group version.
type Interface interface {
	// OSUpgrades returns a OSUpgradeInformer.
	OSUpgrades() OSUpgradeInformer
	// OSUpgradeProgresses returns a OSUpgradeProgressInformer.
	OSUpgradeProgresses() OSUpgradeProgressInformer
}
// version is the concrete Interface implementation; it carries the factory
// and per-namespace settings forward to each resource-specific informer.
type version struct {
	factory internalinterfaces.SharedInformerFactory
	// namespace scopes the informers created through this version.
	namespace string
	// tweakListOptions mutates list options before each list/watch call.
	tweakListOptions internalinterfaces.TweakListOptionsFunc
}
// New returns a new Interface backed by the given factory, namespace and
// list-option tweak function.
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
	v := &version{
		factory:          f,
		namespace:        namespace,
		tweakListOptions: tweakListOptions,
	}
	return v
}
// OSUpgrades returns a OSUpgradeInformer.
func (v *version) OSUpgrades() OSUpgradeInformer {
	inf := &oSUpgradeInformer{
		factory:          v.factory,
		namespace:        v.namespace,
		tweakListOptions: v.tweakListOptions,
	}
	return inf
}
// OSUpgradeProgresses returns a OSUpgradeProgressInformer.
func (v *version) OSUpgradeProgresses() OSUpgradeProgressInformer {
	inf := &oSUpgradeProgressInformer{
		factory:          v.factory,
		namespace:        v.namespace,
		tweakListOptions: v.tweakListOptions,
	}
	return inf
}

Some files were not shown because too many files have changed in this diff Show More