Compare commits


2 Commits

SHA256      Message                                    Date
2a1a5a8f08  Worker node upgrade chain                  2026-04-29 19:28:18 +08:00
e1959bee6d  Refactor into RenderAgent and ApplyAgent   2026-04-29 16:41:40 +08:00
27 changed files with 1652 additions and 730 deletions

View File

@@ -136,6 +136,11 @@ The currently tested upgrade chain is:
- `1.33.10 -> 1.34.6`
- `1.34.6 -> 1.35.3`
Tested worker node upgrade chain:
- `1.33.3 -> 1.34.1`
- `1.34.1 -> 1.35.3`
---
## Current status

View File

@@ -6,7 +6,7 @@ source /utils.sh
/preload-k8s-images.sh || exit 1
export CTL_BIN_LAYER=$( skopeo inspect docker-daemon:localhost/monok8s/node-control:dev | jq -r '.Layers[0] | sub("^sha256:"; "")' )
export CTL_BIN_LAYER=$( skopeo inspect docker-daemon:localhost/monok8s/node-control:$TAG | jq -r '.Layers[0] | sub("^sha256:"; "")' )
mkdir -p \
"$ROOTFS/dev" \

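The parameterized tag above feeds the same first-layer digest extraction. For clarity, here is the skopeo+jq pipeline sketched in Go (a minimal sketch, assuming `skopeo` is on PATH and the `Layers` field shape of its inspect output; the helper and its caller are hypothetical, not part of this diff):

```go
package main

import (
	"encoding/json"
	"fmt"
	"os/exec"
	"strings"
)

// firstLayerDigest mirrors the skopeo+jq pipeline above: inspect the image
// in the local Docker daemon and return the first layer digest with the
// "sha256:" prefix stripped.
func firstLayerDigest(tag string) (string, error) {
	out, err := exec.Command("skopeo", "inspect",
		"docker-daemon:localhost/monok8s/node-control:"+tag).Output()
	if err != nil {
		return "", fmt.Errorf("skopeo inspect: %w", err)
	}
	var info struct {
		Layers []string `json:"Layers"`
	}
	if err := json.Unmarshal(out, &info); err != nil {
		return "", err
	}
	if len(info.Layers) == 0 {
		return "", fmt.Errorf("image has no layers")
	}
	return strings.TrimPrefix(info.Layers[0], "sha256:"), nil
}

func main() {
	digest, err := firstLayerDigest("dev")
	if err != nil {
		panic(err)
	}
	fmt.Println(digest)
}
```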
View File

@@ -43,3 +43,7 @@ BUILD_TAG=MONOK8S
# Optional apt cache
# example: apt-cacher-ng.eco-system.svc.cluster.local:3142
APT_PROXY=
# remote image repository prefix to push to
# e.g. ghcr.io/monok8s
IMAGE_REPOSITORY=

View File

@@ -1,16 +1,41 @@
ARG BASE_IMAGE=localhost/monok8s/ctl-build-base:dev
FROM --platform=$BUILDPLATFORM ${BASE_IMAGE} AS build
ARG VERSION=dev
ARG TARGETOS
ARG TARGETARCH
WORKDIR /src
COPY . .
RUN test -f pkg/buildinfo/buildinfo_gen.go
RUN mkdir -p /out && \
GOOS=${TARGETOS} GOARCH=${TARGETARCH} CGO_ENABLED=0 \
go build -trimpath -ldflags="-s -w" \
-o /out/ctl ./cmd/ctl
FROM alpine:latest AS cacerts
FROM scratch
ARG VERSION
ARG TARGETOS
ARG TARGETARCH
ENV VERSION=${VERSION}
WORKDIR /
COPY bin/ctl-linux-aarch64-${VERSION} ./ctl
COPY out/fw_printenv ./
COPY out/fw_setenv ./
COPY --from=build /out/ctl /ctl
COPY --from=cacerts /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
COPY out/uboot-tools/${TARGETOS}_${TARGETARCH}/fw_printenv /fw_printenv
COPY out/uboot-tools/${TARGETOS}_${TARGETARCH}/fw_setenv /fw_setenv
ENV PATH=/
ENTRYPOINT ["/ctl"]

View File

@@ -1,24 +0,0 @@
FROM golang:1.26-alpine AS build
ARG VERSION
ARG KUBE_VERSION
ARG GIT_REV=unknown
WORKDIR /src
RUN apk add --no-cache git build-base
COPY go.mod go.sum ./
RUN go mod download
COPY . .
RUN test -f pkg/buildinfo/buildinfo_gen.go
RUN mkdir -p /out && \
GOOS=darwin GOARCH=arm64 CGO_ENABLED=0 \
go build -trimpath -ldflags="-s -w" \
-o /out/ctl-${VERSION} ./cmd/ctl
FROM scratch
COPY --from=build /out/ /

View File

@@ -1,20 +0,0 @@
ARG BASE_IMAGE=localhost/monok8s/ctl-build-base:dev
FROM ${BASE_IMAGE} AS build
ARG VERSION=dev
ARG TARGETOS=linux
ARG TARGETARCH=arm64
WORKDIR /src
COPY . .
RUN test -f pkg/buildinfo/buildinfo_gen.go
RUN mkdir -p /out && \
GOOS=${TARGETOS} GOARCH=${TARGETARCH} CGO_ENABLED=0 \
go build -trimpath -ldflags="-s -w" \
-o /out/ctl-linux-aarch64-${VERSION} ./cmd/ctl
FROM scratch
COPY --from=build /out/ /

View File

@@ -1,4 +1,6 @@
include ../build.env
-include ../build.env.work
export
BUILD_PLATFORM ?= linux/amd64
@@ -11,30 +13,29 @@ KUBE_VERSION ?= v1.33.3
GIT_REV := $(shell git rev-parse HEAD)
PACKAGES_DIR := packages
BIN_DIR := bin
OUT_DIR := out
PACKAGES_DIR := packages
OUT_DIR := out
UBOOT_TOOLS_OUT := $(OUT_DIR)/uboot-tools
UBOOT_TAR := $(PACKAGES_DIR)/uboot-$(UBOOT_VERSION).tar.gz
BUILDINFO_FILE := pkg/buildinfo/buildinfo_gen.go
CRD_PATHS := ./pkg/apis/...
ASSETS_PATH := ./pkg/assets
BUILDX_BUILDER := container-builder
LOCAL_REGISTRY := registry
LOCAL_REGISTRY_PORT := 5000
CTL_BUILD_BASE_IMAGE := localhost:5000/monok8s/ctl-build-base:$(VERSION)
CTL_BINARY := ctl-linux-aarch64-$(VERSION)
CTL_BUILD_BASE_REPO := localhost:5000/monok8s/ctl-build-base
CTL_IMAGE_REPO := localhost:5000/monok8s/node-control
CTL_BUILD_BASE_IMAGE := $(CTL_BUILD_BASE_REPO):$(VERSION)
CTL_IMAGE := $(CTL_IMAGE_REPO):$(VERSION)
DOWNLOAD_PACKAGES_STAMP := $(PACKAGES_DIR)/.download-packages.stamp
$(PACKAGES_DIR):
mkdir -p $@
$(BIN_DIR):
mkdir -p $@
$(OUT_DIR):
mkdir -p $@
@@ -88,11 +89,14 @@ $(DOWNLOAD_PACKAGES_STAMP): docker/download-packages.Dockerfile makefile | $(PAC
@touch $@
uboot-tools: $(DOWNLOAD_PACKAGES_STAMP)
docker buildx build --platform linux/arm64 \
rm -rf "$(UBOOT_TOOLS_OUT)"
mkdir -p "$(UBOOT_TOOLS_OUT)"
docker buildx build \
--platform linux/amd64,linux/arm64 \
-f docker/uboot-tools.Dockerfile \
--build-arg UBOOT_VERSION=$(UBOOT_VERSION) \
--build-arg UBOOT_TAR=$(UBOOT_TAR) \
--output type=local,dest=./$(OUT_DIR) .
--output type=local,dest=./$(UBOOT_TOOLS_OUT),platform-split=true .
ctl-build-base: ensure-buildx ensure-registry
docker buildx build \
@@ -101,16 +105,6 @@ ctl-build-base: ensure-buildx ensure-registry
-t $(CTL_BUILD_BASE_IMAGE) \
--output type=image,push=true,registry.insecure=true .
build-bin: .buildinfo ctl-build-base | $(BIN_DIR)
docker buildx build \
--platform $(BUILD_PLATFORM) \
-f docker/ctl-builder.Dockerfile \
--build-arg BASE_IMAGE=$(CTL_BUILD_BASE_IMAGE) \
--build-arg VERSION=$(VERSION) \
--build-arg TARGETOS=linux \
--build-arg TARGETARCH=arm64 \
--output type=local,dest=./$(BIN_DIR) .
build-crds: ctl-build-base | $(OUT_DIR)
mkdir -p "$(OUT_DIR)/crds"
docker buildx build \
@@ -118,35 +112,47 @@ build-crds: ctl-build-base | $(OUT_DIR)
-f docker/crdgen.Dockerfile \
--build-arg BASE_IMAGE=$(CTL_BUILD_BASE_IMAGE) \
--output type=local,dest=./$(OUT_DIR)/crds .
rm -rf "$(ASSETS_PATH)/crds"
mkdir -p "$(ASSETS_PATH)/crds"
cp -R "$(OUT_DIR)/crds/." "$(ASSETS_PATH)/crds/"
build-agent: build uboot-tools
build-agent: .buildinfo build-crds uboot-tools
docker buildx build \
--platform linux/amd64,linux/arm64 \
-f docker/ctl-agent.Dockerfile \
--build-arg BASE_IMAGE=$(CTL_BUILD_BASE_IMAGE) \
--build-arg VERSION=$(VERSION) \
-t $(CTL_IMAGE) \
--output type=image,push=true,registry.insecure=true .
build-local: .buildinfo build-crds uboot-tools
docker buildx build \
--platform linux/arm64 \
-f docker/ctl-agent.Dockerfile \
--build-arg BASE_IMAGE=$(CTL_BUILD_BASE_IMAGE) \
--build-arg VERSION=$(VERSION) \
--load \
-t localhost/monok8s/node-control:$(VERSION) .
build-local: .buildinfo | $(BIN_DIR)
push-agent: .buildinfo build-crds uboot-tools
test -n "$(IMAGE_REPOSITORY)"
docker buildx build \
-f docker/ctl-builder-local.Dockerfile \
--platform linux/amd64,linux/arm64 \
-f docker/ctl-agent.Dockerfile \
--build-arg BASE_IMAGE=$(CTL_BUILD_BASE_IMAGE) \
--build-arg VERSION=$(VERSION) \
--build-arg KUBE_VERSION=$(KUBE_VERSION) \
--build-arg GIT_REV=$(GIT_REV) \
--output type=local,dest=./$(BIN_DIR) .
-t $(IMAGE_REPOSITORY)/node-control:$(VERSION) \
--push .
run-agent:
docker run --rm \
-v "$$(pwd)/out:/work/out" \
localhost/monok8s/node-control:$(VERSION) \
$(CTL_IMAGE) \
agent --env-file /work/out/cluster.env
build: build-bin build-crds
clean:
-docker image rm localhost/monok8s/node-control:$(VERSION) >/dev/null 2>&1 || true
rm -rf \
$(BIN_DIR) \
$(OUT_DIR)/crds \
$(BUILDINFO_FILE)
@@ -158,7 +164,6 @@ dockerclean:
- docker rmi \
localhost/monok8s/ctl-build-base:$(VERSION) \
localhost/monok8s/node-control:$(VERSION) \
localhost/monok8s/ctl-builder:$(VERSION) \
localhost/monok8s/crdgen:$(VERSION) \
2>/dev/null || true
@@ -169,10 +174,10 @@ dockerclean:
pkgclean:
rm -rf $(PACKAGES_DIR)
all: build build-agent build-local
all: build-agent build-local
.PHONY: \
all clean dockerclean \
.buildinfo ensure-buildx ensure-registry \
build build-bin build-crds build-local build-agent \
uboot-tools run-agent
build-crds build-local build-agent build-agent-local push-agent \
uboot-tools run-agent run-agent-local

View File

@@ -0,0 +1,178 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.20.1
name: monoksconfigs.monok8s.io
spec:
group: monok8s.io
names:
kind: MonoKSConfig
listKind: MonoKSConfigList
plural: monoksconfigs
singular: monoksconfig
scope: Namespaced
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
properties:
allowSchedulingOnControlPlane:
type: boolean
apiServerAdvertiseAddress:
type: string
apiServerEndpoint:
type: string
bootstrapToken:
type: string
clusterDomain:
type: string
clusterName:
type: string
clusterRole:
type: string
cniPlugin:
type: string
containerRuntimeEndpoint:
type: string
controlPlaneCertKey:
type: string
discoveryTokenCACertHash:
type: string
enableNodeControl:
type: boolean
initControlPlane:
type: boolean
kubeProxyNodePortAddresses:
items:
type: string
type: array
kubernetesVersion:
type: string
network:
properties:
dnsNameservers:
items:
type: string
type: array
dnsSearchDomains:
items:
type: string
type: array
hostname:
type: string
managementCIDR:
type: string
managementGateway:
type: string
managementIface:
type: string
type: object
nodeLabels:
additionalProperties:
type: string
type: object
nodeName:
type: string
podSubnet:
type: string
serviceSubnet:
type: string
skipImageCheck:
type: boolean
subjectAltNames:
items:
type: string
type: array
type: object
status:
properties:
appliedSteps:
items:
type: string
type: array
conditions:
items:
description: Condition contains details for one aspect of the current
state of this API Resource.
properties:
lastTransitionTime:
description: |-
lastTransitionTime is the last time the condition transitioned from one status to another.
This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: |-
message is a human readable message indicating details about the transition.
This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: |-
observedGeneration represents the .metadata.generation that the condition was set based upon.
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
with respect to the current state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: |-
reason contains a programmatic identifier indicating the reason for the condition's last transition.
Producers of specific condition types may define expected values and meanings for this field,
and whether the values are considered a guaranteed API.
The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
type: string
status:
description: status of the condition, one of True, False, Unknown.
enum:
- "True"
- "False"
- Unknown
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
required:
- lastTransitionTime
- message
- reason
- status
- type
type: object
type: array
observedGeneration:
format: int64
type: integer
phase:
type: string
type: object
type: object
served: true
storage: true

View File

@@ -0,0 +1,124 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.20.1
name: osupgradeprogresses.monok8s.io
spec:
group: monok8s.io
names:
kind: OSUpgradeProgress
listKind: OSUpgradeProgressList
plural: osupgradeprogresses
shortNames:
- osup
singular: osupgradeprogress
scope: Namespaced
versions:
- additionalPrinterColumns:
- jsonPath: .spec.nodeName
name: Node
type: string
- jsonPath: .spec.sourceRef.name
name: Source
type: string
- jsonPath: .status.currentVersion
name: Current
type: string
- jsonPath: .status.targetVersion
name: Target
type: string
- jsonPath: .status.phase
name: Phase
type: string
name: v1alpha1
schema:
openAPIV3Schema:
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
description: Specification of the desired behavior of the OSUpgradeProgress.
properties:
nodeName:
type: string
retryNonce:
description: |-
RetryNonce triggers a retry when its value changes.
Users can update this field (for example, set it to the current time)
to request a retry of a failed OS upgrade.
type: string
sourceRef:
properties:
name:
type: string
namespace:
type: string
type: object
type: object
status:
description: Most recently observed status of the OSUpgradeProgress.
properties:
completedAt:
format: date-time
type: string
currentFrom:
type: string
currentStep:
format: int32
type: integer
currentTo:
type: string
currentVersion:
type: string
failureReason:
type: string
inactivePartition:
type: string
lastUpdatedAt:
format: date-time
type: string
message:
type: string
observedRetryNonce:
description: |-
ObservedRetryNonce records the last retryNonce value the agent accepted.
When spec.retryNonce is changed by the user and differs from this value,
the agent may retry a failed upgrade.
type: string
phase:
type: string
plannedPath:
items:
type: string
type: array
retryCount:
format: int32
type: integer
startedAt:
format: date-time
type: string
targetVersion:
type: string
type: object
type: object
served: true
storage: true
subresources:
status: {}
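The retryNonce/observedRetryNonce handshake described above can be driven from any client. A minimal sketch using client-go's dynamic client (the package name, helper name, namespace, and object name are hypothetical; client construction is omitted):

```go
package retryclient // hypothetical helper package

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/dynamic"
)

// RequestRetry bumps spec.retryNonce on an OSUpgradeProgress so the agent
// retries a failed OS upgrade, per the retryNonce description above.
func RequestRetry(ctx context.Context, dyn dynamic.Interface, namespace, name string) error {
	gvr := schema.GroupVersionResource{
		Group:    "monok8s.io",
		Version:  "v1alpha1",
		Resource: "osupgradeprogresses",
	}
	// Use the current time as the nonce, as the field docs suggest.
	patch := []byte(fmt.Sprintf(`{"spec":{"retryNonce":%q}}`, time.Now().Format(time.RFC3339)))
	_, err := dyn.Resource(gvr).Namespace(namespace).
		Patch(ctx, name, types.MergePatchType, patch, metav1.PatchOptions{})
	return err
}
```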

View File

@@ -0,0 +1,202 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.20.1
name: osupgrades.monok8s.io
spec:
group: monok8s.io
names:
kind: OSUpgrade
listKind: OSUpgradeList
plural: osupgrades
shortNames:
- osu
singular: osupgrade
scope: Namespaced
versions:
- additionalPrinterColumns:
- jsonPath: .spec.desiredVersion
name: Desired
type: string
- jsonPath: .status.resolvedVersion
name: Resolved
type: string
- jsonPath: .status.phase
name: Phase
type: string
name: v1alpha1
schema:
openAPIV3Schema:
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
description: Specification of the desired behavior of the OSUpgrade.
properties:
catalog:
properties:
configMapRef:
type: string
inline:
type: string
url:
type: string
type: object
desiredVersion:
minLength: 1
type: string
flashProfile:
default: balanced
description: |-
Profiles (TODO)
safe - the api-server stays responsive most of the time
balanced - the api-server may occasionally be unresponsive
fast - disables throttling; good for worker nodes
enum:
- fast
- balanced
- safe
type: string
nodeSelector:
description: |-
A label selector is a label query over a set of resources. The result of matchLabels and
matchExpressions are ANDed. An empty label selector matches all objects. A null
label selector matches no objects.
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements.
The requirements are ANDed.
items:
description: |-
A label selector requirement is a selector that contains values, a key, and an operator that
relates the key and values.
properties:
key:
description: key is the label key that the selector applies
to.
type: string
operator:
description: |-
operator represents a key's relationship to a set of values.
Valid operators are In, NotIn, Exists and DoesNotExist.
type: string
values:
description: |-
values is an array of string values. If the operator is In or NotIn,
the values array must be non-empty. If the operator is Exists or DoesNotExist,
the values array must be empty. This array is replaced during a strategic
merge patch.
items:
type: string
type: array
x-kubernetes-list-type: atomic
required:
- key
- operator
type: object
type: array
x-kubernetes-list-type: atomic
matchLabels:
additionalProperties:
type: string
description: |-
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions, whose key field is "key", the
operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
x-kubernetes-map-type: atomic
type: object
status:
description: Most recently observed status of the OSUpgrade.
properties:
conditions:
items:
description: Condition contains details for one aspect of the current
state of this API Resource.
properties:
lastTransitionTime:
description: |-
lastTransitionTime is the last time the condition transitioned from one status to another.
This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: |-
message is a human readable message indicating details about the transition.
This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: |-
observedGeneration represents the .metadata.generation that the condition was set based upon.
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
with respect to the current state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: |-
reason contains a programmatic identifier indicating the reason for the condition's last transition.
Producers of specific condition types may define expected values and meanings for this field,
and whether the values are considered a guaranteed API.
The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
type: string
status:
description: status of the condition, one of True, False, Unknown.
enum:
- "True"
- "False"
- Unknown
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
required:
- lastTransitionTime
- message
- reason
- status
- type
type: object
type: array
message:
type: string
observedGeneration:
format: int64
type: integer
phase:
type: string
reason:
type: string
resolvedVersion:
type: string
type: object
type: object
served: true
storage: true
subresources:
status: {}

View File

@@ -0,0 +1,6 @@
package assets
import "embed"
//go:embed crds/*.yaml
var CRDs embed.FS

View File

@@ -0,0 +1,49 @@
package assets
import (
"fmt"
"io"
"path/filepath"
"sort"
)
func PrintCRDs(out io.Writer) error {
entries, err := CRDs.ReadDir("crds")
if err != nil {
return err
}
names := make([]string, 0, len(entries))
for _, entry := range entries {
if entry.IsDir() {
continue
}
if filepath.Ext(entry.Name()) != ".yaml" {
continue
}
names = append(names, entry.Name())
}
sort.Strings(names)
for _, name := range names {
b, err := CRDs.ReadFile("crds/" + name)
if err != nil {
return err
}
if _, err := fmt.Fprintln(out, "---"); err != nil {
return err
}
if _, err := out.Write(b); err != nil {
return err
}
if len(b) == 0 || b[len(b)-1] != '\n' {
if _, err := fmt.Fprintln(out); err != nil {
return err
}
}
}
return nil
}
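A usage sketch for the helper above (assuming the `example.com/monok8s/pkg/assets` import path used elsewhere in this diff):

```go
package main

import (
	"log"
	"os"

	assets "example.com/monok8s/pkg/assets"
)

// Stream the bundled CRDs to stdout as one multi-document YAML; the "---"
// separators make the output suitable for piping into `kubectl apply -f -`.
func main() {
	if err := assets.PrintCRDs(os.Stdout); err != nil {
		log.Fatal(err)
	}
}
```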

View File

@@ -112,6 +112,11 @@ func NewRunner(cfg *monov1alpha1.MonoKSConfig) *Runner {
Name: "Wait for existing cluster",
Desc: "Block until control plane is reachable when joining or reconciling an existing cluster",
},
{
RegKey: "CheckForVersionSkew",
Name: "Check for version skew",
Desc: "Validate wether version satisfy the requirements againts current cluster if any",
},
{
RegKey: "ReconcileControlPlane",
Name: "Reconcile control plane",
@@ -122,11 +127,6 @@ func NewRunner(cfg *monov1alpha1.MonoKSConfig) *Runner {
Name: "Reconcile worker node",
Desc: "Reconcile the worker node",
},
{
RegKey: "CheckForVersionSkew",
Name: "Check for version skew",
Desc: "Validate wether version satisfy the requirements againts current cluster if any",
},
{
RegKey: "RunKubeadmUpgradeApply",
Name: "Run kubeadm upgrade apply",

View File

@@ -6,7 +6,9 @@ import (
"github.com/spf13/cobra"
"k8s.io/cli-runtime/pkg/genericclioptions"
"os"
"strings"
assets "example.com/monok8s/pkg/assets"
render "example.com/monok8s/pkg/render"
)
@@ -42,13 +44,20 @@ func NewCmdCreate(flags *genericclioptions.ConfigFlags) *cobra.Command {
return err
},
},
&cobra.Command{
Use: "crds",
Short: "Print the bundled CRDs",
RunE: func(cmd *cobra.Command, _ []string) error {
return assets.PrintCRDs(cmd.OutOrStdout())
},
},
)
var authorizedKeysPath string
sshdcmd := cobra.Command{
Use: "sshd",
Short: "Print sshd deployment template",
Short: "Print sshd deployments template",
RunE: func(cmd *cobra.Command, _ []string) error {
ns, _, err := flags.ToRawKubeConfigLoader().Namespace()
if err != nil {
@@ -77,8 +86,12 @@ func NewCmdCreate(flags *genericclioptions.ConfigFlags) *cobra.Command {
cconf := render.ControllerConf{}
controllercmd := cobra.Command{
Use: "controller",
Short: "Print controller deployment template",
Short: "Print controller deployments template",
RunE: func(cmd *cobra.Command, _ []string) error {
if len(cconf.ImagePullSecrets) > 0 && strings.TrimSpace(cconf.Image) == "" {
return fmt.Errorf("--image-pull-secret requires --image")
}
ns, _, err := flags.ToRawKubeConfigLoader().Namespace()
if err != nil {
return err
@@ -102,9 +115,56 @@ func NewCmdCreate(flags *genericclioptions.ConfigFlags) *cobra.Command {
"",
"Controller image, including optional registry and tag",
)
controllercmd.Flags().StringSliceVar(
&cconf.ImagePullSecrets,
"image-pull-secret",
nil,
"Image pull secret name for the agent image; may be specified multiple times or as a comma-separated list",
)
cmd.AddCommand(&controllercmd)
aconf := render.AgentConf{}
agentcmd := cobra.Command{
Use: "agent",
Short: "Print agent daemonsets template",
RunE: func(cmd *cobra.Command, _ []string) error {
if len(aconf.ImagePullSecrets) > 0 && strings.TrimSpace(aconf.Image) == "" {
return fmt.Errorf("--image-pull-secret requires --image")
}
ns, _, err := flags.ToRawKubeConfigLoader().Namespace()
if err != nil {
return err
}
aconf.Namespace = ns
out, err := render.RenderAgentDaemonSets(aconf)
if err != nil {
return err
}
_, err = fmt.Fprint(cmd.OutOrStdout(), out)
return err
},
}
agentcmd.Flags().StringVar(
&aconf.Image,
"image",
"",
"Agent image, including optional registry and tag",
)
agentcmd.Flags().StringSliceVar(
&aconf.ImagePullSecrets,
"image-pull-secret",
nil,
"Image pull secret name for the agent image; may be specified multiple times or as a comma-separated list",
)
cmd.AddCommand(&agentcmd)
return cmd
}
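The "may be specified multiple times or as a comma-separated list" help text reflects pflag's StringSliceVar semantics, which the flags above rely on; a standalone sketch (hypothetical demo command):

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	var secrets []string
	cmd := &cobra.Command{
		Use: "demo",
		Run: func(*cobra.Command, []string) {
			// Repeated occurrences append; comma-separated values split.
			fmt.Println(secrets) // [regcred extra1 extra2]
		},
	}
	cmd.Flags().StringSliceVar(&secrets, "image-pull-secret", nil, "pull secret names")
	cmd.SetArgs([]string{
		"--image-pull-secret=regcred",
		"--image-pull-secret=extra1,extra2",
	})
	_ = cmd.Execute()
}
```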

View File

@@ -272,11 +272,17 @@ func listTargetNodeNames(
})
if osu.Spec.NodeSelector != nil {
sel, err := metav1.LabelSelectorAsSelector(osu.Spec.NodeSelector)
userSelector, err := metav1.LabelSelectorAsSelector(osu.Spec.NodeSelector)
if err != nil {
return nil, fmt.Errorf("invalid nodeSelector: %w", err)
}
selector = sel
reqs, selectable := userSelector.Requirements()
if !selectable {
selector = labels.Nothing()
} else {
selector = selector.Add(reqs...)
}
}
list, err := clients.Kubernetes.CoreV1().

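The fix above merges the user-supplied selector into the base selector instead of replacing it, and maps an unselectable (null) selector to labels.Nothing(). A minimal sketch of the pattern (the base selector key is hypothetical):

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Base selector the controller always applies (hypothetical key).
	selector := labels.SelectorFromSet(labels.Set{"monok8s.io/node-control": "true"})

	userLS := &metav1.LabelSelector{MatchLabels: map[string]string{"role": "worker"}}
	userSelector, err := metav1.LabelSelectorAsSelector(userLS)
	if err != nil {
		panic(err)
	}
	// Requirements() reports selectable=false for a selector that matches
	// nothing; in that case the combined selector must match nothing too.
	reqs, selectable := userSelector.Requirements()
	if !selectable {
		selector = labels.Nothing()
	} else {
		selector = selector.Add(reqs...)
	}
	fmt.Println(selector) // monok8s.io/node-control=true,role=worker
}
```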
View File

@@ -1,76 +0,0 @@
package crds
import (
monov1alpha1 "example.com/monok8s/pkg/apis/monok8s/v1alpha1"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func Definitions() []*apiextensionsv1.CustomResourceDefinition {
return []*apiextensionsv1.CustomResourceDefinition{
monoKSConfigCRD(),
osUpgradeCRD(),
}
}
func monoKSConfigCRD() *apiextensionsv1.CustomResourceDefinition {
return &apiextensionsv1.CustomResourceDefinition{
ObjectMeta: metav1.ObjectMeta{
Name: monov1alpha1.MonoKSConfigCRD,
},
Spec: apiextensionsv1.CustomResourceDefinitionSpec{
Group: monov1alpha1.Group,
Scope: apiextensionsv1.NamespaceScoped,
Names: apiextensionsv1.CustomResourceDefinitionNames{
Plural: "monoksconfigs",
Singular: "monoksconfig",
Kind: "MonoKSConfig",
ShortNames: []string{"mkscfg"},
},
Versions: []apiextensionsv1.CustomResourceDefinitionVersion{{
Name: "v1alpha1",
Served: true,
Storage: true,
Schema: &apiextensionsv1.CustomResourceValidation{OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{
Type: "object",
Properties: map[string]apiextensionsv1.JSONSchemaProps{
"spec": {Type: "object", XPreserveUnknownFields: boolPtr(true)},
"status": {Type: "object", XPreserveUnknownFields: boolPtr(true)},
},
}},
}},
},
}
}
func osUpgradeCRD() *apiextensionsv1.CustomResourceDefinition {
return &apiextensionsv1.CustomResourceDefinition{
ObjectMeta: metav1.ObjectMeta{
Name: monov1alpha1.OSUpgradeCRD,
},
Spec: apiextensionsv1.CustomResourceDefinitionSpec{
Group: monov1alpha1.Group,
Scope: apiextensionsv1.NamespaceScoped,
Names: apiextensionsv1.CustomResourceDefinitionNames{
Plural: "osupgrades",
Singular: "osupgrade",
Kind: "OSUpgrade",
ShortNames: []string{"osup"},
},
Versions: []apiextensionsv1.CustomResourceDefinitionVersion{{
Name: "v1alpha1",
Served: true,
Storage: true,
Schema: &apiextensionsv1.CustomResourceValidation{OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{
Type: "object",
Properties: map[string]apiextensionsv1.JSONSchemaProps{
"spec": {Type: "object", XPreserveUnknownFields: boolPtr(true)},
"status": {Type: "object", XPreserveUnknownFields: boolPtr(true)},
},
}},
}},
},
}
}
func boolPtr(v bool) *bool { return &v }

View File

@@ -3,37 +3,27 @@ package node
import (
"context"
"fmt"
"reflect"
"strings"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
monov1alpha1 "example.com/monok8s/pkg/apis/monok8s/v1alpha1"
"example.com/monok8s/pkg/kube"
"example.com/monok8s/pkg/render"
templates "example.com/monok8s/pkg/templates"
)
const (
controlAgentImage = "localhost/monok8s/node-control:dev"
kubeconfig = "/etc/kubernetes/admin.conf"
)
const kubeconfig = "/etc/kubernetes/admin.conf"
func ApplyNodeControlDaemonSetResources(ctx context.Context, n *NodeContext) error {
// Only the control-plane should bootstrap this DaemonSet definition.
// And only when the feature is enabled.
if strings.TrimSpace(n.Config.Spec.ClusterRole) != "control-plane" || !n.Config.Spec.EnableNodeControl {
klog.InfoS("skipped for", "clusterRole", n.Config.Spec.ClusterRole, "enableNodeAgent", n.Config.Spec.EnableNodeControl)
klog.InfoS("skipped for",
"clusterRole", n.Config.Spec.ClusterRole,
"enableNodeAgent", n.Config.Spec.EnableNodeControl,
)
return nil
}
err := ApplyCRDs(ctx, n)
if err != nil {
if err := ApplyCRDs(ctx, n); err != nil {
return err
}
@@ -47,363 +37,13 @@ func ApplyNodeControlDaemonSetResources(ctx context.Context, n *NodeContext) err
return fmt.Errorf("build kube clients from %s: %w", kubeconfig, err)
}
labels := map[string]string{
"app.kubernetes.io/name": monov1alpha1.NodeAgentName,
"app.kubernetes.io/component": "agent",
"app.kubernetes.io/part-of": "monok8s",
"app.kubernetes.io/managed-by": monov1alpha1.NodeControlName,
conf := render.AgentConf{
Namespace: namespace,
}
kubeClient := clients.Kubernetes
if err := ensureNamespace(ctx, kubeClient, namespace, labels); err != nil {
return fmt.Errorf("ensure namespace %q: %w", namespace, err)
}
if err := applyNodeAgentServiceAccount(ctx, kubeClient, namespace, labels); err != nil {
return fmt.Errorf("apply serviceaccount: %w", err)
}
if err := applyNodeAgentClusterRole(ctx, kubeClient, labels); err != nil {
return fmt.Errorf("apply clusterrole: %w", err)
}
if err := applyNodeAgentClusterRoleBinding(ctx, kubeClient, namespace, labels); err != nil {
return fmt.Errorf("apply clusterrolebinding: %w", err)
}
if err := applyNodeAgentDaemonSet(ctx, kubeClient, namespace, labels); err != nil {
return fmt.Errorf("apply daemonset: %w", err)
if err := render.ApplyAgentDaemonSets(ctx, clients.Kubernetes, conf); err != nil {
return fmt.Errorf("apply node agent daemonset resources: %w", err)
}
return nil
}
func ensureNamespace(
ctx context.Context,
kubeClient kubernetes.Interface,
namespace string,
labels map[string]string,
) error {
_, err := kubeClient.CoreV1().Namespaces().Get(ctx, namespace, metav1.GetOptions{})
if err == nil {
return nil
}
if !apierrors.IsNotFound(err) {
return fmt.Errorf("get namespace: %w", err)
}
ns := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: namespace,
Labels: copyStringMap(labels),
},
}
_, err = kubeClient.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{})
if err != nil && !apierrors.IsAlreadyExists(err) {
return fmt.Errorf("create namespace: %w", err)
}
return nil
}
func copyStringMap(in map[string]string) map[string]string {
if len(in) == 0 {
return nil
}
out := make(map[string]string, len(in))
for k, v := range in {
out[k] = v
}
return out
}
func applyNodeAgentServiceAccount(ctx context.Context, kubeClient kubernetes.Interface, namespace string, labels map[string]string) error {
want := &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: monov1alpha1.NodeAgentName,
Namespace: namespace,
Labels: labels,
},
}
existing, err := kubeClient.CoreV1().ServiceAccounts(namespace).Get(ctx, monov1alpha1.NodeAgentName, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
_, err = kubeClient.CoreV1().ServiceAccounts(namespace).Create(ctx, want, metav1.CreateOptions{})
return err
}
if err != nil {
return err
}
changed := false
if !reflect.DeepEqual(existing.Labels, want.Labels) {
existing.Labels = want.Labels
changed = true
}
if !changed {
return nil
}
_, err = kubeClient.CoreV1().ServiceAccounts(namespace).Update(ctx, existing, metav1.UpdateOptions{})
return err
}
func applyNodeAgentClusterRole(ctx context.Context, kubeClient kubernetes.Interface, labels map[string]string) error {
wantRules := []rbacv1.PolicyRule{
{
APIGroups: []string{monov1alpha1.Group},
Resources: []string{"osupgrades"},
Verbs: []string{"get"},
},
{
APIGroups: []string{monov1alpha1.Group},
Resources: []string{"osupgradeprogresses"},
Verbs: []string{"get", "list", "watch", "create", "patch", "update"},
},
{
APIGroups: []string{monov1alpha1.Group},
Resources: []string{"osupgradeprogresses/status"},
Verbs: []string{"get", "list", "watch", "create", "patch", "update"},
},
{
APIGroups: []string{""},
Resources: []string{"nodes"},
Verbs: []string{"get", "list", "watch"},
},
}
want := &rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{
Name: monov1alpha1.NodeAgentName,
Labels: labels,
},
Rules: wantRules,
}
existing, err := kubeClient.RbacV1().ClusterRoles().Get(ctx, monov1alpha1.NodeAgentName, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
_, err = kubeClient.RbacV1().ClusterRoles().Create(ctx, want, metav1.CreateOptions{})
return err
}
if err != nil {
return err
}
changed := false
if !reflect.DeepEqual(existing.Labels, want.Labels) {
existing.Labels = want.Labels
changed = true
}
if !reflect.DeepEqual(existing.Rules, want.Rules) {
existing.Rules = want.Rules
changed = true
}
if !changed {
return nil
}
_, err = kubeClient.RbacV1().ClusterRoles().Update(ctx, existing, metav1.UpdateOptions{})
return err
}
func applyNodeAgentClusterRoleBinding(ctx context.Context, kubeClient kubernetes.Interface, namespace string, labels map[string]string) error {
wantRoleRef := rbacv1.RoleRef{
APIGroup: rbacv1.GroupName,
Kind: "ClusterRole",
Name: monov1alpha1.NodeAgentName,
}
wantSubjects := []rbacv1.Subject{
{
Kind: "ServiceAccount",
Name: monov1alpha1.NodeAgentName,
Namespace: namespace,
},
}
want := &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: monov1alpha1.NodeAgentName,
Labels: labels,
},
RoleRef: wantRoleRef,
Subjects: wantSubjects,
}
existing, err := kubeClient.RbacV1().ClusterRoleBindings().Get(ctx, monov1alpha1.NodeAgentName, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
_, err = kubeClient.RbacV1().ClusterRoleBindings().Create(ctx, want, metav1.CreateOptions{})
return err
}
if err != nil {
return err
}
// roleRef is immutable. If it differs, fail loudly instead of pretending we can patch it.
if !reflect.DeepEqual(existing.RoleRef, want.RoleRef) {
return fmt.Errorf("existing ClusterRoleBinding %q has different roleRef and must be recreated", monov1alpha1.NodeAgentName)
}
changed := false
if !reflect.DeepEqual(existing.Labels, want.Labels) {
existing.Labels = want.Labels
changed = true
}
if !reflect.DeepEqual(existing.Subjects, want.Subjects) {
existing.Subjects = want.Subjects
changed = true
}
if !changed {
return nil
}
_, err = kubeClient.RbacV1().ClusterRoleBindings().Update(ctx, existing, metav1.UpdateOptions{})
return err
}
func applyNodeAgentDaemonSet(ctx context.Context, kubeClient kubernetes.Interface, namespace string, labels map[string]string) error {
privileged := true
dsLabels := monov1alpha1.NodeAgentLabels()
want := &appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: monov1alpha1.NodeAgentName,
Namespace: namespace,
Labels: labels,
},
Spec: appsv1.DaemonSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"app.kubernetes.io/name": monov1alpha1.NodeAgentName,
},
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: dsLabels,
},
Spec: corev1.PodSpec{
ServiceAccountName: monov1alpha1.NodeAgentName,
HostNetwork: true,
HostPID: true,
DNSPolicy: corev1.DNSClusterFirstWithHostNet,
NodeSelector: map[string]string{
monov1alpha1.NodeControlKey: "true",
},
Tolerations: []corev1.Toleration{
{Operator: corev1.TolerationOpExists},
},
Containers: []corev1.Container{
{
Name: "agent",
Image: controlAgentImage,
ImagePullPolicy: corev1.PullNever,
Args: []string{"agent", "--env-file", "$(CLUSTER_ENV_FILE)"},
Env: []corev1.EnvVar{
{
Name: "NODE_NAME",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "spec.nodeName",
},
},
},
{
Name: "CLUSTER_ENV_FILE",
Value: "/host/opt/monok8s/config/cluster.env",
},
{
Name: "FW_ENV_CONFIG_FILE",
Value: "/host/etc/fw_env.config",
},
},
SecurityContext: &corev1.SecurityContext{
Privileged: &privileged,
},
VolumeMounts: []corev1.VolumeMount{
{
Name: "host-dev",
MountPath: "/dev",
},
{
Name: "host-etc",
MountPath: "/host/etc",
ReadOnly: true,
},
{
Name: "host-config",
MountPath: "/host/opt/monok8s/config",
ReadOnly: true,
},
},
},
},
Volumes: []corev1.Volume{
{
Name: "host-dev",
VolumeSource: corev1.VolumeSource{
HostPath: &corev1.HostPathVolumeSource{
Path: "/dev",
Type: hostPathType(corev1.HostPathDirectory),
},
},
},
{
Name: "host-etc",
VolumeSource: corev1.VolumeSource{
HostPath: &corev1.HostPathVolumeSource{
Path: "/etc",
Type: hostPathType(corev1.HostPathDirectory),
},
},
},
{
Name: "host-config",
VolumeSource: corev1.VolumeSource{
HostPath: &corev1.HostPathVolumeSource{
Path: "/opt/monok8s/config",
Type: hostPathType(corev1.HostPathDirectory),
},
},
},
},
},
},
},
}
existing, err := kubeClient.AppsV1().DaemonSets(namespace).Get(ctx, monov1alpha1.NodeAgentName, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
_, err = kubeClient.AppsV1().DaemonSets(namespace).Create(ctx, want, metav1.CreateOptions{})
return err
}
if err != nil {
return err
}
changed := false
if !reflect.DeepEqual(existing.Labels, want.Labels) {
existing.Labels = want.Labels
changed = true
}
if !reflect.DeepEqual(existing.Spec, want.Spec) {
existing.Spec = want.Spec
changed = true
}
if !changed {
return nil
}
_, err = kubeClient.AppsV1().DaemonSets(namespace).Update(ctx, existing, metav1.UpdateOptions{})
return err
}
func hostPathType(t corev1.HostPathType) *corev1.HostPathType {
return &t
}
func mountPropagationMode(m corev1.MountPropagationMode) *corev1.MountPropagationMode {
return &m
}

View File

@@ -12,9 +12,6 @@ import (
"time"
"gopkg.in/yaml.v3"
"k8s.io/client-go/discovery"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/klog/v2"
monov1alpha1 "example.com/monok8s/pkg/apis/monok8s/v1alpha1"
@@ -27,6 +24,16 @@ const (
tmpKubeadmInitConf = "/tmp/kubeadm-init.yaml"
)
func chooseVersionKubeconfig(state *LocalClusterState) string {
if state.HasAdminKubeconfig {
return adminKubeconfigPath
}
if state.HasKubeletKubeconfig {
return kubeletKubeconfigPath
}
return ""
}
func DetectLocalClusterState(ctx context.Context, nctx *NodeContext) error {
_ = ctx
@@ -259,110 +266,6 @@ func waitForAPIViaKubeconfig(ctx context.Context, kubeconfigPath string, timeout
}
}
func getServerVersion(ctx context.Context, kubeconfigPath string) (string, error) {
restCfg, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath)
if err != nil {
return "", fmt.Errorf("build kubeconfig %s: %w", kubeconfigPath, err)
}
// Keep this short. This is a probe, not a long-running client.
restCfg.Timeout = 5 * time.Second
clientset, err := kubernetes.NewForConfig(restCfg)
if err != nil {
return "", fmt.Errorf("create clientset: %w", err)
}
disc := clientset.Discovery()
return discoverServerVersion(ctx, disc)
}
func discoverServerVersion(ctx context.Context, disc discovery.DiscoveryInterface) (string, error) {
info, err := disc.ServerVersion()
if err != nil {
return "", err
}
if info == nil || strings.TrimSpace(info.GitVersion) == "" {
return "", errors.New("server version is empty")
}
return normalizeKubeVersion(info.GitVersion), nil
}
type kubeVersion struct {
Major int
Minor int
Patch int
}
func parseKubeVersion(s string) (kubeVersion, error) {
s = strings.TrimSpace(s)
s = strings.TrimPrefix(s, "v")
var v kubeVersion
n, err := fmt.Sscanf(s, "%d.%d.%d", &v.Major, &v.Minor, &v.Patch)
// Accepts "1.29" or "1.29.3"
if err != nil || n < 2 {
return kubeVersion{}, fmt.Errorf("invalid kubernetes version %q", s)
}
return v, nil
}
// Control-plane: keep this strict.
// Accept same version, or a one-minor step where the node binary is newer than the current cluster.
// That covers normal control-plane upgrade flow but blocks nonsense.
func isSupportedControlPlaneSkew(clusterVersion, nodeVersion string) bool {
cv, err := parseKubeVersion(clusterVersion)
if err != nil {
return false
}
nv, err := parseKubeVersion(nodeVersion)
if err != nil {
return false
}
if cv.Major != nv.Major {
return false
}
if cv.Minor == nv.Minor {
return true
}
if nv.Minor == cv.Minor+1 {
return true
}
return false
}
// Worker: kubelet generally must not be newer than the apiserver.
// Older kubelets are allowed within supported skew range.
// The requirement is that unsupported worker skew should still proceed, so this
// only classifies support status and must NOT be used to block the upgrade.
func isSupportedWorkerSkew(clusterVersion, nodeVersion string) bool {
cv, err := parseKubeVersion(clusterVersion)
if err != nil {
return false
}
nv, err := parseKubeVersion(nodeVersion)
if err != nil {
return false
}
if cv.Major != nv.Major {
return false
}
// kubelet newer than apiserver => unsupported
if nv.Minor > cv.Minor {
return false
}
// kubelet up to 3 minors older than apiserver => supported
if cv.Minor-nv.Minor <= 3 {
return true
}
return false
}
func ValidateRequiredImagesPresent(ctx context.Context, n *NodeContext) error {
if n.Config.Spec.SkipImageCheck {
klog.Infof("skipping image check (skipImageCheck=true)")
@@ -419,31 +322,6 @@ func checkImagePresent(ctx context.Context, n *NodeContext, image string) error
return nil
}
func chooseVersionKubeconfig(state *LocalClusterState) string {
if state.HasAdminKubeconfig {
return adminKubeconfigPath
}
if state.HasKubeletKubeconfig {
return kubeletKubeconfigPath
}
return ""
}
func versionEq(a, b string) bool {
return normalizeKubeVersion(a) == normalizeKubeVersion(b)
}
func normalizeKubeVersion(v string) string {
v = strings.TrimSpace(v)
if v == "" {
return ""
}
if !strings.HasPrefix(v, "v") {
v = "v" + v
}
return v
}
func buildNodeRegistration(spec monov1alpha1.MonoKSConfigSpec) NodeRegistrationOptions {
nodeName := strings.TrimSpace(spec.NodeName)
criSocket := strings.TrimSpace(spec.ContainerRuntimeEndpoint)
@@ -781,11 +659,6 @@ func RunKubeadmJoin(ctx context.Context, nctx *NodeContext) error {
return nil
}
func RunKubeadmUpgradeNode(context.Context, *NodeContext) error {
klog.Info("run_kubeadm_upgrade_node: TODO implement kubeadm upgrade node")
return nil
}
func ReconcileControlPlane(ctx context.Context, nctx *NodeContext) error {
if nctx.BootstrapState == nil {
return errors.New("BootstrapState is nil, call ClassifyBootstrapAction() first")

View File

@@ -0,0 +1,108 @@
package node
import (
"context"
"errors"
"fmt"
"os"
"strings"
"k8s.io/klog/v2"
"example.com/monok8s/pkg/system"
)
const kubeadmUpgradeNodeHostnameBugFixedIn = "v1.35.0"
// COMPAT(kubeadm-upgrade-node-hostname)
// Affects: Kubernetes/kubeadm < v1.35.0
// Upstream: kubernetes/kubeadm#3244, kubernetes/kubernetes#134319
// RemoveWhen: minimum supported Kubernetes version >= v1.35.0
//
// Affected kubeadm versions can derive the target Node name for
// `kubeadm upgrade node` from the local OS hostname instead of the existing
// kubeadm NodeRegistration / kubelet --hostname-override state.
func needsKubeadmUpgradeNodeHostnameWorkaround(kubeadmVersion string) bool {
lt, err := versionLt(kubeadmVersion, kubeadmUpgradeNodeHostnameBugFixedIn)
if err != nil {
klog.Warningf(
"could not parse kubeadm version %q; enabling kubeadm upgrade node hostname workaround: %v",
kubeadmVersion,
err,
)
return true
}
return lt
}
// runWithTemporaryHostname works around kubernetes/kubeadm#3244, fixed by
// kubernetes/kubernetes#134319 in Kubernetes v1.35.0.
//
// Affected kubeadm versions can derive the target Node name for
// `kubeadm upgrade node` from the local OS hostname instead of the existing
// kubeadm NodeRegistration / kubelet --hostname-override state. That breaks
// valid setups where the machine hostname differs from the Kubernetes Node
// name: kubeadm may authenticate as one node but try to get/patch another Node,
// and the Node authorizer correctly rejects it.
//
// Keep this workaround scoped to affected kubeadm versions only. Set the
// temporary hostname to the Kubernetes Node name, run kubeadm, then restore the
// configured machine hostname immediately afterward.
func runWithTemporaryHostname(ctx context.Context, nctx *NodeContext, fn func(context.Context) error) error {
if nctx == nil {
return errors.New("node context is nil")
}
temporaryHostname := strings.TrimSpace(nctx.Config.Spec.NodeName)
if temporaryHostname == "" {
return errors.New("temporary hostname is required")
}
originalHostname, err := os.Hostname()
if err != nil {
return fmt.Errorf("get current hostname: %w", err)
}
if originalHostname == temporaryHostname {
return fn(ctx)
}
restoreHostname := strings.TrimSpace(nctx.Config.Spec.Network.Hostname)
if restoreHostname == "" {
restoreHostname = originalHostname
}
klog.Warningf(
"temporarily changing hostname for kubeadm upgrade node: current=%q temporary=%q restore=%q",
originalHostname,
temporaryHostname,
restoreHostname,
)
if err := system.SetHostname(temporaryHostname); err != nil {
return fmt.Errorf("set temporary hostname to %q: %w", temporaryHostname, err)
}
defer func() {
if err := system.SetHostname(restoreHostname); err != nil {
klog.Errorf("failed to restore hostname to %q: %v", restoreHostname, err)
}
}()
return fn(ctx)
}
// COMPAT(kubeadm-upgrade-node-hostname)
// RemoveWhen: minimum supported Kubernetes version >= v1.35.0
func runKubeadmUpgradeNodeWithCompat(
ctx context.Context,
nctx *NodeContext,
kubeadmVersion string,
fn func(context.Context) error,
) error {
if needsKubeadmUpgradeNodeHostnameWorkaround(kubeadmVersion) {
return runWithTemporaryHostname(ctx, nctx, fn)
}
return fn(ctx)
}

View File

@@ -257,3 +257,102 @@ func describeHealthCheckFailure(ctx context.Context, kubeClient kubernetes.Inter
return nil
}
func RunKubeadmUpgradeNode(ctx context.Context, nctx *NodeContext) error {
if nctx == nil {
return errors.New("node context is nil")
}
if nctx.Config == nil {
return errors.New("node config is nil")
}
if nctx.LocalClusterState == nil {
return errors.New("LocalClusterState is nil. Please run earlier steps first")
}
if nctx.BootstrapState == nil {
return errors.New("BootstrapState is nil. Please run earlier steps first")
}
switch nctx.BootstrapState.Action {
case BootstrapActionUpgradeWorker:
// continue
default:
klog.V(4).Infof("RunKubeadmUpgradeNode skipped for action %q", nctx.BootstrapState.Action)
return nil
}
wantVersion := normalizeKubeVersion(strings.TrimSpace(nctx.Config.Spec.KubernetesVersion))
if wantVersion == "" {
return errors.New("spec.kubernetesVersion is required")
}
kubeconfigPath := chooseVersionKubeconfig(nctx.LocalClusterState)
if kubeconfigPath == "" {
return errors.New("no kubeconfig available for detecting cluster version")
}
clusterVersion := strings.TrimSpace(nctx.BootstrapState.DetectedClusterVersion)
if clusterVersion == "" {
var err error
clusterVersion, err = getServerVersion(ctx, kubeconfigPath)
if err != nil {
if nctx.BootstrapState.UnsupportedWorkerVersionSkew {
klog.Warningf(
"cluster version unavailable but worker skew was marked unsupported/permissive, continuing: reason=%s",
nctx.BootstrapState.VersionSkewReason,
)
} else {
return fmt.Errorf("get cluster version via %s: %w", kubeconfigPath, err)
}
}
}
if clusterVersion != "" && !isSupportedWorkerSkew(clusterVersion, wantVersion) {
klog.Warningf(
"unsupported worker version skew detected, continuing anyway: cluster=%s node=%s",
clusterVersion,
wantVersion,
)
}
klog.Infof(
"running kubeadm upgrade node: role=%s clusterVersion=%s nodeVersion=%s kubeconfig=%s",
strings.TrimSpace(nctx.Config.Spec.ClusterRole),
clusterVersion,
wantVersion,
kubeconfigPath,
)
args := []string{
"upgrade",
"node",
"--kubeconfig",
kubeconfigPath,
}
runKubeadm := func(ctx context.Context) error {
_, err := nctx.SystemRunner.RunWithOptions(
ctx,
"kubeadm",
args,
system.RunOptions{
Timeout: 10 * time.Minute,
OnStdoutLine: func(line string) {
klog.Infof("[kubeadm] %s", line)
},
OnStderrLine: func(line string) {
klog.Infof("[kubeadm] %s", line)
},
},
)
return err
}
// COMPAT(kubeadm-upgrade-node-hostname)
// RemoveWhen: minimum supported Kubernetes version >= v1.35.0
// Replace this wrapper with direct runKubeadm(ctx).
if err := runKubeadmUpgradeNodeWithCompat(ctx, nctx, wantVersion, runKubeadm); err != nil {
return fmt.Errorf("run kubeadm upgrade node: %w", err)
}
return nil
}

View File

@@ -8,9 +8,18 @@ import (
"strings"
"time"
"k8s.io/client-go/discovery"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/klog/v2"
)
type kubeVersion struct {
Major int
Minor int
Patch int
}
func ValidateNodeIPAndAPIServerReachability(ctx context.Context, nct *NodeContext) error {
requireLocalIP := func(wantedIP string) error {
wantedIP = strings.TrimSpace(wantedIP)
@@ -189,3 +198,136 @@ func CheckForVersionSkew(ctx context.Context, nctx *NodeContext) error {
return nil
}
func versionEq(a, b string) bool {
return normalizeKubeVersion(a) == normalizeKubeVersion(b)
}
func versionLt(a, b string) (bool, error) {
av, err := parseKubeVersion(a)
if err != nil {
return false, err
}
bv, err := parseKubeVersion(b)
if err != nil {
return false, err
}
if av.Major != bv.Major {
return av.Major < bv.Major, nil
}
if av.Minor != bv.Minor {
return av.Minor < bv.Minor, nil
}
return av.Patch < bv.Patch, nil
}
func normalizeKubeVersion(v string) string {
v = strings.TrimSpace(v)
if v == "" {
return ""
}
if !strings.HasPrefix(v, "v") {
v = "v" + v
}
return v
}
func parseKubeVersion(s string) (kubeVersion, error) {
s = strings.TrimSpace(s)
s = strings.TrimPrefix(s, "v")
var v kubeVersion
n, err := fmt.Sscanf(s, "%d.%d.%d", &v.Major, &v.Minor, &v.Patch)
// Accepts "1.29" or "1.29.3"
if err != nil || n < 2 {
return kubeVersion{}, fmt.Errorf("invalid kubernetes version %q", s)
}
return v, nil
}
// Control-plane: keep this strict.
// Accept same version, or a one-minor step where the node binary is newer than the current cluster.
// That covers normal control-plane upgrade flow but blocks nonsense.
func isSupportedControlPlaneSkew(clusterVersion, nodeVersion string) bool {
cv, err := parseKubeVersion(clusterVersion)
if err != nil {
return false
}
nv, err := parseKubeVersion(nodeVersion)
if err != nil {
return false
}
if cv.Major != nv.Major {
return false
}
if cv.Minor == nv.Minor {
return true
}
if nv.Minor == cv.Minor+1 {
return true
}
return false
}
// Worker: kubelet generally must not be newer than the apiserver.
// Older kubelets are allowed within supported skew range.
// The requirement is that unsupported worker skew should still proceed, so this
// only classifies support status and must NOT be used to block the upgrade.
func isSupportedWorkerSkew(clusterVersion, nodeVersion string) bool {
cv, err := parseKubeVersion(clusterVersion)
if err != nil {
return false
}
nv, err := parseKubeVersion(nodeVersion)
if err != nil {
return false
}
if cv.Major != nv.Major {
return false
}
// kubelet newer than apiserver => unsupported
if nv.Minor > cv.Minor {
return false
}
// kubelet up to 3 minors older than apiserver => supported
if cv.Minor-nv.Minor <= 3 {
return true
}
return false
}
func getServerVersion(ctx context.Context, kubeconfigPath string) (string, error) {
restCfg, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath)
if err != nil {
return "", fmt.Errorf("build kubeconfig %s: %w", kubeconfigPath, err)
}
// Keep this short. This is a probe, not a long-running client.
restCfg.Timeout = 5 * time.Second
clientset, err := kubernetes.NewForConfig(restCfg)
if err != nil {
return "", fmt.Errorf("create clientset: %w", err)
}
disc := clientset.Discovery()
return discoverServerVersion(ctx, disc)
}
func discoverServerVersion(ctx context.Context, disc discovery.DiscoveryInterface) (string, error) {
info, err := disc.ServerVersion()
if err != nil {
return "", err
}
if info == nil || strings.TrimSpace(info.GitVersion) == "" {
return "", errors.New("server version is empty")
}
return normalizeKubeVersion(info.GitVersion), nil
}
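A few illustrative outcomes of the skew helpers above (calls as defined in this file; the hypothetical wrapper exists only to make the examples compile within the package):

```go
// Illustrative cases (within this package):
func skewExamples() {
	// Control plane: same minor, or exactly one minor newer than the cluster.
	fmt.Println(isSupportedControlPlaneSkew("v1.34.6", "v1.35.3")) // true: one-minor step up
	fmt.Println(isSupportedControlPlaneSkew("v1.33.3", "v1.35.3")) // false: skips a minor

	// Worker: kubelet up to 3 minors older is supported; newer never is.
	fmt.Println(isSupportedWorkerSkew("v1.35.3", "v1.33.1")) // true: kubelet 2 minors older
	fmt.Println(isSupportedWorkerSkew("v1.34.1", "v1.35.3")) // false: kubelet newer than apiserver
}
```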

View File

@@ -0,0 +1,284 @@
package render
import (
"fmt"
"strings"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
monov1alpha1 "example.com/monok8s/pkg/apis/monok8s/v1alpha1"
buildinfo "example.com/monok8s/pkg/buildinfo"
)
type AgentConf struct {
Namespace string
Image string
ImagePullSecrets []string
Labels map[string]string
}
func RenderAgentDaemonSets(conf AgentConf) (string, error) {
objs, err := buildAgentDaemonSetObjects(conf)
if err != nil {
return "", err
}
return renderObjects(objs)
}
func buildAgentDaemonSetObjects(conf AgentConf) ([]runtime.Object, error) {
if strings.TrimSpace(conf.Namespace) == "" {
return nil, fmt.Errorf("namespace is required")
}
conf.Labels = map[string]string{
"app.kubernetes.io/name": monov1alpha1.NodeAgentName,
"app.kubernetes.io/component": "agent",
"app.kubernetes.io/part-of": "monok8s",
"app.kubernetes.io/managed-by": monov1alpha1.NodeControlName,
}
return []runtime.Object{
buildAgentServiceAccount(conf),
buildAgentClusterRole(conf),
buildAgentClusterRoleBinding(conf),
buildAgentDaemonSet(conf),
}, nil
}
func buildAgentNamespace(conf AgentConf) *corev1.Namespace {
return &corev1.Namespace{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "Namespace",
},
ObjectMeta: metav1.ObjectMeta{
Name: conf.Namespace,
Labels: copyStringMap(conf.Labels),
},
}
}
func buildAgentServiceAccount(conf AgentConf) *corev1.ServiceAccount {
return &corev1.ServiceAccount{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "ServiceAccount",
},
ObjectMeta: metav1.ObjectMeta{
Name: monov1alpha1.NodeAgentName,
Namespace: conf.Namespace,
Labels: copyStringMap(conf.Labels),
},
}
}
func buildAgentClusterRole(conf AgentConf) *rbacv1.ClusterRole {
wantRules := []rbacv1.PolicyRule{
{
APIGroups: []string{monov1alpha1.Group},
Resources: []string{"osupgrades"},
Verbs: []string{"get"},
},
{
APIGroups: []string{monov1alpha1.Group},
Resources: []string{"osupgradeprogresses"},
Verbs: []string{"get", "list", "watch", "create", "patch", "update"},
},
{
APIGroups: []string{monov1alpha1.Group},
Resources: []string{"osupgradeprogresses/status"},
Verbs: []string{"get", "list", "watch", "create", "patch", "update"},
},
{
APIGroups: []string{""},
Resources: []string{"nodes"},
Verbs: []string{"get", "list", "watch"},
},
}
return &rbacv1.ClusterRole{
TypeMeta: metav1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRole",
},
ObjectMeta: metav1.ObjectMeta{
Name: monov1alpha1.NodeAgentName,
Labels: copyStringMap(conf.Labels),
},
Rules: wantRules,
}
}
func buildAgentClusterRoleBinding(conf AgentConf) *rbacv1.ClusterRoleBinding {
return &rbacv1.ClusterRoleBinding{
TypeMeta: metav1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRoleBinding",
},
ObjectMeta: metav1.ObjectMeta{
Name: monov1alpha1.NodeAgentName,
Labels: copyStringMap(conf.Labels),
},
RoleRef: rbacv1.RoleRef{
APIGroup: rbacv1.GroupName,
Kind: "ClusterRole",
Name: monov1alpha1.NodeAgentName,
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Name: monov1alpha1.NodeAgentName,
Namespace: conf.Namespace,
},
},
}
}
func buildAgentDaemonSet(conf AgentConf) *appsv1.DaemonSet {
privileged := true
dsLabels := monov1alpha1.NodeAgentLabels()
image, pullPolicy := agentImage(conf)
return &appsv1.DaemonSet{
TypeMeta: metav1.TypeMeta{
APIVersion: "apps/v1",
Kind: "DaemonSet",
},
ObjectMeta: metav1.ObjectMeta{
Name: monov1alpha1.NodeAgentName,
Namespace: conf.Namespace,
Labels: copyStringMap(conf.Labels),
},
Spec: appsv1.DaemonSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"app.kubernetes.io/name": monov1alpha1.NodeAgentName,
},
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: dsLabels,
},
Spec: corev1.PodSpec{
ServiceAccountName: monov1alpha1.NodeAgentName,
HostNetwork: true,
HostPID: true,
DNSPolicy: corev1.DNSClusterFirstWithHostNet,
ImagePullSecrets: imagePullSecrets(conf.ImagePullSecrets),
NodeSelector: map[string]string{
monov1alpha1.NodeControlKey: "true",
},
Tolerations: []corev1.Toleration{
{Operator: corev1.TolerationOpExists},
},
Containers: []corev1.Container{
{
Name: "agent",
Image: image,
ImagePullPolicy: pullPolicy,
Args: []string{"agent", "--env-file", "$(CLUSTER_ENV_FILE)"},
Env: []corev1.EnvVar{
{
Name: "NODE_NAME",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "spec.nodeName",
},
},
},
{
Name: "CLUSTER_ENV_FILE",
Value: "/host/opt/monok8s/config/cluster.env",
},
{
Name: "FW_ENV_CONFIG_FILE",
Value: "/host/etc/fw_env.config",
},
},
SecurityContext: &corev1.SecurityContext{
Privileged: &privileged,
},
VolumeMounts: []corev1.VolumeMount{
{
Name: "host-dev",
MountPath: "/dev",
},
{
Name: "host-etc",
MountPath: "/host/etc",
ReadOnly: true,
},
{
Name: "host-config",
MountPath: "/host/opt/monok8s/config",
ReadOnly: true,
},
},
},
},
Volumes: []corev1.Volume{
{
Name: "host-dev",
VolumeSource: corev1.VolumeSource{
HostPath: &corev1.HostPathVolumeSource{
Path: "/dev",
Type: hostPathType(corev1.HostPathDirectory),
},
},
},
{
Name: "host-etc",
VolumeSource: corev1.VolumeSource{
HostPath: &corev1.HostPathVolumeSource{
Path: "/etc",
Type: hostPathType(corev1.HostPathDirectory),
},
},
},
{
Name: "host-config",
VolumeSource: corev1.VolumeSource{
HostPath: &corev1.HostPathVolumeSource{
Path: "/opt/monok8s/config",
Type: hostPathType(corev1.HostPathDirectory),
},
},
},
},
},
},
},
}
}
func agentImage(conf AgentConf) (string, corev1.PullPolicy) {
if conf.Image != "" {
return conf.Image, corev1.PullIfNotPresent
}
return fmt.Sprintf("localhost/monok8s/node-control:%s", buildinfo.Version), corev1.PullNever
}
func copyStringMap(in map[string]string) map[string]string {
if len(in) == 0 {
return nil
}
out := make(map[string]string, len(in))
for k, v := range in {
out[k] = v
}
return out
}
func hostPathType(t corev1.HostPathType) *corev1.HostPathType {
return &t
}
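
The builders above feed both the YAML render path and the in-cluster applier in the next file. A minimal sketch of the render side, assuming buildAgentDaemonSetObjects (the builder used by ApplyAgentDaemonSets below) and the shared renderObjects helper; the actual entry point in the package may be named or shaped differently:

func RenderAgentDaemonSets(conf AgentConf) (string, error) {
	// Illustrative counterpart to RenderControllerDeployments; not
	// necessarily present under this name in the package.
	objs, err := buildAgentDaemonSetObjects(conf)
	if err != nil {
		return "", err
	}
	// renderObjects joins the objects into one multi-document YAML
	// stream separated by "---" (see the shared helpers further down).
	return renderObjects(objs)
}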

View File

@@ -0,0 +1,203 @@
package render
import (
"context"
"fmt"
"reflect"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
)
func ApplyAgentDaemonSets(ctx context.Context, kubeClient kubernetes.Interface, conf AgentConf) error {
objs, err := buildAgentDaemonSetObjects(conf)
if err != nil {
return err
}
if err := applyAgentNamespace(ctx, kubeClient, buildAgentNamespace(conf)); err != nil {
return fmt.Errorf("apply namespace: %w", err)
}
for _, obj := range objs {
if err := applyAgentObject(ctx, kubeClient, obj); err != nil {
return err
}
}
return nil
}
func applyAgentObject(ctx context.Context, kubeClient kubernetes.Interface, obj runtime.Object) error {
switch want := obj.(type) {
case *corev1.ServiceAccount:
return applyAgentServiceAccount(ctx, kubeClient, want)
case *rbacv1.ClusterRole:
return applyAgentClusterRole(ctx, kubeClient, want)
case *rbacv1.ClusterRoleBinding:
return applyAgentClusterRoleBinding(ctx, kubeClient, want)
case *appsv1.DaemonSet:
return applyAgentDaemonSet(ctx, kubeClient, want)
default:
return fmt.Errorf("unsupported agent object type %T", obj)
}
}
func applyAgentNamespace(ctx context.Context, kubeClient kubernetes.Interface, want *corev1.Namespace) error {
existing, err := kubeClient.CoreV1().Namespaces().Get(ctx, want.Name, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
_, err = kubeClient.CoreV1().Namespaces().Create(ctx, want, metav1.CreateOptions{})
return err
}
if err != nil {
return err
}
// Merge rather than replace: labels set on the namespace by other
// components are preserved; only the desired keys are enforced.
labels, changed := mergeStringMapsInto(existing.Labels, want.Labels)
if !changed {
return nil
}
existing.Labels = labels
_, err = kubeClient.CoreV1().Namespaces().Update(ctx, existing, metav1.UpdateOptions{})
return err
}
func applyAgentServiceAccount(ctx context.Context, kubeClient kubernetes.Interface, want *corev1.ServiceAccount) error {
existing, err := kubeClient.CoreV1().ServiceAccounts(want.Namespace).Get(ctx, want.Name, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
_, err = kubeClient.CoreV1().ServiceAccounts(want.Namespace).Create(ctx, want, metav1.CreateOptions{})
return err
}
if err != nil {
return err
}
changed := false
if !reflect.DeepEqual(existing.Labels, want.Labels) {
existing.Labels = want.Labels
changed = true
}
if !changed {
return nil
}
_, err = kubeClient.CoreV1().ServiceAccounts(want.Namespace).Update(ctx, existing, metav1.UpdateOptions{})
return err
}
func applyAgentClusterRole(ctx context.Context, kubeClient kubernetes.Interface, want *rbacv1.ClusterRole) error {
existing, err := kubeClient.RbacV1().ClusterRoles().Get(ctx, want.Name, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
_, err = kubeClient.RbacV1().ClusterRoles().Create(ctx, want, metav1.CreateOptions{})
return err
}
if err != nil {
return err
}
changed := false
if !reflect.DeepEqual(existing.Labels, want.Labels) {
existing.Labels = want.Labels
changed = true
}
if !reflect.DeepEqual(existing.Rules, want.Rules) {
existing.Rules = want.Rules
changed = true
}
if !changed {
return nil
}
_, err = kubeClient.RbacV1().ClusterRoles().Update(ctx, existing, metav1.UpdateOptions{})
return err
}
func applyAgentClusterRoleBinding(ctx context.Context, kubeClient kubernetes.Interface, want *rbacv1.ClusterRoleBinding) error {
existing, err := kubeClient.RbacV1().ClusterRoleBindings().Get(ctx, want.Name, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
_, err = kubeClient.RbacV1().ClusterRoleBindings().Create(ctx, want, metav1.CreateOptions{})
return err
}
if err != nil {
return err
}
// roleRef is immutable. If it differs, fail loudly instead of pretending we can patch it.
if !reflect.DeepEqual(existing.RoleRef, want.RoleRef) {
return fmt.Errorf("existing ClusterRoleBinding %q has different roleRef and must be recreated", want.Name)
}
changed := false
if !reflect.DeepEqual(existing.Labels, want.Labels) {
existing.Labels = want.Labels
changed = true
}
if !reflect.DeepEqual(existing.Subjects, want.Subjects) {
existing.Subjects = want.Subjects
changed = true
}
if !changed {
return nil
}
_, err = kubeClient.RbacV1().ClusterRoleBindings().Update(ctx, existing, metav1.UpdateOptions{})
return err
}
func applyAgentDaemonSet(ctx context.Context, kubeClient kubernetes.Interface, want *appsv1.DaemonSet) error {
existing, err := kubeClient.AppsV1().DaemonSets(want.Namespace).Get(ctx, want.Name, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
_, err = kubeClient.AppsV1().DaemonSets(want.Namespace).Create(ctx, want, metav1.CreateOptions{})
return err
}
if err != nil {
return err
}
changed := false
if !reflect.DeepEqual(existing.Labels, want.Labels) {
existing.Labels = want.Labels
changed = true
}
// API-server defaulting fills in spec fields left at their zero value, so a
// full-spec DeepEqual may report drift on every pass; the resulting update
// is idempotent, just not always a no-op.
if !reflect.DeepEqual(existing.Spec, want.Spec) {
existing.Spec = want.Spec
changed = true
}
if !changed {
return nil
}
_, err = kubeClient.AppsV1().DaemonSets(want.Namespace).Update(ctx, existing, metav1.UpdateOptions{})
return err
}
func mergeStringMapsInto(dst map[string]string, src map[string]string) (map[string]string, bool) {
if len(src) == 0 {
return dst, false
}
changed := false
if dst == nil {
dst = map[string]string{}
changed = true
}
for k, v := range src {
if dst[k] != v {
dst[k] = v
changed = true
}
}
return dst, changed
}
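
For context, a minimal sketch of driving ApplyAgentDaemonSets from a command, assuming an in-cluster client; the package path is inferred from the module path in the imports above, and the namespace value is illustrative:

package main

import (
	"context"
	"log"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"

	"example.com/monok8s/pkg/render" // package path assumed from the imports above
)

func main() {
	// Assumes the caller runs inside the cluster; the real ctl binary may
	// resolve kubeconfig differently.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		log.Fatalf("load kube config: %v", err)
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		log.Fatalf("build client: %v", err)
	}
	conf := render.AgentConf{Namespace: "monok8s-system"} // illustrative namespace
	if err := render.ApplyAgentDaemonSets(context.Background(), client, conf); err != nil {
		log.Fatalf("apply agent objects: %v", err)
	}
}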

View File

@@ -1,7 +1,6 @@
package render
import (
"bytes"
"fmt"
appsv1 "k8s.io/api/apps/v1"
@@ -9,7 +8,6 @@ import (
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer/json"
"k8s.io/apimachinery/pkg/util/intstr"
monov1alpha1 "example.com/monok8s/pkg/apis/monok8s/v1alpha1"
@@ -17,9 +15,10 @@ import (
)
type ControllerConf struct {
Namespace string
Image string
Labels map[string]string
Namespace string
Image string
ImagePullSecrets []string
Labels map[string]string
}
func RenderControllerDeployments(conf ControllerConf) (string, error) {
@@ -41,27 +40,7 @@ func RenderControllerDeployments(conf ControllerConf) (string, error) {
buildControllerDeployment(conf),
}
s := runtime.NewScheme()
_ = corev1.AddToScheme(s)
_ = rbacv1.AddToScheme(s)
_ = appsv1.AddToScheme(s)
serializer := json.NewYAMLSerializer(json.DefaultMetaFactory, s, s)
var buf bytes.Buffer
for i, obj := range objs {
if i > 0 {
if _, err := fmt.Fprintln(&buf, "---"); err != nil {
return "", err
}
}
if err := serializer.Encode(obj, &buf); err != nil {
return "", err
}
}
return buf.String(), nil
return renderObjects(objs)
}
func buildControllerServiceAccount(conf ControllerConf) *corev1.ServiceAccount {
@@ -191,6 +170,7 @@ func buildControllerDeployment(conf ControllerConf) *appsv1.Deployment {
},
Spec: corev1.PodSpec{
ServiceAccountName: monov1alpha1.ControllerName,
ImagePullSecrets: imagePullSecrets(conf.ImagePullSecrets),
Containers: []corev1.Container{
{
Name: "controller",

View File

@@ -0,0 +1,74 @@
package render
import (
"bytes"
"fmt"
"strings"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/yaml"
)
func renderObjects(objs []runtime.Object) (string, error) {
var buf bytes.Buffer
for i, obj := range objs {
if i > 0 {
if _, err := fmt.Fprintln(&buf, "---"); err != nil {
return "", err
}
}
b, err := renderObjectYAML(obj)
if err != nil {
return "", err
}
if _, err := buf.Write(b); err != nil {
return "", err
}
}
return buf.String(), nil
}
func renderObjectYAML(obj runtime.Object) ([]byte, error) {
b, err := yaml.Marshal(obj)
if err != nil {
return nil, err
}
var m map[string]any
if err := yaml.Unmarshal(b, &m); err != nil {
return nil, err
}
delete(m, "status")
return yaml.Marshal(m)
}
func imagePullSecrets(names []string) []corev1.LocalObjectReference {
if len(names) == 0 {
return nil
}
refs := make([]corev1.LocalObjectReference, 0, len(names))
for _, name := range names {
name = strings.TrimSpace(name)
if name == "" {
continue
}
refs = append(refs, corev1.LocalObjectReference{
Name: name,
})
}
if len(refs) == 0 {
return nil
}
return refs
}
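
A quick illustration of the edge cases these helpers guard against (values made up): blank entries are dropped, and an all-blank list yields nil so the rendered pod spec omits imagePullSecrets entirely.

refs := imagePullSecrets([]string{" regcred ", ""})
// refs == []corev1.LocalObjectReference{{Name: "regcred"}}
refs = imagePullSecrets([]string{"", "   "})
// refs == nil: the imagePullSecrets field is omitted from the manifest

renderObjectYAML, for its part, round-trips each object through a generic map so the empty status stanza that yaml.Marshal emits for typed objects never appears in the rendered output.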

View File

@@ -1,16 +1,11 @@
package render
import (
"bytes"
"fmt"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer/json"
"k8s.io/apimachinery/pkg/util/intstr"
monov1alpha1 "example.com/monok8s/pkg/apis/monok8s/v1alpha1"
@@ -39,27 +34,7 @@ func RenderSSHDDeployments(namespace, authKeys string) (string, error) {
buildSSHDDeployment(vals, namespace, labels),
}
s := runtime.NewScheme()
_ = corev1.AddToScheme(s)
_ = rbacv1.AddToScheme(s)
_ = appsv1.AddToScheme(s)
serializer := json.NewYAMLSerializer(json.DefaultMetaFactory, s, s)
var buf bytes.Buffer
for i, obj := range objs {
if i > 0 {
if _, err := fmt.Fprintln(&buf, "---"); err != nil {
return "", err
}
}
if err := serializer.Encode(obj, &buf); err != nil {
return "", err
}
}
return buf.String(), nil
return renderObjects(objs)
}
func buildSSHDConfigMap(

View File

@@ -181,7 +181,7 @@ $(INITRAMFS): $(INITRAMFS_DEPS) $(DOWNLOAD_PACKAGES_STAMP) | $(OUT_DIR)
test -f $@
$(CLITOOLS_BIN): $(CLITOOLS_SRCS)
$(MAKE) -C clitools build-agent
$(MAKE) -C clitools build-local VERSION="$(TAG)"
vpp: $(BUILD_BASE_STAMP) $(VPP_TAR) $(DPDK_TAR) $(FMLIB_TAR) $(FMC_TAR) $(NXP_TAR)
@build_base_tag=$$(docker image inspect \