Update ctl init to support env file

This commit is contained in:
2026-03-30 19:33:44 +08:00
parent 60a9ffeaf6
commit d9ffd1b446
12 changed files with 450 additions and 1191 deletions

View File

@@ -11,3 +11,4 @@ rc-update add fancontrol boot
rc-update add loopback boot
rc-update add hostname boot
rc-update add localmount boot
rc-update add bootstrap-cluster default

View File

@@ -1,23 +0,0 @@
#!/sbin/openrc-run
# OpenRC one-shot service: runs apply-node-config.sh at boot to configure
# hostname, management interface and DNS from /opt/monok8s/config/node.env.
# NOTE(review): per the hunk header this file is deleted by this commit.
export PATH="/usr/local/bin:/usr/local/sbin:$PATH"
name="Apply node config"
description="Apply node configurations using node.env from /opt/monok8s/config"
command="/opt/scripts/apply-node-config.sh"
LOG_DIR="/var/log/monok8s"
LOG_FILE="$LOG_DIR/apply-node-config.log"
# Wait for local filesystems: both the config dir and the log dir live there.
depend() {
need localmount
}
# Ensure the log dir exists, run the script synchronously, and report its
# exit status; stdout and stderr are appended to the log file.
start() {
checkpath --directory "$LOG_DIR"
ebegin "Applying node config"
"$command" >>"$LOG_FILE" 2>&1
eend $?
}

View File

@@ -2,23 +2,24 @@
export PATH="/usr/local/bin:/usr/local/sbin:$PATH"
# NOTE(review): this span interleaves the pre- and post-change versions of the
# service (each old assignment is immediately followed by its replacement);
# when sourced, only the second of each duplicated assignment takes effect.
name="Bootstrap cluster"
description="Apply cluster configurations using node.env from /opt/monok8s/config"
name="Bootstrap the Cluster"
description="Runs the ctl init bootstrap sequence"
command="/opt/scripts/bootstrap-cluster.sh"
command="/usr/local/bin/ctl"
# ctl now reads its settings straight from the env file (see ctl init --env-file).
command_args="init --env-file /opt/monok8s/config/cluster.env"
LOG_DIR="/var/log/monok8s"
LOG_FILE="$LOG_DIR/bootstrap.log"
LOG_FILE="$LOG_DIR/bootstrap-cluster.log"
# Must run after node config is applied; network is used if available.
depend() {
need apply-node-config
use net
need localmount
}
start() {
checkpath --directory "$LOG_DIR"
ebegin "Applying cluster config"
"$command" >>"$LOG_FILE" 2>&1 &
ebegin "Starting the bootstrap sequence"
# NOTE(review): without --background this blocks the boot sequence until ctl
# exits (the old version ran the script with a trailing '&'); also the shell
# redirection, not start-stop-daemon's --stdout/--stderr, captures output.
# Confirm both behaviors are intended.
start-stop-daemon --start \
--exec "$command" \
-- $command_args >>"$LOG_FILE" 2>&1
eend $?
}

View File

@@ -1,201 +0,0 @@
#!/bin/bash
# apply-node-config.sh: configure this node (hostname, management interface,
# IPv4 forwarding, DNS) from the variables in node.env.
# NOTE(review): per the hunk header this script is deleted by this commit.
set -euo pipefail
CONFIG_DIR="${CONFIG_DIR:-/opt/monok8s/config}"
NODE_ENV="${NODE_ENV:-$CONFIG_DIR/node.env}"
# Prefixed stdout logger.
log() {
echo "[monok8s-node] $*"
}
# Print an error to stderr and abort the whole script.
fail() {
echo "[monok8s-node] ERROR: $*" >&2
exit 1
}
# Abort unless the given command is available on PATH.
need_cmd() {
command -v "$1" >/dev/null 2>&1 || fail "missing required command: $1"
}
# Abort unless the given path is an existing regular file.
require_file() {
[ -f "$1" ] || fail "required file not found: $1"
}
# Source node.env and default every consumed variable to empty.
# NOTE(review): bash usually pre-sets HOSTNAME to the current machine name, so
# validate_config's "HOSTNAME is required" check can pass even when node.env
# omits it — confirm this is acceptable.
load_config() {
require_file "$NODE_ENV"
# shellcheck disable=SC1090
. "$NODE_ENV"
HOSTNAME="${HOSTNAME:-}"
MGMT_IFACE="${MGMT_IFACE:-}"
MGMT_ADDRESS="${MGMT_ADDRESS:-}"
MGMT_GATEWAY="${MGMT_GATEWAY:-}"
DNS_NAMESERVERS="${DNS_NAMESERVERS:-}"
DNS_SEARCH_DOMAINS="${DNS_SEARCH_DOMAINS:-}"
}
# Validate the values sourced from node.env; abort with a clear message on the
# first problem found.
validate_config() {
    if [ -z "$HOSTNAME" ]; then
        fail "HOSTNAME is required"
    fi
    if [ -z "$MGMT_IFACE" ]; then
        fail "MGMT_IFACE is required"
    fi
    if [ -z "$MGMT_ADDRESS" ]; then
        fail "MGMT_ADDRESS is required"
    fi
    case "$MGMT_ADDRESS" in
        */*) : ;;
        *) fail "MGMT_ADDRESS must include a CIDR prefix, example: 10.0.0.13/24" ;;
    esac
    # Reject the in-cluster DNS service IP — presumably to avoid pointing the
    # node at a resolver that only exists once the cluster is up.
    for ns in $DNS_NAMESERVERS; do
        if [ "$ns" = "10.96.0.10" ]; then
            fail "DNS_NAMESERVERS must not contain cluster DNS service IP (10.96.0.10)"
        fi
    done
}
# Verify the non-coreutils tools this script shells out to are present.
check_prereqs() {
    # only the special one, coreutils should not be checked
    for tool in ip hostname grep; do
        need_cmd "$tool"
    done
}
# Bring up the management interface, idempotently assign MGMT_ADDRESS, and
# ensure the default route points at MGMT_GATEWAY when one is configured.
configure_mgmt_interface() {
local addr_ip
ip link show "$MGMT_IFACE" >/dev/null 2>&1 || fail "interface not found: $MGMT_IFACE"
log "bringing up interface: $MGMT_IFACE"
ip link set "$MGMT_IFACE" up
addr_ip="${MGMT_ADDRESS%/*}"
# Compare only the bare IP (prefix stripped) against addresses already on the
# interface so re-runs do not error on a duplicate assignment.
if ip -o addr show dev "$MGMT_IFACE" | awk '{print $4}' | cut -d/ -f1 | grep -Fx "$addr_ip" >/dev/null 2>&1; then
log "address already present on $MGMT_IFACE: $MGMT_ADDRESS"
else
log "assigning $MGMT_ADDRESS to $MGMT_IFACE"
ip addr add "$MGMT_ADDRESS" dev "$MGMT_IFACE"
fi
if [ -n "${MGMT_GATEWAY:-}" ]; then
log "ensuring default route via $MGMT_GATEWAY"
# 'replace' makes this idempotent and overrides any existing default route.
ip route replace default via "$MGMT_GATEWAY" dev "$MGMT_IFACE"
fi
}
# Set the hostname (runtime + /etc/hostname) only when it differs, and make
# sure /etc/hosts resolves the name locally.
set_hostname_if_needed() {
local current_hostname
current_hostname="$(hostname 2>/dev/null || true)"
if [ "$current_hostname" != "$HOSTNAME" ]; then
log "setting hostname to $HOSTNAME"
hostname "$HOSTNAME"
mkdir -p /etc
printf '%s\n' "$HOSTNAME" > /etc/hostname
if [ -f /etc/hosts ]; then
# Append only if no existing entry already maps the hostname.
if ! grep -Eq "[[:space:]]$HOSTNAME([[:space:]]|$)" /etc/hosts; then
printf '127.0.0.1\tlocalhost %s\n' "$HOSTNAME" >> /etc/hosts
fi
else
cat > /etc/hosts <<EOF
127.0.0.1 localhost $HOSTNAME
EOF
fi
else
log "hostname already set: $HOSTNAME"
fi
}
# Enable IPv4 forwarding immediately via /proc and persist it via sysctl.d.
ensure_ip_forward() {
local current
current="$(cat /proc/sys/net/ipv4/ip_forward 2>/dev/null || echo 0)"
if [ "$current" != "1" ]; then
log "enabling IPv4 forwarding"
echo 1 > /proc/sys/net/ipv4/ip_forward
fi
mkdir -p /etc/sysctl.d
cat > /etc/sysctl.d/99-monok8s.conf <<'EOF'
net.ipv4.ip_forward = 1
EOF
}
# Rewrite /etc/resolv.conf from DNS_NAMESERVERS / DNS_SEARCH_DOMAINS.
# Leaves the file untouched when DNS_NAMESERVERS is unset. The new content is
# written to a temp file first and moved into place (atomic on same fs).
configure_dns() {
local tmpfile
local ns_count=0
if [ -z "$DNS_NAMESERVERS" ]; then
log "DNS_NAMESERVERS not set; leaving /etc/resolv.conf unchanged"
return
fi
mkdir -p /etc
tmpfile="/etc/resolv.conf.monok8s.tmp"
: > "$tmpfile"
if [ -n "$DNS_SEARCH_DOMAINS" ]; then
printf 'search %s\n' "$DNS_SEARCH_DOMAINS" >> "$tmpfile"
fi
# Whitespace-separated list; relies on word splitting of the unquoted var.
for ns in $DNS_NAMESERVERS; do
printf 'nameserver %s\n' "$ns" >> "$tmpfile"
ns_count=$((ns_count + 1))
done
# Guards against a DNS_NAMESERVERS value that is non-empty but all whitespace.
[ "$ns_count" -gt 0 ] || fail "DNS_NAMESERVERS is set but no valid nameservers were parsed"
printf 'options timeout:2 attempts:3\n' >> "$tmpfile"
mv "$tmpfile" /etc/resolv.conf
log "configured /etc/resolv.conf from DNS_NAMESERVERS"
}
# Print a human-readable recap of what was applied.
print_summary() {
log "node configuration applied"
log "hostname: $HOSTNAME"
log "interface: $MGMT_IFACE"
log "address: $MGMT_ADDRESS"
if [ -n "${MGMT_GATEWAY:-}" ]; then
log "gateway: $MGMT_GATEWAY"
else
log "gateway: <not set>"
fi
if [ -n "${DNS_NAMESERVERS:-}" ]; then
log "dns nameservers: $DNS_NAMESERVERS"
else
log "dns nameservers: <unchanged>"
fi
if [ -n "${DNS_SEARCH_DOMAINS:-}" ]; then
log "dns search: $DNS_SEARCH_DOMAINS"
else
log "dns search: <not set>"
fi
}
# Entry point: load + validate config, then apply each subsystem in order.
main() {
load_config
validate_config
check_prereqs
ensure_ip_forward
configure_mgmt_interface
configure_dns
set_hostname_if_needed
print_summary
}
main "$@"

View File

@@ -1,820 +0,0 @@
#!/bin/sh
# bootstrap-cluster.sh: init/join/upgrade a Kubernetes node with kubeadm,
# driven entirely by variables from cluster.env.
# NOTE(review): per the hunk header this script is deleted by this commit
# (replaced by `ctl init --env-file`).
set -eu
CONFIG_DIR="${CONFIG_DIR:-/opt/monok8s/config}"
CLUSTER_ENV="${CONFIG_DIR}/cluster.env"
KUBEADM_CONFIG_OUT="${KUBEADM_CONFIG_OUT:-/tmp/kubeadm-init.yaml}"
ADMIN_KUBECONFIG="/etc/kubernetes/admin.conf"
KUBELET_KUBECONFIG="/etc/kubernetes/kubelet.conf"
# Prefixed stdout logger.
log() {
echo "[monok8s] $*"
}
# Print an error to stderr and abort the whole script.
fail() {
echo "[monok8s] ERROR: $*" >&2
exit 1
}
# Abort unless the named command is on PATH.
need_cmd() {
command -v "$1" >/dev/null 2>&1 || fail "missing required command: $1"
}
# Abort unless the path is an existing regular file.
require_file() {
[ -f "$1" ] || fail "required file not found: $1"
}
# Source cluster.env; the ':?' expansions abort with a message when a
# mandatory variable is missing, everything else gets a default.
load_config() {
require_file "$CLUSTER_ENV"
# shellcheck disable=SC1090
. "$CLUSTER_ENV"
: "${KUBERNETES_VERSION:?KUBERNETES_VERSION is required}"
: "${NODE_NAME:?NODE_NAME is required}"
: "${APISERVER_ADVERTISE_ADDRESS:?APISERVER_ADVERTISE_ADDRESS is required}"
POD_SUBNET="${POD_SUBNET:-10.244.0.0/16}"
SERVICE_SUBNET="${SERVICE_SUBNET:-10.96.0.0/12}"
CLUSTER_NAME="${CLUSTER_NAME:-monok8s}"
CLUSTER_DOMAIN="${CLUSTER_DOMAIN:-cluster.local}"
CONTAINER_RUNTIME_ENDPOINT="${CONTAINER_RUNTIME_ENDPOINT:-unix:///var/run/crio/crio.sock}"
SANS="${SANS:-}"
ALLOW_SCHEDULING_ON_CONTROL_PLANE="${ALLOW_SCHEDULING_ON_CONTROL_PLANE:-yes}"
SKIP_IMAGE_CHECK="${SKIP_IMAGE_CHECK:-no}"
KUBE_PROXY_NODEPORT_ADDRESSES="${KUBE_PROXY_NODEPORT_ADDRESSES:-primary}"
BOOTSTRAP_MODE="${BOOTSTRAP_MODE:-init}"
JOIN_KIND="${JOIN_KIND:-worker}"
API_SERVER_ENDPOINT="${API_SERVER_ENDPOINT:-}"
BOOTSTRAP_TOKEN="${BOOTSTRAP_TOKEN:-}"
DISCOVERY_TOKEN_CA_CERT_HASH="${DISCOVERY_TOKEN_CA_CERT_HASH:-}"
CONTROL_PLANE_CERT_KEY="${CONTROL_PLANE_CERT_KEY:-}"
CNI_PLUGIN="${CNI_PLUGIN:-none}"
}
# kubectl against the admin kubeconfig (control-plane credentials).
kubectl_admin() {
kubectl --kubeconfig "$ADMIN_KUBECONFIG" "$@"
}
# kubectl against the kubelet kubeconfig (node credentials).
kubectl_kubelet() {
kubectl --kubeconfig "$KUBELET_KUBECONFIG" "$@"
}
# Best-effort start; '|| true' keeps 'set -e' from aborting when the service
# is already running (or fails to start — the readiness check below catches that).
start_kubelet() {
log "starting kubelet..."
rc-service kubelet start >/dev/null 2>&1 || true
}
restart_kubelet() {
log "restarting kubelet..."
rc-service kubelet restart
}
# Poll OpenRC for up to ~30s until kubelet reports running.
check_kubelet_running() {
log "waiting for kubelet to become ready..."
last_status="unknown"
for _ in $(seq 1 30); do
if rc-service kubelet status >/dev/null 2>&1; then
log "kubelet is up"
return 0
fi
last_status="service-not-running"
sleep 1
done
fail "kubelet did not become ready in time (${last_status})"
}
# A node is a local control-plane iff the static apiserver manifest exists.
is_local_control_plane_node() {
[ -f /etc/kubernetes/manifests/kube-apiserver.yaml ]
}
# Poll TCP 6443 on the advertise address for up to ~3 minutes.
wait_for_local_apiserver() {
need_cmd nc
log "waiting for local API server on ${APISERVER_ADVERTISE_ADDRESS}:6443..."
for _ in $(seq 1 90); do
if nc -z "${APISERVER_ADVERTISE_ADDRESS}" 6443 >/dev/null 2>&1; then
log "local API server TCP port is reachable"
return 0
fi
sleep 2
done
fail "local API server did not become reachable on ${APISERVER_ADVERTISE_ADDRESS}:6443"
}
# Poll the API via admin.conf for up to ~3 minutes.
wait_for_admin_api() {
[ -f "$ADMIN_KUBECONFIG" ] || fail "missing admin kubeconfig: $ADMIN_KUBECONFIG"
log "waiting for Kubernetes API to respond via admin.conf..."
for _ in $(seq 1 90); do
if kubectl_admin version -o yaml >/dev/null 2>&1; then
log "Kubernetes API is responding"
return 0
fi
sleep 2
done
fail "Kubernetes API did not become ready in time"
}
# If kubeconfigs from a previous run exist, bring the node back up before the
# reconcile/upgrade decision is made; a fresh node falls straight through.
wait_for_existing_cluster_if_needed() {
case "$BOOTSTRAP_MODE" in
init)
if [ -f "$ADMIN_KUBECONFIG" ]; then
start_kubelet
check_kubelet_running
if is_local_control_plane_node; then
wait_for_local_apiserver
fi
wait_for_admin_api
fi
;;
join)
if [ -f "$KUBELET_KUBECONFIG" ]; then
start_kubelet
check_kubelet_running
fi
;;
esac
}
# Extract serverVersion.gitVersion from 'kubectl version -o yaml'.
# NOTE(review): both functions below are REDEFINED later in this file (after
# get_kubeadm_binary_version) with hard-coded kubeconfig paths and without the
# 2>/dev/null; in sh the later definitions win, so these two are dead code.
# Remove one pair to avoid divergence.
get_cluster_server_version() {
kubectl_admin version -o yaml 2>/dev/null \
| awk '
$1 == "serverVersion:" { in_server=1; next }
in_server && $1 == "gitVersion:" { print $2; exit }
'
}
get_api_server_version_from_kubelet_kubeconfig() {
kubectl_kubelet version -o yaml 2>/dev/null \
| awk '
$1 == "serverVersion:" { in_server=1; next }
in_server && $1 == "gitVersion:" { print $2; exit }
'
}
# Validate KUBE_PROXY_NODEPORT_ADDRESSES: empty, the literal 'primary', or a
# comma-separated list of IPv4/IPv6 CIDRs. Aborts on the first bad entry.
validate_cidr_list_or_primary() {
value="$1"
[ -n "$value" ] || return 0
if [ "$value" = "primary" ]; then
return 0
fi
# Temporarily split on commas; IFS is restored after the loop. (fail exits
# the whole script, so the restore being skipped on error is harmless.)
old_ifs="$IFS"
IFS=','
for item in $value; do
trimmed="$(printf '%s' "$item" | sed 's/^ *//;s/ *$//')"
[ -n "$trimmed" ] || fail "KUBE_PROXY_NODEPORT_ADDRESSES contains an empty entry"
case "$trimmed" in
*/*)
;;
*)
fail "KUBE_PROXY_NODEPORT_ADDRESSES must be 'primary' or a comma-separated list of CIDRs"
;;
esac
ip_part="${trimmed%/*}"
prefix_part="${trimmed#*/}"
printf '%s' "$prefix_part" | grep -Eq '^[0-9]+$' \
|| fail "invalid CIDR prefix in KUBE_PROXY_NODEPORT_ADDRESSES: $trimmed"
# Prefix range: 0-128 for IPv6 (contains ':'), 0-32 for dotted IPv4.
case "$ip_part" in
*:*)
printf '%s' "$prefix_part" | awk '{ exit !($1 >= 0 && $1 <= 128) }' \
|| fail "invalid IPv6 CIDR prefix in KUBE_PROXY_NODEPORT_ADDRESSES: $trimmed"
;;
*.*.*.*)
printf '%s' "$prefix_part" | awk '{ exit !($1 >= 0 && $1 <= 32) }' \
|| fail "invalid IPv4 CIDR prefix in KUBE_PROXY_NODEPORT_ADDRESSES: $trimmed"
;;
*)
fail "invalid CIDR entry in KUBE_PROXY_NODEPORT_ADDRESSES: $trimmed"
;;
esac
done
IFS="$old_ifs"
}
# Cross-field validation: join mode needs endpoint/token/CA hash, and a
# control-plane join additionally needs the certificate key.
validate_config() {
case "$BOOTSTRAP_MODE" in
init)
;;
join)
: "${API_SERVER_ENDPOINT:?API_SERVER_ENDPOINT is required for join mode}"
: "${BOOTSTRAP_TOKEN:?BOOTSTRAP_TOKEN is required for join mode}"
: "${DISCOVERY_TOKEN_CA_CERT_HASH:?DISCOVERY_TOKEN_CA_CERT_HASH is required for join mode}"
case "$JOIN_KIND" in
worker|control-plane)
;;
*)
fail "JOIN_KIND must be 'worker' or 'control-plane'"
;;
esac
if [ "$JOIN_KIND" = "control-plane" ]; then
: "${CONTROL_PLANE_CERT_KEY:?CONTROL_PLANE_CERT_KEY is required for JOIN_KIND=control-plane}"
fi
;;
*)
fail "BOOTSTRAP_MODE must be 'init' or 'join'"
;;
esac
validate_cidr_list_or_primary "$KUBE_PROXY_NODEPORT_ADDRESSES"
}
# Drop a leading "v" from a version string (v1.30.1 -> 1.30.1).
normalize_version() {
    printf '%s\n' "${1#v}"
}
# Reduce a version to its "major.minor" part.
version_major_minor() {
    normalize_version "$1" | awk -F. '{ print $1 "." $2 }'
}
# True when both versions are identical after normalisation.
version_eq() {
    test "$(normalize_version "$1")" = "$(normalize_version "$2")"
}
# True when $1 sorts strictly before $2 under version sort.
version_lt() {
    lo="$(printf '%s\n%s\n' "$(normalize_version "$1")" "$(normalize_version "$2")" | sort -V | head -n1)"
    test "$lo" != "$(normalize_version "$2")"
}
# True when $1 sorts strictly after $2 under version sort.
version_gt() {
    hi="$(printf '%s\n%s\n' "$(normalize_version "$1")" "$(normalize_version "$2")" | sort -V | tail -n1)"
    test "$hi" = "$(normalize_version "$1")" && ! version_eq "$1" "$2"
}
# Print the signed minor-version delta $2 - $1; aborts on a major change.
minor_diff() {
    a="$(version_major_minor "$1")"
    b="$(version_major_minor "$2")"
    if [ "${a%.*}" != "${b%.*}" ]; then
        fail "major version change unsupported here: $1 -> $2"
    fi
    echo $(( ${b#*.} - ${a#*.} ))
}
# Version of the locally installed kubeadm binary (e.g. v1.30.1).
get_kubeadm_binary_version() {
    kubeadm version -o short
}
# NOTE(review): DUPLICATE definitions. Both functions were already defined
# earlier in this file using the kubectl_admin/kubectl_kubelet wrappers and a
# 2>/dev/null; since sh keeps the last definition, THESE are the ones that run
# (hard-coded kubeconfig paths, kubectl errors printed to stderr). One pair
# should be deleted so the two copies cannot drift apart.
get_cluster_server_version() {
kubectl --kubeconfig /etc/kubernetes/admin.conf version -o yaml \
| awk '
$1 == "serverVersion:" { in_server=1; next }
in_server && $1 == "gitVersion:" { print $2; exit }
'
}
get_api_server_version_from_kubelet_kubeconfig() {
kubectl --kubeconfig /etc/kubernetes/kubelet.conf version -o yaml \
| awk '
$1 == "serverVersion:" { in_server=1; next }
in_server && $1 == "gitVersion:" { print $2; exit }
'
}
# The target version must exactly match the installed kubeadm binary.
validate_target_matches_local_binaries() {
kubeadm_ver="$(get_kubeadm_binary_version)"
if ! version_eq "$kubeadm_ver" "$KUBERNETES_VERSION"; then
fail "kubeadm binary version ($kubeadm_ver) does not match target KUBERNETES_VERSION ($KUBERNETES_VERSION)"
fi
}
# Pick BOOTSTRAP_ACTION (a global consumed by main) from the mode plus the
# on-disk state: fresh node -> init/join; existing node at the target version
# -> reconcile; existing node at another version -> upgrade.
decide_bootstrap_action() {
current_version=""
case "$BOOTSTRAP_MODE" in
init)
if [ ! -f "$ADMIN_KUBECONFIG" ]; then
BOOTSTRAP_ACTION="init"
log "selected bootstrap action: $BOOTSTRAP_ACTION"
return 0
fi
# '|| true' keeps set -e from aborting; emptiness is checked explicitly.
current_version="$(get_cluster_server_version || true)"
[ -n "$current_version" ] || fail "existing control-plane config found, but cluster version could not be determined"
log "detected existing control-plane version: $current_version"
if version_eq "$current_version" "$KUBERNETES_VERSION"; then
BOOTSTRAP_ACTION="reconcile-control-plane"
else
BOOTSTRAP_ACTION="upgrade-control-plane"
fi
;;
join)
if [ ! -f "$KUBELET_KUBECONFIG" ]; then
BOOTSTRAP_ACTION="join"
log "selected bootstrap action: $BOOTSTRAP_ACTION"
return 0
fi
current_version="$(get_api_server_version_from_kubelet_kubeconfig || true)"
[ -n "$current_version" ] || fail "existing kubelet config found, but cluster version could not be determined"
log "detected cluster version visible from this node: $current_version"
if version_eq "$current_version" "$KUBERNETES_VERSION"; then
BOOTSTRAP_ACTION="reconcile-node"
else
BOOTSTRAP_ACTION="upgrade-node"
fi
;;
*)
fail "unsupported BOOTSTRAP_MODE: $BOOTSTRAP_MODE"
;;
esac
log "selected bootstrap action: $BOOTSTRAP_ACTION"
}
# No version change: just make sure services are up and metadata is applied.
reconcile_control_plane() {
log "reconciling existing control-plane node"
start_kubelet
check_kubelet_running
wait_for_local_apiserver
wait_for_admin_api
apply_local_node_metadata_if_possible
allow_single_node_scheduling
}
# No version change on a joined node: just make sure kubelet is running.
reconcile_node() {
log "reconciling existing joined node"
start_kubelet
check_kubelet_running
}
# Ensure current -> target is a supported move: same version is a no-op,
# downgrades are rejected, and at most one minor-version step is allowed.
validate_upgrade_path() {
    from="$1"
    to="$2"
    if version_eq "$from" "$to"; then
        log "cluster is already at target version: $to"
        return 0
    fi
    if version_gt "$from" "$to"; then
        fail "downgrade is not supported: current=$from target=$to"
    fi
    step="$(minor_diff "$from" "$to")"
    if [ "$step" != "0" ] && [ "$step" != "1" ]; then
        fail "unsupported upgrade path: current=$from target=$to (minor skip too large)"
    fi
}
check_upgrade_prereqs() {
validate_target_matches_local_binaries
}
# Upgrade the control plane via kubeadm after validating the version path.
run_kubeadm_upgrade_apply() {
current_version="$(get_cluster_server_version || true)"
[ -n "$current_version" ] || fail "cannot determine current control-plane version; API server is not reachable"
log "current control-plane version: $current_version"
log "target control-plane version: $KUBERNETES_VERSION"
validate_upgrade_path "$current_version" "$KUBERNETES_VERSION"
if version_eq "$current_version" "$KUBERNETES_VERSION"; then
log "control-plane already at target version; skipping kubeadm upgrade apply"
return 0
fi
log "running kubeadm upgrade plan..."
kubeadm upgrade plan "$KUBERNETES_VERSION"
log "running kubeadm upgrade apply..."
kubeadm upgrade apply -y "$KUBERNETES_VERSION"
}
# Upgrade a non-control-plane node; refuses to run ahead of the control plane.
run_kubeadm_upgrade_node() {
cluster_version="$(get_api_server_version_from_kubelet_kubeconfig)"
log "cluster/control-plane version visible from this node: $cluster_version"
log "target node version: $KUBERNETES_VERSION"
if ! version_eq "$cluster_version" "$KUBERNETES_VERSION"; then
fail "control-plane version ($cluster_version) does not match target ($KUBERNETES_VERSION); upgrade control-plane first"
fi
log "running kubeadm upgrade node..."
kubeadm upgrade node
}
# All external tools this script shells out to.
check_prereqs() {
need_cmd kubeadm
need_cmd kubelet
need_cmd kubectl
need_cmd crictl
need_cmd rc-service
need_cmd awk
need_cmd ip
need_cmd grep
need_cmd sed
need_cmd hostname
}
# TCP-probe the join endpoint for up to ~20s.
# NOTE(review): "%:*" / "##*:" splitting assumes host:port with a single
# meaningful colon; a bracketed IPv6 endpoint like [::1]:6443 would be split
# incorrectly — confirm only IPv4/hostname endpoints are used.
check_apiserver_reachable() {
host="${API_SERVER_ENDPOINT%:*}"
port="${API_SERVER_ENDPOINT##*:}"
need_cmd nc
log "checking API server reachability: ${host}:${port}"
for _ in $(seq 1 20); do
if nc -z "$host" "$port" >/dev/null 2>&1; then
log "API server is reachable"
return 0
fi
sleep 1
done
fail "cannot reach API server at ${host}:${port}"
}
start_crio() {
rc-service crio start
}
# Wait until both the OpenRC service and the CRI runtime socket respond;
# last_status records why we were still waiting, for the failure message.
check_crio_running() {
log "waiting for CRI-O to become ready..."
last_status="unknown"
for _ in $(seq 1 30); do
if rc-service crio status >/dev/null 2>&1; then
last_status="service-running"
if crictl --runtime-endpoint "$CONTAINER_RUNTIME_ENDPOINT" info >/dev/null 2>&1; then
log "CRI-O is up"
return 0
fi
last_status="service-running-but-runtime-not-ready"
else
last_status="service-not-running"
fi
sleep 1
done
fail "CRI-O did not become ready in time (${last_status})"
}
# True when repo:tag is in crictl's image list (exact match, header skipped).
# NOTE(review): "%:*" keeps everything up to the LAST colon as the repo, which
# handles registry:port/repo:tag, but digest references (@sha256:...) would
# not match — confirm kubeadm only lists tag-style references here.
image_present() {
wanted="$1"
repo="${wanted%:*}"
tag="${wanted##*:}"
crictl --runtime-endpoint "$CONTAINER_RUNTIME_ENDPOINT" images \
| awk 'NR>1 { print $1 ":" $2 }' \
| grep -Fx "$repo:$tag" >/dev/null 2>&1
}
# Verify every image kubeadm needs is preloaded; lists all missing images
# before failing rather than stopping at the first one.
check_required_images() {
[ "$SKIP_IMAGE_CHECK" = "yes" ] && {
log "skipping image check (SKIP_IMAGE_CHECK=yes)"
return 0
}
log "checking required Kubernetes images for $KUBERNETES_VERSION..."
missing_any=0
for img in $(kubeadm config images list --kubernetes-version "$KUBERNETES_VERSION"); do
if image_present "$img"; then
log "found image: $img"
else
echo "[monok8s] MISSING image: $img" >&2
missing_any=1
fi
done
[ "$missing_any" -eq 0 ] || fail "preload the Kubernetes images before bootstrapping"
log "all required images are present"
}
# NOTE(review): this guard is not called from main() in this version of the
# script (main reconciles/upgrades existing nodes instead) — dead code?
check_not_already_bootstrapped() {
case "$BOOTSTRAP_MODE" in
init)
if [ -f /etc/kubernetes/admin.conf ]; then
fail "cluster already appears initialized (/etc/kubernetes/admin.conf exists)"
fi
;;
join)
if [ -f /etc/kubernetes/kubelet.conf ]; then
fail "node already appears joined (/etc/kubernetes/kubelet.conf exists)"
fi
;;
esac
}
# Join this node to an existing cluster as a worker or extra control-plane.
run_kubeadm_join() {
log "running kubeadm join..."
case "$JOIN_KIND" in
worker)
kubeadm join "${API_SERVER_ENDPOINT}" \
--token "${BOOTSTRAP_TOKEN}" \
--discovery-token-ca-cert-hash "${DISCOVERY_TOKEN_CA_CERT_HASH}" \
--node-name "${NODE_NAME}" \
--cri-socket "${CONTAINER_RUNTIME_ENDPOINT}"
;;
control-plane)
kubeadm join "${API_SERVER_ENDPOINT}" \
--token "${BOOTSTRAP_TOKEN}" \
--discovery-token-ca-cert-hash "${DISCOVERY_TOKEN_CA_CERT_HASH}" \
--control-plane \
--certificate-key "${CONTROL_PLANE_CERT_KEY}" \
--apiserver-advertise-address "${APISERVER_ADVERTISE_ADDRESS}" \
--node-name "${NODE_NAME}" \
--cri-socket "${CONTAINER_RUNTIME_ENDPOINT}"
;;
esac
}
# Emit an optional KubeProxyConfiguration YAML document ('---'-prefixed) for
# nodePortAddresses: nothing when unset, a literal 'primary', or one entry per
# comma-separated CIDR. Output is appended to the kubeadm config by the caller.
generate_kube_proxy_config_block() {
if [ -z "${KUBE_PROXY_NODEPORT_ADDRESSES:-}" ]; then
return 0
fi
if [ "$KUBE_PROXY_NODEPORT_ADDRESSES" = "primary" ]; then
cat <<EOF
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
nodePortAddresses:
- primary
EOF
return 0
fi
echo "---"
echo "apiVersion: kubeproxy.config.k8s.io/v1alpha1"
echo "kind: KubeProxyConfiguration"
echo "nodePortAddresses:"
old_ifs="$IFS"
IFS=','
for item in $KUBE_PROXY_NODEPORT_ADDRESSES; do
trimmed="$(printf '%s' "$item" | sed 's/^ *//;s/ *$//')"
[ -n "$trimmed" ] && printf ' - "%s"\n' "$trimmed"
done
IFS="$old_ifs"
}
# Render the kubeadm Init/Cluster/Kubelet configuration documents to
# KUBEADM_CONFIG_OUT, expanding the cluster.env values; SAN_LINES accumulates
# one YAML list item (plus newline) per comma-separated entry in SANS.
generate_kubeadm_config() {
log "generating kubeadm config at $KUBEADM_CONFIG_OUT..."
SAN_LINES=""
if [ -n "${SANS:-}" ]; then
old_ifs="$IFS"
IFS=','
for san in $SANS; do
san_trimmed="$(echo "$san" | sed 's/^ *//;s/ *$//')"
[ -n "$san_trimmed" ] && SAN_LINES="${SAN_LINES} - \"${san_trimmed}\"
"
done
IFS="$old_ifs"
fi
cat > "$KUBEADM_CONFIG_OUT" <<EOF
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: ${APISERVER_ADVERTISE_ADDRESS}
bindPort: 6443
nodeRegistration:
name: ${NODE_NAME}
criSocket: ${CONTAINER_RUNTIME_ENDPOINT}
imagePullPolicy: IfNotPresent
kubeletExtraArgs:
- name: hostname-override
value: "${NODE_NAME}"
- name: node-ip
value: "${APISERVER_ADVERTISE_ADDRESS}"
- name: pod-manifest-path
value: "/etc/kubernetes/manifests"
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
clusterName: ${CLUSTER_NAME}
kubernetesVersion: ${KUBERNETES_VERSION}
networking:
podSubnet: ${POD_SUBNET}
serviceSubnet: ${SERVICE_SUBNET}
dnsDomain: ${CLUSTER_DOMAIN}
apiServer:
certSANs:
- "${APISERVER_ADVERTISE_ADDRESS}"
${SAN_LINES}---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: cgroupfs
containerRuntimeEndpoint: ${CONTAINER_RUNTIME_ENDPOINT}
EOF
generate_kube_proxy_config_block >> "$KUBEADM_CONFIG_OUT"
}
# Initialise a fresh control plane from the generated config file.
run_kubeadm_init() {
log "running kubeadm init..."
kubeadm init --config "$KUBEADM_CONFIG_OUT"
}
# Abort unless the given IP is assigned to some local interface.
require_local_ip() {
wanted_ip="$1"
ip -o addr show | awk '{print $4}' | cut -d/ -f1 | grep -Fx "$wanted_ip" >/dev/null 2>&1 \
|| fail "required local IP is not present on any interface: $wanted_ip"
}
# Init: the advertise address must be local. Join: additionally the remote
# API server endpoint must be reachable.
validate_network_requirements() {
case "$BOOTSTRAP_MODE" in
init)
require_local_ip "$APISERVER_ADVERTISE_ADDRESS"
;;
join)
require_local_ip "$APISERVER_ADVERTISE_ADDRESS"
check_apiserver_reachable
;;
*)
fail "unsupported BOOTSTRAP_MODE: $BOOTSTRAP_MODE"
;;
esac
}
# Poll for up to ~60s until the node object exists in the API.
wait_for_node() {
log "waiting for node registration: $NODE_NAME"
for _ in $(seq 1 60); do
if kubectl --kubeconfig /etc/kubernetes/admin.conf get node "$NODE_NAME" >/dev/null 2>&1; then
return 0
fi
sleep 1
done
fail "node $NODE_NAME did not register in time"
}
# Apply NODE_LABELS / NODE_ANNOTATIONS (comma-separated key=value pairs) to
# this node. Only possible in init mode, where admin credentials exist.
# The unquoted $(...) expansions rely on word splitting so each comma-turned-
# space pair becomes a separate kubectl argument.
apply_local_node_metadata_if_possible() {
if [ "$BOOTSTRAP_MODE" != "init" ]; then
log "skipping node labels/annotations from this node (not control-plane init mode)"
return 0
fi
wait_for_node
if [ -n "${NODE_ANNOTATIONS:-}" ]; then
kubectl --kubeconfig /etc/kubernetes/admin.conf annotate node "$NODE_NAME" $(printf '%s' "$NODE_ANNOTATIONS" | tr ',' ' ') --overwrite
fi
if [ -n "${NODE_LABELS:-}" ]; then
kubectl --kubeconfig /etc/kubernetes/admin.conf label node "$NODE_NAME" $(printf '%s' "$NODE_LABELS" | tr ',' ' ') --overwrite
fi
}
# Toggle CRI-O's bundled bridge CNI config on disk according to CNI_PLUGIN:
# "none" parks it under a .disabled suffix, "bridge" restores it, anything
# else is a configuration error.
install_cni_if_requested() {
    active="/etc/cni/net.d/10-crio-bridge.conflist"
    parked="/etc/cni/net.d/10-crio-bridge.conflist.disabled"
    if [ "${CNI_PLUGIN}" = "none" ]; then
        [ -f "$active" ] && mv "$active" "$parked"
        log "bootstrap bridge CNI disabled; install a cluster CNI (e.g., flannel) for pod networking"
    elif [ "${CNI_PLUGIN}" = "bridge" ]; then
        [ -f "$parked" ] && mv "$parked" "$active"
        log "bootstrap bridge CNI enabled"
    else
        fail "unsupported CNI_PLUGIN: ${CNI_PLUGIN}"
    fi
}
# Drop the control-plane NoSchedule taint when single-node scheduling is
# enabled; errors (taint already gone, API hiccup) are deliberately ignored.
allow_single_node_scheduling() {
    case "$ALLOW_SCHEDULING_ON_CONTROL_PLANE" in
        yes) ;;
        *)
            log "leaving control-plane taint in place"
            return 0
            ;;
    esac
    log "removing control-plane taint so this single node can schedule workloads..."
    kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes "$NODE_NAME" node-role.kubernetes.io/control-plane- >/dev/null 2>&1 || true
}
# Human-facing summary printed at the very end, tailored to the mode.
print_next_steps() {
echo
echo "[monok8s] bootstrap complete"
echo
case "$BOOTSTRAP_MODE" in
init)
cat <<EOF
Notes:
- On a fresh cluster without a CNI, nodes may stay NotReady.
- If you want pods to run on this same node, keep ALLOW_SCHEDULING_ON_CONTROL_PLANE=yes.
- kube-proxy nodePortAddresses is set to: ${KUBE_PROXY_NODEPORT_ADDRESSES:-<unset>}
EOF
;;
join)
cat <<EOF
This node has attempted to join the cluster.
Check from the control-plane node:
kubectl get nodes -o wide
kubectl describe node ${NODE_NAME}
Notes:
- This node can join without a CNI.
- Without a cluster CNI, the node may remain NotReady.
EOF
;;
esac
}
# Entry point: validate everything up front, bring up the runtime, then
# dispatch on the action decide_bootstrap_action selected (fresh init/join,
# reconcile of an already-bootstrapped node, or an in-place upgrade).
main() {
load_config
validate_config
check_prereqs
validate_network_requirements
install_cni_if_requested
start_crio
check_crio_running
wait_for_existing_cluster_if_needed
decide_bootstrap_action
case "$BOOTSTRAP_ACTION" in
init)
check_required_images
generate_kubeadm_config
run_kubeadm_init
restart_kubelet
apply_local_node_metadata_if_possible
allow_single_node_scheduling
;;
reconcile-control-plane)
reconcile_control_plane
;;
upgrade-control-plane)
check_upgrade_prereqs
check_required_images
generate_kubeadm_config
run_kubeadm_upgrade_apply
restart_kubelet
apply_local_node_metadata_if_possible
allow_single_node_scheduling
;;
join)
run_kubeadm_join
;;
reconcile-node)
reconcile_node
;;
upgrade-node)
check_upgrade_prereqs
run_kubeadm_upgrade_node
restart_kubelet
;;
*)
fail "unsupported BOOTSTRAP_ACTION: $BOOTSTRAP_ACTION"
;;
esac
print_next_steps
}
main "$@"

View File

@@ -1,7 +1,9 @@
package initcmd
import (
"bufio"
"fmt"
"os"
"sort"
"strconv"
"strings"
@@ -12,18 +14,28 @@ import (
"undecided.project/monok8s/pkg/bootstrap"
"undecided.project/monok8s/pkg/config"
types "undecided.project/monok8s/pkg/apis/monok8s/v1alpha1"
"undecided.project/monok8s/pkg/templates"
)
func NewCmdInit(_ *genericclioptions.ConfigFlags) *cobra.Command {
var configPath string
var envFile string
cmd := &cobra.Command{
Use: "init [list|STEPSEL]",
Short: "Start the bootstrap process for this node",
Use: "init [list|STEPSEL] [--config path | --env-file path]",
Short: "Bootstrap this node (from config file or env file)",
Long: `Run the node bootstrap process.
You can provide configuration in two ways:
--config PATH Load MonoKSConfig YAML
--env-file PATH Load MKS_* variables from env file and render config
STEPSEL allows running specific steps instead of the full sequence.
It supports:
Supported formats:
3 Run step 3
1-3 Run steps 1 through 3
@@ -33,23 +45,54 @@ It supports:
9-10,15 Combine ranges and individual steps
`,
Example: `
ctl init
# Run full bootstrap using config file
ctl init --config /etc/monok8s/config.yaml
# Run full bootstrap using env file
ctl init --env-file /opt/monok8s/config/cluster.env
# List steps
ctl init list
ctl init 1-3
ctl init -3
ctl init 3-
ctl init 1,3,5
ctl init 9-10,15
# Run selected steps
ctl init 1-3 --env-file /opt/monok8s/config/cluster.env
ctl init 3- --config /etc/monok8s/config.yaml
`,
Args: cobra.MaximumNArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
path, err := (config.Loader{}).ResolvePath(configPath)
if err != nil {
return err
if strings.TrimSpace(configPath) != "" && strings.TrimSpace(envFile) != "" {
return fmt.Errorf("--config and --env-file are mutually exclusive")
}
cfg, err := (config.Loader{}).Load(path)
if err != nil {
return err
if strings.TrimSpace(envFile) != "" {
if err := loadEnvFile(envFile); err != nil {
return fmt.Errorf("load env file %q: %w", envFile, err)
}
}
var cfg *types.MonoKSConfig // or value, depending on your API
switch {
case strings.TrimSpace(envFile) != "":
if err := loadEnvFile(envFile); err != nil {
return fmt.Errorf("load env file %q: %w", envFile, err)
}
vals := templates.LoadTemplateValuesFromEnv()
rendered := templates.DefaultMonoKSConfig(vals)
cfg = &rendered
default:
path, err := (config.Loader{}).ResolvePath(configPath)
if err != nil {
return err
}
loaded, err := (config.Loader{}).Load(path)
if err != nil {
return err
}
cfg = loaded
klog.InfoS("starting init", "config", path, "node", cfg.Spec.NodeName, "envFile", envFile)
}
runner := bootstrap.NewRunner(cfg)
@@ -59,18 +102,15 @@ It supports:
fmt.Fprintln(cmd.OutOrStdout(), "Showing current bootstrap sequence")
// width = number of digits of max step number
width := len(fmt.Sprintf("%d", len(steps)))
for i, s := range steps {
fmt.Fprintf(cmd.OutOrStdout(), "\n %*d. %s\n", width, i+1, s.Name)
fmt.Fprintf(cmd.OutOrStdout(), " %s\n", s.Desc)
}
return err
return nil
}
klog.InfoS("starting init", "config", path, "node", cfg.Spec.NodeName)
if len(args) == 0 {
return runner.Init(cmd.Context())
}
@@ -87,9 +127,60 @@ It supports:
}
cmd.Flags().StringVarP(&configPath, "config", "c", "", "path to MonoKSConfig yaml")
cmd.Flags().StringVar(&envFile, "env-file", "", "path to env file containing MKS_* variables")
return cmd
}
func loadEnvFile(path string) error {
f, err := os.Open(path)
if err != nil {
return err
}
defer f.Close()
scanner := bufio.NewScanner(f)
lineNum := 0
for scanner.Scan() {
lineNum++
line := strings.TrimSpace(scanner.Text())
if line == "" || strings.HasPrefix(line, "#") {
continue
}
key, val, ok := strings.Cut(line, "=")
if !ok {
return fmt.Errorf("line %d: expected KEY=VALUE", lineNum)
}
key = strings.TrimSpace(key)
val = strings.TrimSpace(val)
if key == "" {
return fmt.Errorf("line %d: empty variable name", lineNum)
}
// Remove matching single or double quotes around the whole value.
if len(val) >= 2 {
if (val[0] == '"' && val[len(val)-1] == '"') || (val[0] == '\'' && val[len(val)-1] == '\'') {
val = val[1 : len(val)-1]
}
}
if err := os.Setenv(key, val); err != nil {
return fmt.Errorf("line %d: set %q: %w", lineNum, key, err)
}
}
if err := scanner.Err(); err != nil {
return err
}
return nil
}
func parseStepSelection(raw string, max int) (bootstrap.StepSelection, error) {
raw = strings.TrimSpace(raw)
if raw == "" {

View File

@@ -7,11 +7,12 @@ import (
"k8s.io/apimachinery/pkg/runtime/serializer/json"
"undecided.project/monok8s/pkg/scheme"
tmpl "undecided.project/monok8s/pkg/templates"
"undecided.project/monok8s/pkg/templates"
)
func RenderMonoKSConfig() (string, error) {
cfg := tmpl.DefaultMonoKSConfig()
vals := templates.LoadTemplateValuesFromEnv()
cfg := templates.DefaultMonoKSConfig(vals)
s := runtime.NewScheme()
if err := scheme.AddToScheme(s); err != nil {
@@ -19,9 +20,7 @@ func RenderMonoKSConfig() (string, error) {
}
serializer := json.NewYAMLSerializer(
json.DefaultMetaFactory,
s,
s,
json.DefaultMetaFactory, s, s,
)
var buf bytes.Buffer
@@ -33,7 +32,8 @@ func RenderMonoKSConfig() (string, error) {
}
func RenderOSUpgrade() (string, error) {
cfg := tmpl.DefaultOSUpgrade()
vals := templates.LoadTemplateValuesFromEnv()
cfg := templates.DefaultOSUpgrade(vals)
s := runtime.NewScheme()
if err := scheme.AddToScheme(s); err != nil {
@@ -41,9 +41,7 @@ func RenderOSUpgrade() (string, error) {
}
serializer := json.NewYAMLSerializer(
json.DefaultMetaFactory,
s,
s,
json.DefaultMetaFactory, s, s,
)
var buf bytes.Buffer

View File

@@ -1,26 +1,13 @@
package templates
import (
"os"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "undecided.project/monok8s/pkg/apis/monok8s/v1alpha1"
buildinfo "undecided.project/monok8s/pkg/buildinfo"
)
var ValAPIServerEndPoint string = "10.0.0.10:6443"
var ValHostname string = "monoks-master-1"
var ValBootstrapToken string = "abcd12.ef3456789abcdef0"
var ValDiscoveryTokenCACertHash string = "sha256:9f1c2b3a4d5e6f7890abc1234567890abcdef1234567890abcdef1234567890ab"
func init() {
ValBootstrapToken = os.Getenv("HOSTNAME")
ValBootstrapToken = os.Getenv("BOOTSTRAP_TOKEN")
ValDiscoveryTokenCACertHash = os.Getenv("TOKEN_CACERT_HASH")
ValAPIServerEndPoint = os.Getenv("API_SERVER_ENDPOINT")
}
func DefaultMonoKSConfig() types.MonoKSConfig {
func DefaultMonoKSConfig(v TemplateValues) types.MonoKSConfig {
return types.MonoKSConfig{
TypeMeta: metav1.TypeMeta{
APIVersion: "monok8s.io/v1alpha1",
@@ -31,66 +18,51 @@ func DefaultMonoKSConfig() types.MonoKSConfig {
Namespace: "kube-system",
},
Spec: types.MonoKSConfigSpec{
KubernetesVersion: buildinfo.Version,
NodeName: ValHostname,
KubernetesVersion: v.KubernetesVersion,
NodeName: firstNonEmpty(v.NodeName, v.Hostname),
ClusterRole: "control-plane",
InitControlPlane: true,
ClusterRole: clusterRoleFromTemplateValues(v),
InitControlPlane: initControlPlaneFromTemplateValues(v),
ClusterName: "monok8s",
ClusterDomain: "cluster.local",
ClusterName: v.ClusterName,
ClusterDomain: v.ClusterDomain,
PodSubnet: "10.244.0.0/16",
ServiceSubnet: "10.96.0.0/12",
PodSubnet: v.PodSubnet,
ServiceSubnet: v.ServiceSubnet,
APIServerAdvertiseAddress: "10.0.0.10",
APIServerEndpoint: ValAPIServerEndPoint,
APIServerAdvertiseAddress: v.APIServerAdvertiseAddress,
APIServerEndpoint: v.APIServerEndpoint,
// Fake token and hash for placeholder purpose
BootstrapToken: ValBootstrapToken,
DiscoveryTokenCACertHash: ValDiscoveryTokenCACertHash,
BootstrapToken: v.BootstrapToken,
DiscoveryTokenCACertHash: v.DiscoveryTokenCACertHash,
ContainerRuntimeEndpoint: "unix:///var/run/crio/crio.sock",
ContainerRuntimeEndpoint: v.ContainerRuntimeEndpoint,
CNIPlugin: v.CNIPlugin,
CNIPlugin: "default",
AllowSchedulingOnControlPlane: true,
SkipImageCheck: false,
AllowSchedulingOnControlPlane: v.AllowSchedulingOnControlPlane,
SkipImageCheck: v.SkipImageCheck,
KubeProxyNodePortAddresses: []string{
"primary",
},
SubjectAltNames: []string{
"10.0.0.10", "localhost", ValHostname,
},
NodeLabels: map[string]string{
"monok8s.io/label": "value",
},
NodeAnnotations: map[string]string{
"monok8s.io/annotation": "value",
},
SubjectAltNames: copyStringSlice(v.SubjectAltNames),
NodeLabels: copyStringMap(v.NodeLabels),
NodeAnnotations: copyStringMap(v.NodeAnnotations),
Network: types.NetworkSpec{
Hostname: "monok8s-worker-1",
ManagementIface: "eth1",
ManagementCIDR: "10.0.0.10/24",
ManagementGW: "10.0.0.1",
DNSNameservers: []string{
"1.1.1.1",
"8.8.8.8",
},
DNSSearchDomains: []string{
"lan",
},
Hostname: firstNonEmpty(v.Hostname, v.NodeName),
ManagementIface: v.MgmtIface,
ManagementCIDR: v.MgmtAddress,
ManagementGW: v.MgmtGateway,
DNSNameservers: copyStringSlice(v.DNSNameservers),
DNSSearchDomains: copyStringSlice(v.DNSSearchDomains),
},
},
}
}
func DefaultOSUpgrade() types.OSUpgrade {
func DefaultOSUpgrade(v TemplateValues) types.OSUpgrade {
return types.OSUpgrade{
TypeMeta: metav1.TypeMeta{
APIVersion: "monok8s.io/v1alpha1",
@@ -105,9 +77,56 @@ func DefaultOSUpgrade() types.OSUpgrade {
ImageURL: "https://example.invalid/images/monok8s-v0.0.1.img.zst",
TargetPartition: "B",
NodeSelector: []string{
ValHostname,
firstNonEmpty(v.NodeName, v.Hostname),
},
Force: false,
},
}
}
// clusterRoleFromTemplateValues maps BootstrapMode/JoinKind onto the node's
// cluster role. "init" always produces a control-plane node; "join" produces
// a worker unless JoinKind explicitly requests "control-plane". Any
// unrecognized mode falls back to "control-plane".
func clusterRoleFromTemplateValues(v TemplateValues) string {
	mode := strings.ToLower(strings.TrimSpace(v.BootstrapMode))
	if mode == "join" {
		if strings.EqualFold(strings.TrimSpace(v.JoinKind), "control-plane") {
			return "control-plane"
		}
		return "worker"
	}
	// "init" and every unknown mode bootstrap a control plane.
	return "control-plane"
}
// initControlPlaneFromTemplateValues reports whether the node should run the
// control-plane init sequence, i.e. whether BootstrapMode is "init"
// (case-insensitive, surrounding whitespace ignored).
func initControlPlaneFromTemplateValues(v TemplateValues) bool {
	mode := strings.TrimSpace(v.BootstrapMode)
	return strings.EqualFold(mode, "init")
}
// firstNonEmpty returns the first argument that is non-empty after trimming
// whitespace, with the trimming applied to the returned value as well.
// It returns "" when every argument is blank (or none are given).
func firstNonEmpty(xs ...string) string {
	for _, candidate := range xs {
		if trimmed := strings.TrimSpace(candidate); trimmed != "" {
			return trimmed
		}
	}
	return ""
}
// copyStringSlice returns an independent copy of in, or nil when in is empty.
// The copy shares no backing storage with the input, so later mutation of
// either slice does not affect the other.
func copyStringSlice(in []string) []string {
	if len(in) == 0 {
		return nil
	}
	// append to a nil slice allocates fresh backing storage of exactly len(in).
	return append([]string(nil), in...)
}
// copyStringMap returns an independent copy of in, or nil when in is empty.
// Mutating the input after the call does not affect the returned map.
func copyStringMap(in map[string]string) map[string]string {
	if len(in) == 0 {
		return nil
	}
	dup := make(map[string]string, len(in))
	for key, val := range in {
		dup[key] = val
	}
	return dup
}

View File

@@ -0,0 +1,204 @@
package templates
import (
"os"
"strings"
buildinfo "undecided.project/monok8s/pkg/buildinfo"
)
// TemplateValues carries the substitution values used to render the default
// MonoKS configuration objects. It is normally populated from MKS_*-prefixed
// environment variables via LoadTemplateValuesFromEnv.
type TemplateValues struct {
	// Node identity.
	Hostname string
	NodeName string

	// Cluster-wide settings.
	KubernetesVersion string
	ClusterName       string
	ClusterDomain     string
	PodSubnet         string
	ServiceSubnet     string

	// API server reachability.
	APIServerAdvertiseAddress string
	APIServerEndpoint         string

	// Bootstrap/join credentials.
	BootstrapToken           string
	DiscoveryTokenCACertHash string
	ControlPlaneCertKey      string

	// Container runtime and CNI selection.
	ContainerRuntimeEndpoint string
	CNIPlugin                string

	BootstrapMode string // "init" or "join"
	JoinKind      string // "worker" or "control-plane"; consulted only when BootstrapMode is "join"

	AllowSchedulingOnControlPlane bool
	SkipImageCheck                bool

	// Management network configuration.
	MgmtIface   string
	MgmtAddress string
	MgmtGateway string

	DNSNameservers   []string
	DNSSearchDomains []string
	SubjectAltNames  []string

	// Node registration metadata.
	NodeLabels      map[string]string
	NodeAnnotations map[string]string
}
// defaultTemplateValues returns the built-in baseline configuration used when
// no environment overrides are supplied: a single "init"-mode control-plane
// node with placeholder join credentials.
func defaultTemplateValues() TemplateValues {
	var v TemplateValues

	// Node identity and management network.
	v.Hostname = "monok8s-master-1"
	v.NodeName = "monok8s-master-1"
	v.MgmtIface = "eth1"
	v.MgmtAddress = "10.0.0.10/24"
	v.MgmtGateway = "10.0.0.1"
	v.DNSNameservers = []string{"1.1.1.1", "8.8.8.8"}
	v.DNSSearchDomains = []string{"lan"}

	// Cluster-wide settings.
	v.KubernetesVersion = buildinfo.Version
	v.ClusterName = "monok8s"
	v.ClusterDomain = "cluster.local"
	v.PodSubnet = "10.244.0.0/16"
	v.ServiceSubnet = "10.96.0.0/12"
	v.APIServerAdvertiseAddress = "10.0.0.10"
	v.APIServerEndpoint = "10.0.0.10:6443"

	// Placeholder bootstrap credentials — not valid against a real cluster.
	v.BootstrapToken = "abcd12.ef3456789abcdef0"
	v.DiscoveryTokenCACertHash = "sha256:9f1c2b3a4d5e6f7890abc1234567890abcdef1234567890abcdef1234567890ab"
	v.ControlPlaneCertKey = ""

	// Runtime / bootstrap behaviour.
	v.ContainerRuntimeEndpoint = "unix:///var/run/crio/crio.sock"
	v.CNIPlugin = "default"
	v.BootstrapMode = "init"
	v.JoinKind = "worker"
	v.AllowSchedulingOnControlPlane = true
	v.SkipImageCheck = false

	// TLS SANs and node registration metadata.
	v.SubjectAltNames = []string{"10.0.0.10", "localhost", "monok8s-master-1"}
	v.NodeLabels = map[string]string{
		"monok8s.io/label": "value",
	}
	v.NodeAnnotations = map[string]string{
		"monok8s.io/annotation": "value",
	}

	return v
}
// LoadTemplateValuesFromEnv starts from defaultTemplateValues and applies any
// MKS_*-prefixed environment variable overrides. Unset or blank variables
// leave the corresponding default untouched. NodeName defaults to the
// resolved Hostname when MKS_NODE_NAME is not set.
func LoadTemplateValuesFromEnv() TemplateValues {
	v := defaultTemplateValues()

	// Plain string fields: the env value wins whenever it is non-blank.
	stringOverrides := []struct {
		key string
		dst *string
	}{
		{"MKS_HOSTNAME", &v.Hostname},
		{"MKS_KUBERNETES_VERSION", &v.KubernetesVersion},
		{"MKS_CLUSTER_NAME", &v.ClusterName},
		{"MKS_CLUSTER_DOMAIN", &v.ClusterDomain},
		{"MKS_POD_SUBNET", &v.PodSubnet},
		{"MKS_SERVICE_SUBNET", &v.ServiceSubnet},
		{"MKS_APISERVER_ADVERTISE_ADDRESS", &v.APIServerAdvertiseAddress},
		{"MKS_API_SERVER_ENDPOINT", &v.APIServerEndpoint},
		{"MKS_BOOTSTRAP_TOKEN", &v.BootstrapToken},
		{"MKS_DISCOVERY_TOKEN_CA_CERT_HASH", &v.DiscoveryTokenCACertHash},
		{"MKS_CONTROL_PLANE_CERT_KEY", &v.ControlPlaneCertKey},
		{"MKS_CONTAINER_RUNTIME_ENDPOINT", &v.ContainerRuntimeEndpoint},
		{"MKS_CNI_PLUGIN", &v.CNIPlugin},
		{"MKS_BOOTSTRAP_MODE", &v.BootstrapMode},
		{"MKS_JOIN_KIND", &v.JoinKind},
		{"MKS_MGMT_IFACE", &v.MgmtIface},
		{"MKS_MGMT_ADDRESS", &v.MgmtAddress},
		{"MKS_MGMT_GATEWAY", &v.MgmtGateway},
	}
	for _, o := range stringOverrides {
		*o.dst = getenvDefault(o.key, *o.dst)
	}

	// NodeName deliberately falls back to the already-resolved Hostname
	// (not the baseline NodeName) when MKS_NODE_NAME is unset.
	v.NodeName = getenvDefault("MKS_NODE_NAME", v.Hostname)

	v.AllowSchedulingOnControlPlane = getenvBoolDefault("MKS_ALLOW_SCHEDULING_ON_CONTROL_PLANE", v.AllowSchedulingOnControlPlane)
	v.SkipImageCheck = getenvBoolDefault("MKS_SKIP_IMAGE_CHECK", v.SkipImageCheck)

	// List/map fields keep their defaults unless the env yields at least
	// one entry.
	if xs := splitWhitespaceList(os.Getenv("MKS_DNS_NAMESERVERS")); len(xs) > 0 {
		v.DNSNameservers = xs
	}
	if xs := splitWhitespaceList(os.Getenv("MKS_DNS_SEARCH_DOMAINS")); len(xs) > 0 {
		v.DNSSearchDomains = xs
	}
	if xs := splitCommaList(os.Getenv("MKS_SANS")); len(xs) > 0 {
		v.SubjectAltNames = xs
	}
	if m := parseKeyValueMap(os.Getenv("MKS_NODE_LABELS")); len(m) > 0 {
		v.NodeLabels = m
	}
	if m := parseKeyValueMap(os.Getenv("MKS_NODE_ANNOTATIONS")); len(m) > 0 {
		v.NodeAnnotations = m
	}

	return v
}
func getenvDefault(key, def string) string {
s := strings.TrimSpace(os.Getenv(key))
if s == "" {
return def
}
return s
}
func getenvBoolDefault(key string, def bool) bool {
s := strings.TrimSpace(strings.ToLower(os.Getenv(key)))
if s == "" {
return def
}
switch s {
case "1", "true", "yes", "y", "on":
return true
case "0", "false", "no", "n", "off":
return false
default:
return def
}
}
// splitCommaList splits s on commas, trims each item, and drops blanks.
// A string that is empty or all whitespace yields nil.
func splitCommaList(s string) []string {
	if strings.TrimSpace(s) == "" {
		return nil
	}
	items := strings.Split(s, ",")
	out := make([]string, 0, len(items))
	for i := range items {
		if trimmed := strings.TrimSpace(items[i]); trimmed != "" {
			out = append(out, trimmed)
		}
	}
	return out
}
// splitWhitespaceList splits s on runs of whitespace, returning nil when the
// input contains no fields.
func splitWhitespaceList(s string) []string {
	fields := strings.Fields(s)
	if len(fields) == 0 {
		return nil
	}
	return fields
}
// parseKeyValueMap parses a comma-separated "k1=v1,k2=v2" string into a map.
// Keys and values are whitespace-trimmed; items without '=' or with an empty
// key are skipped. Blank input yields an empty (non-nil) map.
func parseKeyValueMap(s string) map[string]string {
	result := map[string]string{}
	if strings.TrimSpace(s) == "" {
		return result
	}
	for _, raw := range strings.Split(s, ",") {
		// SplitN keeps any '=' inside the value intact.
		pair := strings.SplitN(raw, "=", 2)
		if len(pair) != 2 {
			continue
		}
		key := strings.TrimSpace(pair[0])
		if key == "" {
			continue
		}
		result[key] = strings.TrimSpace(pair[1])
	}
	return result
}

View File

@@ -1,44 +1,58 @@
# Required
KUBERNETES_VERSION=v1.35.3
NODE_NAME=monok8s-master
APISERVER_ADVERTISE_ADDRESS=192.168.1.50
## Host config
MKS_HOSTNAME=monok8s-master
MKS_NODE_NAME=monok8s-master
# RJ45 Ports from left to right (interface name inside the OS)
# - eth1, eth2, eth0
MKS_MGMT_IFACE=eth1
MKS_MGMT_ADDRESS=10.0.0.14/24
MKS_MGMT_GATEWAY=10.0.0.1
# Space-separated real upstream DNS servers.
# Do NOT put the cluster DNS Service IP here.
MKS_DNS_NAMESERVERS="10.0.0.1 1.1.1.1 1.0.0.1"
# Optional. Space-separated.
MKS_DNS_SEARCH_DOMAINS="lan"
## k8s config
MKS_KUBERNETES_VERSION=v1.35.3
MKS_APISERVER_ADVERTISE_ADDRESS=10.0.0.14
# Optional but strongly recommended
CLUSTER_NAME=monok8s
POD_SUBNET=10.244.0.0/16
SERVICE_SUBNET=10.96.0.0/12
CLUSTER_DOMAIN=cluster.local
MKS_CLUSTER_NAME=monok8s
MKS_POD_SUBNET=10.244.0.0/16
MKS_SERVICE_SUBNET=10.96.0.0/12
MKS_CLUSTER_DOMAIN=cluster.local
# Bootstrap mode: init, join
BOOTSTRAP_MODE=init
MKS_BOOTSTRAP_MODE=init
# For join mode: worker, control-plane
JOIN_KIND=worker
API_SERVER_ENDPOINT=
BOOTSTRAP_TOKEN=
DISCOVERY_TOKEN_CA_CERT_HASH=
CONTROL_PLANE_CERT_KEY=
MKS_JOIN_KIND=worker
MKS_API_SERVER_ENDPOINT=
MKS_BOOTSTRAP_TOKEN=
MKS_DISCOVERY_TOKEN_CA_CERT_HASH=
MKS_CONTROL_PLANE_CERT_KEY=
# none: Install manually
# bridge: CRIO's default bridge CNI
CNI_PLUGIN=none
# none: install manually
# default|bridge: CRI-O default bridge CNI
MKS_CNI_PLUGIN=none
# Node registration metadata
NODE_LABELS=topology.kubernetes.io/zone=lab,node.kubernetes.io/instance-type=mono-gateway
NODE_ANNOTATIONS=mono.si/board=ls1046a,mono.si/image-version=dev
# Comma-separated key=value pairs
MKS_NODE_LABELS=topology.kubernetes.io/zone=lab,node.kubernetes.io/instance-type=mono-gateway
MKS_NODE_ANNOTATIONS=mono.si/board=ls1046a,mono.si/image-version=dev
# Optional
# Extra API server SANs, comma-separated
SANS=127.0.0.1,localhost,monok8s-master
# Single-node mode: allow workloads on the control plane
ALLOW_SCHEDULING_ON_CONTROL_PLANE=yes
MKS_SANS=127.0.0.1,localhost,monok8s-master,10.0.0.14
# CRI-O socket
CONTAINER_RUNTIME_ENDPOINT=unix:///var/run/crio/crio.sock
MKS_CONTAINER_RUNTIME_ENDPOINT=unix:///var/run/crio/crio.sock
# Usually leave this alone
KUBECONFIG_USER_HOME=/root
# Optional
MKS_ALLOW_SCHEDULING_ON_CONTROL_PLANE=true
# Emergency override only
SKIP_IMAGE_CHECK=no
MKS_SKIP_IMAGE_CHECK=no

View File

@@ -1,14 +0,0 @@
HOSTNAME=monok8s-master
# RJ45 Ports from left to right (interface name inside the OS)
# - eth1, eth2, eth0
MGMT_IFACE=eth0
MGMT_ADDRESS=10.0.0.13/24
MGMT_GATEWAY=10.0.0.1
# Space-separated real upstream DNS servers.
# Do NOT put the cluster DNS Service IP here.
DNS_NAMESERVERS="10.0.0.1 1.1.1.1 1.0.0.1"
# Optional
DNS_SEARCH_DOMAINS="lan"

View File

@@ -238,18 +238,7 @@ check-functions:
@echo "Missing functions:"
@comm -23 /tmp/called.txt /tmp/defined.txt || true
# ---- node targets ------------------------------------------------------------
node-config: $(NODE_ENV_DEFAULT) $(SCRIPTS_DIR)/merge-env.sh | $(OUT_DIR)
sh $(SCRIPTS_DIR)/merge-env.sh $(NODE_ENV_DEFAULT) $(NODE_ENV)
node-defconfig: $(NODE_ENV_DEFAULT) | $(OUT_DIR)
cp $(NODE_ENV_DEFAULT) $(NODE_ENV)
node-print:
@cat $(NODE_ENV)
# ---- cluster targets ------------------------------------------------------------
# ---- config targets ------------------------------------------------------------
cluster-config: $(CLUSTER_ENV_DEFAULT) $(SCRIPTS_DIR)/merge-env.sh | $(OUT_DIR)
sh $(SCRIPTS_DIR)/merge-env.sh $(CLUSTER_ENV_DEFAULT) $(CLUSTER_ENV)