Update ctl init to support env file

This commit is contained in:
2026-03-30 19:33:44 +08:00
parent 60a9ffeaf6
commit d9ffd1b446
12 changed files with 450 additions and 1191 deletions

View File

@@ -11,3 +11,4 @@ rc-update add fancontrol boot
rc-update add loopback boot
rc-update add hostname boot
rc-update add localmount boot
# Added by this commit: run the cluster bootstrap service in the default runlevel.
rc-update add bootstrap-cluster default

View File

@@ -1,23 +0,0 @@
#!/sbin/openrc-run
# OpenRC one-shot service: applies per-node configuration at boot by
# running apply-node-config.sh, appending its output to a log file.
export PATH="/usr/local/bin:/usr/local/sbin:$PATH"
name="Apply node config"
description="Apply node configurations using node.env from /opt/monok8s/config"
command="/opt/scripts/apply-node-config.sh"
LOG_DIR="/var/log/monok8s"
LOG_FILE="$LOG_DIR/apply-node-config.log"
# Run only after local filesystems are mounted (log dir and config live there).
depend() {
need localmount
}
# Custom start: run the script synchronously; eend reports its exit status
# back to OpenRC so a failed run marks the service as failed.
start() {
checkpath --directory "$LOG_DIR"
ebegin "Applying node config"
"$command" >>"$LOG_FILE" 2>&1
eend $?
}

View File

@@ -2,23 +2,24 @@
# NOTE(review): this span is a rendered unified diff (hunk "-2,23 +2,24"):
# removed and added lines appear consecutively without their +/- markers.
# The duplicated assignments below (name=, description=, command=,
# LOG_FILE=) are old/new pairs, and start() shows both the old
# backgrounded invocation and the new start-stop-daemon form; only the
# newer of each pair exists in the committed file.
export PATH="/usr/local/bin:/usr/local/sbin:$PATH"
name="Bootstrap cluster"
description="Apply cluster configurations using node.env from /opt/monok8s/config"
name="Bootstrap the Cluster"
description="Runs the ctl init bootstrap sequence"
command="/opt/scripts/bootstrap-cluster.sh"
command="/usr/local/bin/ctl"
command_args="init --env-file /opt/monok8s/config/cluster.env"
LOG_DIR="/var/log/monok8s"
LOG_FILE="$LOG_DIR/bootstrap.log"
LOG_FILE="$LOG_DIR/bootstrap-cluster.log"
depend() {
need apply-node-config
use net
need localmount
}
start() {
checkpath --directory "$LOG_DIR"
ebegin "Applying cluster config"
"$command" >>"$LOG_FILE" 2>&1 &
ebegin "Starting the bootstrap sequence"
# start-stop-daemon launches ctl with its args; command_args is
# deliberately unquoted so it word-splits into separate arguments.
start-stop-daemon --start \
--exec "$command" \
-- $command_args >>"$LOG_FILE" 2>&1
eend $?
}

View File

@@ -1,201 +0,0 @@
#!/bin/bash
# apply-node-config.sh — one-shot node setup driven by node.env:
# hostname, management interface address/route, IPv4 forwarding, DNS.
set -euo pipefail
# Config locations; both overridable via the environment (useful for tests).
CONFIG_DIR="${CONFIG_DIR:-/opt/monok8s/config}"
NODE_ENV="${NODE_ENV:-$CONFIG_DIR/node.env}"
# Emit an informational message with the script's log prefix.
log() {
  printf '[monok8s-node] %s\n' "$*"
}

# Emit an error message on stderr and abort the script.
fail() {
  printf '[monok8s-node] ERROR: %s\n' "$*" >&2
  exit 1
}

# Abort unless the named command is resolvable on PATH.
need_cmd() {
  if ! command -v "$1" >/dev/null 2>&1; then
    fail "missing required command: $1"
  fi
}

# Abort unless the argument is an existing regular file.
require_file() {
  if ! [ -f "$1" ]; then
    fail "required file not found: $1"
  fi
}
# Source node.env, then default every expected setting to the empty string
# so later [ -n ... ] checks are safe under `set -u`.
load_config() {
require_file "$NODE_ENV"
# shellcheck disable=SC1090
. "$NODE_ENV"
# NOTE(review): bash pre-populates HOSTNAME with the machine's current
# hostname, so the "HOSTNAME is required" validation can pass even when
# node.env omits it — confirm that is intended.
HOSTNAME="${HOSTNAME:-}"
MGMT_IFACE="${MGMT_IFACE:-}"
MGMT_ADDRESS="${MGMT_ADDRESS:-}"
MGMT_GATEWAY="${MGMT_GATEWAY:-}"
DNS_NAMESERVERS="${DNS_NAMESERVERS:-}"
DNS_SEARCH_DOMAINS="${DNS_SEARCH_DOMAINS:-}"
}
# Validate the settings loaded from node.env; aborts via fail() on the
# first missing or malformed value.
validate_config() {
  local required ns
  # The three mandatory settings, checked in a fixed order so the first
  # missing one is reported.
  for required in HOSTNAME MGMT_IFACE MGMT_ADDRESS; do
    [[ -n "${!required}" ]] || fail "$required is required"
  done
  # The management address must be CIDR-style (contain a slash).
  if [[ "$MGMT_ADDRESS" != */* ]]; then
    fail "MGMT_ADDRESS must include a CIDR prefix, example: 10.0.0.13/24"
  fi
  # Reject the cluster DNS service IP among the node's resolvers.
  for ns in $DNS_NAMESERVERS; do
    if [[ "$ns" == "10.96.0.10" ]]; then
      fail "DNS_NAMESERVERS must not contain cluster DNS service IP (10.96.0.10)"
    fi
  done
}

# Probe only the non-coreutils tools this script shells out to.
check_prereqs() {
  local tool
  for tool in ip hostname grep; do
    need_cmd "$tool"
  done
}
# Bring up MGMT_IFACE and idempotently assign MGMT_ADDRESS; optionally
# (re)point the default route at MGMT_GATEWAY.
configure_mgmt_interface() {
local addr_ip
ip link show "$MGMT_IFACE" >/dev/null 2>&1 || fail "interface not found: $MGMT_IFACE"
log "bringing up interface: $MGMT_IFACE"
ip link set "$MGMT_IFACE" up
# Compare the bare IP (prefix stripped) against addresses already on the
# interface so re-runs don't fail when the address is already assigned.
addr_ip="${MGMT_ADDRESS%/*}"
if ip -o addr show dev "$MGMT_IFACE" | awk '{print $4}' | cut -d/ -f1 | grep -Fx "$addr_ip" >/dev/null 2>&1; then
log "address already present on $MGMT_IFACE: $MGMT_ADDRESS"
else
log "assigning $MGMT_ADDRESS to $MGMT_IFACE"
ip addr add "$MGMT_ADDRESS" dev "$MGMT_IFACE"
fi
if [ -n "${MGMT_GATEWAY:-}" ]; then
log "ensuring default route via $MGMT_GATEWAY"
# "route replace" keeps this idempotent when a default route exists.
ip route replace default via "$MGMT_GATEWAY" dev "$MGMT_IFACE"
fi
}
# Set the hostname (runtime + /etc/hostname) when it differs from the
# configured value, and make sure /etc/hosts can resolve it.
set_hostname_if_needed() {
local current_hostname
current_hostname="$(hostname 2>/dev/null || true)"
if [ "$current_hostname" != "$HOSTNAME" ]; then
log "setting hostname to $HOSTNAME"
hostname "$HOSTNAME"
mkdir -p /etc
printf '%s\n' "$HOSTNAME" > /etc/hostname
if [ -f /etc/hosts ]; then
# NOTE(review): $HOSTNAME is used unescaped inside an ERE, so dots
# match any character — a similarly named existing entry could
# suppress this append. Consider escaping or grep -F with context.
if ! grep -Eq "[[:space:]]$HOSTNAME([[:space:]]|$)" /etc/hosts; then
printf '127.0.0.1\tlocalhost %s\n' "$HOSTNAME" >> /etc/hosts
fi
else
cat > /etc/hosts <<EOF
127.0.0.1 localhost $HOSTNAME
EOF
fi
else
log "hostname already set: $HOSTNAME"
fi
}
# Enable IPv4 forwarding immediately via procfs and persist it across
# reboots with a sysctl.d drop-in.
ensure_ip_forward() {
local current
current="$(cat /proc/sys/net/ipv4/ip_forward 2>/dev/null || echo 0)"
if [ "$current" != "1" ]; then
log "enabling IPv4 forwarding"
echo 1 > /proc/sys/net/ipv4/ip_forward
fi
mkdir -p /etc/sysctl.d
# Quoted heredoc delimiter: body is written literally, no expansion.
cat > /etc/sysctl.d/99-monok8s.conf <<'EOF'
net.ipv4.ip_forward = 1
EOF
}
# Rewrite /etc/resolv.conf from DNS_NAMESERVERS / DNS_SEARCH_DOMAINS.
# Builds into a temp file and mv's it into place so the swap is atomic.
# Leaves resolv.conf untouched when DNS_NAMESERVERS is empty.
configure_dns() {
local tmpfile
local ns_count=0
if [ -z "$DNS_NAMESERVERS" ]; then
log "DNS_NAMESERVERS not set; leaving /etc/resolv.conf unchanged"
return
fi
mkdir -p /etc
tmpfile="/etc/resolv.conf.monok8s.tmp"
: > "$tmpfile"
if [ -n "$DNS_SEARCH_DOMAINS" ]; then
printf 'search %s\n' "$DNS_SEARCH_DOMAINS" >> "$tmpfile"
fi
# Word-split the space-separated list into nameserver lines.
for ns in $DNS_NAMESERVERS; do
printf 'nameserver %s\n' "$ns" >> "$tmpfile"
ns_count=$((ns_count + 1))
done
# Catches a whitespace-only DNS_NAMESERVERS (non-empty but no entries).
[ "$ns_count" -gt 0 ] || fail "DNS_NAMESERVERS is set but no valid nameservers were parsed"
printf 'options timeout:2 attempts:3\n' >> "$tmpfile"
mv "$tmpfile" /etc/resolv.conf
log "configured /etc/resolv.conf from DNS_NAMESERVERS"
}
# Log a human-readable summary of what was applied; a placeholder is shown
# for each optional setting that was left unset or empty.
print_summary() {
  log "node configuration applied"
  log "hostname: $HOSTNAME"
  log "interface: $MGMT_IFACE"
  log "address: $MGMT_ADDRESS"
  log "gateway: ${MGMT_GATEWAY:-<not set>}"
  log "dns nameservers: ${DNS_NAMESERVERS:-<unchanged>}"
  log "dns search: ${DNS_SEARCH_DOMAINS:-<not set>}"
}
# Orchestration: validate everything first, then mutate the system
# (forwarding, interface, DNS, hostname) and report.
main() {
load_config
validate_config
check_prereqs
ensure_ip_forward
configure_mgmt_interface
configure_dns
set_hostname_if_needed
print_summary
}
main "$@"

View File

@@ -1,820 +0,0 @@
#!/bin/sh
# bootstrap-cluster.sh — init, join, reconcile, or upgrade a kubeadm-based
# node according to cluster.env. POSIX sh; services are driven through
# rc-service (OpenRC).
set -eu
# All inputs come from cluster.env under CONFIG_DIR (both overridable).
CONFIG_DIR="${CONFIG_DIR:-/opt/monok8s/config}"
CLUSTER_ENV="${CONFIG_DIR}/cluster.env"
# Where the generated kubeadm config manifest is written.
KUBEADM_CONFIG_OUT="${KUBEADM_CONFIG_OUT:-/tmp/kubeadm-init.yaml}"
# Kubeconfigs used to probe existing cluster state.
ADMIN_KUBECONFIG="/etc/kubernetes/admin.conf"
KUBELET_KUBECONFIG="/etc/kubernetes/kubelet.conf"
# Print an informational message with the script's log prefix.
log() {
  printf '[monok8s] %s\n' "$*"
}
# Print an error to stderr and terminate the script.
fail() {
  printf '[monok8s] ERROR: %s\n' "$*" >&2
  exit 1
}
# Abort unless the given command can be resolved on PATH.
need_cmd() {
  if ! command -v "$1" >/dev/null 2>&1; then
    fail "missing required command: $1"
  fi
}
# Abort unless the given path is an existing regular file.
require_file() {
  if ! [ -f "$1" ]; then
    fail "required file not found: $1"
  fi
}
# Source cluster.env and apply defaults. The ":?" expansions abort the
# script (set -u semantics) with the given message when a mandatory
# setting is missing.
load_config() {
require_file "$CLUSTER_ENV"
# shellcheck disable=SC1090
. "$CLUSTER_ENV"
# Mandatory settings.
: "${KUBERNETES_VERSION:?KUBERNETES_VERSION is required}"
: "${NODE_NAME:?NODE_NAME is required}"
: "${APISERVER_ADVERTISE_ADDRESS:?APISERVER_ADVERTISE_ADDRESS is required}"
# Optional settings with defaults.
POD_SUBNET="${POD_SUBNET:-10.244.0.0/16}"
SERVICE_SUBNET="${SERVICE_SUBNET:-10.96.0.0/12}"
CLUSTER_NAME="${CLUSTER_NAME:-monok8s}"
CLUSTER_DOMAIN="${CLUSTER_DOMAIN:-cluster.local}"
CONTAINER_RUNTIME_ENDPOINT="${CONTAINER_RUNTIME_ENDPOINT:-unix:///var/run/crio/crio.sock}"
SANS="${SANS:-}"
ALLOW_SCHEDULING_ON_CONTROL_PLANE="${ALLOW_SCHEDULING_ON_CONTROL_PLANE:-yes}"
SKIP_IMAGE_CHECK="${SKIP_IMAGE_CHECK:-no}"
KUBE_PROXY_NODEPORT_ADDRESSES="${KUBE_PROXY_NODEPORT_ADDRESSES:-primary}"
# Mode selection: "init" (control plane) or "join" (additional node).
BOOTSTRAP_MODE="${BOOTSTRAP_MODE:-init}"
JOIN_KIND="${JOIN_KIND:-worker}"
# Join-mode inputs; validated later in validate_config().
API_SERVER_ENDPOINT="${API_SERVER_ENDPOINT:-}"
BOOTSTRAP_TOKEN="${BOOTSTRAP_TOKEN:-}"
DISCOVERY_TOKEN_CA_CERT_HASH="${DISCOVERY_TOKEN_CA_CERT_HASH:-}"
CONTROL_PLANE_CERT_KEY="${CONTROL_PLANE_CERT_KEY:-}"
CNI_PLUGIN="${CNI_PLUGIN:-none}"
}
# kubectl preconfigured with the cluster-admin kubeconfig.
kubectl_admin() {
kubectl --kubeconfig "$ADMIN_KUBECONFIG" "$@"
}
# kubectl preconfigured with the kubelet's kubeconfig.
kubectl_kubelet() {
kubectl --kubeconfig "$KUBELET_KUBECONFIG" "$@"
}
# Best-effort start; "|| true" keeps set -e from aborting when the service
# is already running (check_kubelet_running verifies the outcome).
start_kubelet() {
log "starting kubelet..."
rc-service kubelet start >/dev/null 2>&1 || true
}
restart_kubelet() {
log "restarting kubelet..."
rc-service kubelet restart
}
# Poll the OpenRC status for up to ~30s; abort if the service never runs.
check_kubelet_running() {
log "waiting for kubelet to become ready..."
last_status="unknown"
for _ in $(seq 1 30); do
if rc-service kubelet status >/dev/null 2>&1; then
log "kubelet is up"
return 0
fi
last_status="service-not-running"
sleep 1
done
fail "kubelet did not become ready in time (${last_status})"
}
# A static kube-apiserver manifest implies this host runs a control plane.
is_local_control_plane_node() {
[ -f /etc/kubernetes/manifests/kube-apiserver.yaml ]
}
# Wait (up to ~3 min) for the local API server's TCP port to accept
# connections; needs nc.
wait_for_local_apiserver() {
need_cmd nc
log "waiting for local API server on ${APISERVER_ADVERTISE_ADDRESS}:6443..."
for _ in $(seq 1 90); do
if nc -z "${APISERVER_ADVERTISE_ADDRESS}" 6443 >/dev/null 2>&1; then
log "local API server TCP port is reachable"
return 0
fi
sleep 2
done
fail "local API server did not become reachable on ${APISERVER_ADVERTISE_ADDRESS}:6443"
}
# Wait (up to ~3 min) for the API to answer an actual kubectl request via
# admin.conf — a stronger readiness signal than the TCP probe above.
wait_for_admin_api() {
[ -f "$ADMIN_KUBECONFIG" ] || fail "missing admin kubeconfig: $ADMIN_KUBECONFIG"
log "waiting for Kubernetes API to respond via admin.conf..."
for _ in $(seq 1 90); do
if kubectl_admin version -o yaml >/dev/null 2>&1; then
log "Kubernetes API is responding"
return 0
fi
sleep 2
done
fail "Kubernetes API did not become ready in time"
}
# When artifacts from a previous bootstrap exist, bring services back up
# and wait for the relevant API before the version probing that follows;
# fresh nodes (no kubeconfig yet) skip this entirely.
wait_for_existing_cluster_if_needed() {
case "$BOOTSTRAP_MODE" in
init)
if [ -f "$ADMIN_KUBECONFIG" ]; then
start_kubelet
check_kubelet_running
if is_local_control_plane_node; then
wait_for_local_apiserver
fi
wait_for_admin_api
fi
;;
join)
if [ -f "$KUBELET_KUBECONFIG" ]; then
start_kubelet
check_kubelet_running
fi
;;
esac
}
# NOTE(review): both functions below are re-defined later in this file
# with the kubeconfig paths inlined; in sh the later definition wins, so
# these wrapper-based versions are shadowed dead code — delete one pair.
# Extract the serverVersion gitVersion from `kubectl version -o yaml`.
get_cluster_server_version() {
kubectl_admin version -o yaml 2>/dev/null \
| awk '
$1 == "serverVersion:" { in_server=1; next }
in_server && $1 == "gitVersion:" { print $2; exit }
'
}
# Same extraction, but through the kubelet's kubeconfig.
get_api_server_version_from_kubelet_kubeconfig() {
kubectl_kubelet version -o yaml 2>/dev/null \
| awk '
$1 == "serverVersion:" { in_server=1; next }
in_server && $1 == "gitVersion:" { print $2; exit }
'
}
# Validate KUBE_PROXY_NODEPORT_ADDRESSES: the empty string and the literal
# "primary" pass as-is; anything else must be a comma-separated list of
# IPv4 or IPv6 CIDRs. Aborts via fail() on the first bad entry.
validate_cidr_list_or_primary() {
  value="$1"
  if [ -z "$value" ] || [ "$value" = "primary" ]; then
    return 0
  fi
  old_ifs="$IFS"
  IFS=','
  for item in $value; do
    # Tolerate spaces around the commas.
    trimmed="$(printf '%s' "$item" | sed 's/^ *//;s/ *$//')"
    if [ -z "$trimmed" ]; then
      fail "KUBE_PROXY_NODEPORT_ADDRESSES contains an empty entry"
    fi
    case "$trimmed" in
      */*) ;;
      *) fail "KUBE_PROXY_NODEPORT_ADDRESSES must be 'primary' or a comma-separated list of CIDRs" ;;
    esac
    ip_part="${trimmed%/*}"
    prefix_part="${trimmed#*/}"
    # The prefix length must be purely numeric...
    if ! printf '%s' "$prefix_part" | grep -Eq '^[0-9]+$'; then
      fail "invalid CIDR prefix in KUBE_PROXY_NODEPORT_ADDRESSES: $trimmed"
    fi
    # ...and within range for the detected address family.
    case "$ip_part" in
      *:*)
        printf '%s' "$prefix_part" | awk '{ exit !($1 >= 0 && $1 <= 128) }' \
          || fail "invalid IPv6 CIDR prefix in KUBE_PROXY_NODEPORT_ADDRESSES: $trimmed"
        ;;
      *.*.*.*)
        printf '%s' "$prefix_part" | awk '{ exit !($1 >= 0 && $1 <= 32) }' \
          || fail "invalid IPv4 CIDR prefix in KUBE_PROXY_NODEPORT_ADDRESSES: $trimmed"
        ;;
      *)
        fail "invalid CIDR entry in KUBE_PROXY_NODEPORT_ADDRESSES: $trimmed"
        ;;
    esac
  done
  IFS="$old_ifs"
}
# Mode-dependent validation: init needs nothing extra; join requires the
# discovery settings (plus a certificate key for control-plane joins).
validate_config() {
case "$BOOTSTRAP_MODE" in
init)
;;
join)
: "${API_SERVER_ENDPOINT:?API_SERVER_ENDPOINT is required for join mode}"
: "${BOOTSTRAP_TOKEN:?BOOTSTRAP_TOKEN is required for join mode}"
: "${DISCOVERY_TOKEN_CA_CERT_HASH:?DISCOVERY_TOKEN_CA_CERT_HASH is required for join mode}"
case "$JOIN_KIND" in
worker|control-plane)
;;
*)
fail "JOIN_KIND must be 'worker' or 'control-plane'"
;;
esac
if [ "$JOIN_KIND" = "control-plane" ]; then
: "${CONTROL_PLANE_CERT_KEY:?CONTROL_PLANE_CERT_KEY is required for JOIN_KIND=control-plane}"
fi
;;
*)
fail "BOOTSTRAP_MODE must be 'init' or 'join'"
;;
esac
validate_cidr_list_or_primary "$KUBE_PROXY_NODEPORT_ADDRESSES"
}
# Drop an optional leading "v" so "v1.29.2" and "1.29.2" compare equal.
normalize_version() {
  printf '%s\n' "${1#v}"
}
# Reduce a version string to its "major.minor" part.
version_major_minor() {
  normalize_version "$1" | awk -F. '{ print $1 "." $2 }'
}
# True when both versions are identical after normalization.
version_eq() {
  [ "$(normalize_version "$1")" = "$(normalize_version "$2")" ]
}
# True when $1 sorts strictly before $2 in version order.
version_lt() {
  lt_a="$(normalize_version "$1")"
  lt_b="$(normalize_version "$2")"
  [ "$lt_a" != "$lt_b" ] \
    && [ "$(printf '%s\n%s\n' "$lt_a" "$lt_b" | sort -V | head -n1)" = "$lt_a" ]
}
# True when $1 sorts strictly after $2 (i.e. $2 < $1).
version_gt() {
  version_lt "$2" "$1"
}
# Print the minor-version delta from $1 to $2; both must share the same
# major version or the script aborts.
minor_diff() {
  md_from="$(version_major_minor "$1")"
  md_to="$(version_major_minor "$2")"
  [ "${md_from%.*}" = "${md_to%.*}" ] || fail "major version change unsupported here: $1 -> $2"
  echo $(( ${md_to#*.} - ${md_from#*.} ))
}
# Report the version of the local kubeadm binary (e.g. "v1.29.2").
get_kubeadm_binary_version() {
  kubeadm version -o short
}
# NOTE(review): these re-define (and shadow) the wrapper-based versions
# declared earlier in this file — in sh the later definition wins. Fixed
# here to suppress stderr like the earlier pair did, so call sites using
# `$(... || true)` don't leak kubectl connection errors into the
# bootstrap log. Consider deleting the earlier, shadowed pair.
# Print the control-plane gitVersion (e.g. "v1.29.2") via admin.conf, or
# nothing when the API server is unreachable.
get_cluster_server_version() {
kubectl --kubeconfig /etc/kubernetes/admin.conf version -o yaml 2>/dev/null \
| awk '
$1 == "serverVersion:" { in_server=1; next }
in_server && $1 == "gitVersion:" { print $2; exit }
'
}
# Print the cluster gitVersion visible through the kubelet kubeconfig, or
# nothing when the API server is unreachable.
get_api_server_version_from_kubelet_kubeconfig() {
kubectl --kubeconfig /etc/kubernetes/kubelet.conf version -o yaml 2>/dev/null \
| awk '
$1 == "serverVersion:" { in_server=1; next }
in_server && $1 == "gitVersion:" { print $2; exit }
'
}
# The local kubeadm binary must already be at the target version; this
# script never installs packages itself.
validate_target_matches_local_binaries() {
kubeadm_ver="$(get_kubeadm_binary_version)"
if ! version_eq "$kubeadm_ver" "$KUBERNETES_VERSION"; then
fail "kubeadm binary version ($kubeadm_ver) does not match target KUBERNETES_VERSION ($KUBERNETES_VERSION)"
fi
}
# Choose what main() will run and publish it in the global
# BOOTSTRAP_ACTION: fresh init/join when no prior kubeconfig exists,
# otherwise reconcile (version already matches) or upgrade (it differs).
decide_bootstrap_action() {
current_version=""
case "$BOOTSTRAP_MODE" in
init)
if [ ! -f "$ADMIN_KUBECONFIG" ]; then
BOOTSTRAP_ACTION="init"
log "selected bootstrap action: $BOOTSTRAP_ACTION"
return 0
fi
current_version="$(get_cluster_server_version || true)"
[ -n "$current_version" ] || fail "existing control-plane config found, but cluster version could not be determined"
log "detected existing control-plane version: $current_version"
if version_eq "$current_version" "$KUBERNETES_VERSION"; then
BOOTSTRAP_ACTION="reconcile-control-plane"
else
BOOTSTRAP_ACTION="upgrade-control-plane"
fi
;;
join)
if [ ! -f "$KUBELET_KUBECONFIG" ]; then
BOOTSTRAP_ACTION="join"
log "selected bootstrap action: $BOOTSTRAP_ACTION"
return 0
fi
current_version="$(get_api_server_version_from_kubelet_kubeconfig || true)"
[ -n "$current_version" ] || fail "existing kubelet config found, but cluster version could not be determined"
log "detected cluster version visible from this node: $current_version"
if version_eq "$current_version" "$KUBERNETES_VERSION"; then
BOOTSTRAP_ACTION="reconcile-node"
else
BOOTSTRAP_ACTION="upgrade-node"
fi
;;
*)
fail "unsupported BOOTSTRAP_MODE: $BOOTSTRAP_MODE"
;;
esac
log "selected bootstrap action: $BOOTSTRAP_ACTION"
}
# Re-assert desired state on an already-initialized control plane.
reconcile_control_plane() {
log "reconciling existing control-plane node"
start_kubelet
check_kubelet_running
wait_for_local_apiserver
wait_for_admin_api
apply_local_node_metadata_if_possible
allow_single_node_scheduling
}
# Re-assert desired state on an already-joined node.
reconcile_node() {
log "reconciling existing joined node"
start_kubelet
check_kubelet_running
}
# Allow same-version (no-op) and single-minor-step upgrades within one
# major version; reject downgrades and larger minor skips.
validate_upgrade_path() {
current="$1"
target="$2"
if version_eq "$current" "$target"; then
log "cluster is already at target version: $target"
return 0
fi
if version_gt "$current" "$target"; then
fail "downgrade is not supported: current=$current target=$target"
fi
diff="$(minor_diff "$current" "$target")"
case "$diff" in
0|1)
;;
*)
fail "unsupported upgrade path: current=$current target=$target (minor skip too large)"
;;
esac
}
check_upgrade_prereqs() {
validate_target_matches_local_binaries
}
# Upgrade the control plane with kubeadm (plan, then apply -y) after
# re-validating the version step; no-op when already at target.
run_kubeadm_upgrade_apply() {
current_version="$(get_cluster_server_version || true)"
[ -n "$current_version" ] || fail "cannot determine current control-plane version; API server is not reachable"
log "current control-plane version: $current_version"
log "target control-plane version: $KUBERNETES_VERSION"
validate_upgrade_path "$current_version" "$KUBERNETES_VERSION"
if version_eq "$current_version" "$KUBERNETES_VERSION"; then
log "control-plane already at target version; skipping kubeadm upgrade apply"
return 0
fi
log "running kubeadm upgrade plan..."
kubeadm upgrade plan "$KUBERNETES_VERSION"
log "running kubeadm upgrade apply..."
kubeadm upgrade apply -y "$KUBERNETES_VERSION"
}
# Upgrade this (non-init) node's local configuration; requires the
# control plane to already be at the target version.
run_kubeadm_upgrade_node() {
cluster_version="$(get_api_server_version_from_kubelet_kubeconfig)"
log "cluster/control-plane version visible from this node: $cluster_version"
log "target node version: $KUBERNETES_VERSION"
if ! version_eq "$cluster_version" "$KUBERNETES_VERSION"; then
fail "control-plane version ($cluster_version) does not match target ($KUBERNETES_VERSION); upgrade control-plane first"
fi
log "running kubeadm upgrade node..."
kubeadm upgrade node
}
# External tools this script shells out to (nc is probed separately,
# only where it's used).
check_prereqs() {
need_cmd kubeadm
need_cmd kubelet
need_cmd kubectl
need_cmd crictl
need_cmd rc-service
need_cmd awk
need_cmd ip
need_cmd grep
need_cmd sed
need_cmd hostname
}
# TCP-probe the remote API server (up to ~20s) before attempting a join.
# NOTE(review): splitting host/port on the last ':' breaks for bracketed
# IPv6 endpoints — confirm API_SERVER_ENDPOINT is always host:port.
check_apiserver_reachable() {
host="${API_SERVER_ENDPOINT%:*}"
port="${API_SERVER_ENDPOINT##*:}"
need_cmd nc
log "checking API server reachability: ${host}:${port}"
for _ in $(seq 1 20); do
if nc -z "$host" "$port" >/dev/null 2>&1; then
log "API server is reachable"
return 0
fi
sleep 1
done
fail "cannot reach API server at ${host}:${port}"
}
start_crio() {
rc-service crio start
}
# Poll for up to ~30s; "ready" requires both the OpenRC service to report
# running AND crictl to successfully query the runtime endpoint.
check_crio_running() {
log "waiting for CRI-O to become ready..."
last_status="unknown"
for _ in $(seq 1 30); do
if rc-service crio status >/dev/null 2>&1; then
last_status="service-running"
if crictl --runtime-endpoint "$CONTAINER_RUNTIME_ENDPOINT" info >/dev/null 2>&1; then
log "CRI-O is up"
return 0
fi
last_status="service-running-but-runtime-not-ready"
else
last_status="service-not-running"
fi
sleep 1
done
fail "CRI-O did not become ready in time (${last_status})"
}
# True when the runtime already has the given image.
# NOTE(review): assumes a repo:tag reference; a tag-less or @digest ref
# would be mis-split by the %:* / ##*: expansions — kubeadm's image list
# normally emits repo:tag, confirm that holds for all entries.
image_present() {
wanted="$1"
repo="${wanted%:*}"
tag="${wanted##*:}"
crictl --runtime-endpoint "$CONTAINER_RUNTIME_ENDPOINT" images \
| awk 'NR>1 { print $1 ":" $2 }' \
| grep -Fx "$repo:$tag" >/dev/null 2>&1
}
# Verify every image kubeadm needs is already loaded (air-gap friendly);
# lists ALL missing images before failing. SKIP_IMAGE_CHECK=yes bypasses.
check_required_images() {
[ "$SKIP_IMAGE_CHECK" = "yes" ] && {
log "skipping image check (SKIP_IMAGE_CHECK=yes)"
return 0
}
log "checking required Kubernetes images for $KUBERNETES_VERSION..."
missing_any=0
for img in $(kubeadm config images list --kubernetes-version "$KUBERNETES_VERSION"); do
if image_present "$img"; then
log "found image: $img"
else
echo "[monok8s] MISSING image: $img" >&2
missing_any=1
fi
done
[ "$missing_any" -eq 0 ] || fail "preload the Kubernetes images before bootstrapping"
log "all required images are present"
}
# Hard stop when this node already carries bootstrap artifacts.
# NOTE(review): not invoked from main() in this revision — the
# reconcile/upgrade flow replaced it; appears to be dead code here.
check_not_already_bootstrapped() {
case "$BOOTSTRAP_MODE" in
init)
if [ -f /etc/kubernetes/admin.conf ]; then
fail "cluster already appears initialized (/etc/kubernetes/admin.conf exists)"
fi
;;
join)
if [ -f /etc/kubernetes/kubelet.conf ]; then
fail "node already appears joined (/etc/kubernetes/kubelet.conf exists)"
fi
;;
esac
}
# Join this node to an existing cluster; control-plane joins additionally
# pass the certificate key and the local advertise address.
run_kubeadm_join() {
log "running kubeadm join..."
case "$JOIN_KIND" in
worker)
kubeadm join "${API_SERVER_ENDPOINT}" \
--token "${BOOTSTRAP_TOKEN}" \
--discovery-token-ca-cert-hash "${DISCOVERY_TOKEN_CA_CERT_HASH}" \
--node-name "${NODE_NAME}" \
--cri-socket "${CONTAINER_RUNTIME_ENDPOINT}"
;;
control-plane)
kubeadm join "${API_SERVER_ENDPOINT}" \
--token "${BOOTSTRAP_TOKEN}" \
--discovery-token-ca-cert-hash "${DISCOVERY_TOKEN_CA_CERT_HASH}" \
--control-plane \
--certificate-key "${CONTROL_PLANE_CERT_KEY}" \
--apiserver-advertise-address "${APISERVER_ADVERTISE_ADDRESS}" \
--node-name "${NODE_NAME}" \
--cri-socket "${CONTAINER_RUNTIME_ENDPOINT}"
;;
esac
}
# Emit a KubeProxyConfiguration YAML document configuring
# nodePortAddresses, to be appended to the generated kubeadm config.
# Prints nothing when the setting is empty/unset.
generate_kube_proxy_config_block() {
  addrs="${KUBE_PROXY_NODEPORT_ADDRESSES:-}"
  [ -n "$addrs" ] || return 0
  # Shared document header.
  printf '%s\n' '---' \
    'apiVersion: kubeproxy.config.k8s.io/v1alpha1' \
    'kind: KubeProxyConfiguration' \
    'nodePortAddresses:'
  if [ "$addrs" = "primary" ]; then
    printf '%s\n' '- primary'
    return 0
  fi
  # Otherwise treat the value as a comma-separated CIDR list; spaces
  # around commas are tolerated, empty entries are silently skipped.
  saved_ifs="$IFS"
  IFS=','
  for raw in $addrs; do
    cidr="$(printf '%s' "$raw" | sed 's/^ *//;s/ *$//')"
    if [ -n "$cidr" ]; then
      printf ' - "%s"\n' "$cidr"
    fi
  done
  IFS="$saved_ifs"
}
# Render the kubeadm config (Init + Cluster + Kubelet configuration
# documents) to KUBEADM_CONFIG_OUT, then append the kube-proxy document.
generate_kubeadm_config() {
log "generating kubeadm config at $KUBEADM_CONFIG_OUT..."
# Build extra certSANs as pre-indented YAML list lines. Each appended
# entry keeps its trailing newline so the accumulated block splices
# cleanly into the heredoc at the "${SAN_LINES}---" line below.
SAN_LINES=""
if [ -n "${SANS:-}" ]; then
old_ifs="$IFS"
IFS=','
for san in $SANS; do
san_trimmed="$(echo "$san" | sed 's/^ *//;s/ *$//')"
[ -n "$san_trimmed" ] && SAN_LINES="${SAN_LINES} - \"${san_trimmed}\"
"
done
IFS="$old_ifs"
fi
cat > "$KUBEADM_CONFIG_OUT" <<EOF
apiVersion: kubeadm.k8s.io/v1beta4
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: ${APISERVER_ADVERTISE_ADDRESS}
bindPort: 6443
nodeRegistration:
name: ${NODE_NAME}
criSocket: ${CONTAINER_RUNTIME_ENDPOINT}
imagePullPolicy: IfNotPresent
kubeletExtraArgs:
- name: hostname-override
value: "${NODE_NAME}"
- name: node-ip
value: "${APISERVER_ADVERTISE_ADDRESS}"
- name: pod-manifest-path
value: "/etc/kubernetes/manifests"
---
apiVersion: kubeadm.k8s.io/v1beta4
kind: ClusterConfiguration
clusterName: ${CLUSTER_NAME}
kubernetesVersion: ${KUBERNETES_VERSION}
networking:
podSubnet: ${POD_SUBNET}
serviceSubnet: ${SERVICE_SUBNET}
dnsDomain: ${CLUSTER_DOMAIN}
apiServer:
certSANs:
- "${APISERVER_ADVERTISE_ADDRESS}"
${SAN_LINES}---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: cgroupfs
containerRuntimeEndpoint: ${CONTAINER_RUNTIME_ENDPOINT}
EOF
generate_kube_proxy_config_block >> "$KUBEADM_CONFIG_OUT"
}
# Initialize the control plane from the generated config file.
run_kubeadm_init() {
log "running kubeadm init..."
kubeadm init --config "$KUBEADM_CONFIG_OUT"
}
# Abort unless the given IP is assigned to some local interface.
require_local_ip() {
wanted_ip="$1"
ip -o addr show | awk '{print $4}' | cut -d/ -f1 | grep -Fx "$wanted_ip" >/dev/null 2>&1 \
|| fail "required local IP is not present on any interface: $wanted_ip"
}
# init: the advertise address must exist locally. join: additionally the
# remote API server must answer on its TCP port.
validate_network_requirements() {
case "$BOOTSTRAP_MODE" in
init)
require_local_ip "$APISERVER_ADVERTISE_ADDRESS"
;;
join)
require_local_ip "$APISERVER_ADVERTISE_ADDRESS"
check_apiserver_reachable
;;
*)
fail "unsupported BOOTSTRAP_MODE: $BOOTSTRAP_MODE"
;;
esac
}
# Poll (up to ~60s) until the node object is registered with the API.
wait_for_node() {
log "waiting for node registration: $NODE_NAME"
for _ in $(seq 1 60); do
if kubectl --kubeconfig /etc/kubernetes/admin.conf get node "$NODE_NAME" >/dev/null 2>&1; then
return 0
fi
sleep 1
done
fail "node $NODE_NAME did not register in time"
}
# Apply NODE_LABELS / NODE_ANNOTATIONS (comma-separated key=value lists)
# through admin.conf; only possible in init mode, where that kubeconfig
# exists on this host. The $(...) substitutions below are deliberately
# unquoted so the comma-to-space converted list word-splits into separate
# key=value arguments.
apply_local_node_metadata_if_possible() {
if [ "$BOOTSTRAP_MODE" != "init" ]; then
log "skipping node labels/annotations from this node (not control-plane init mode)"
return 0
fi
wait_for_node
if [ -n "${NODE_ANNOTATIONS:-}" ]; then
kubectl --kubeconfig /etc/kubernetes/admin.conf annotate node "$NODE_NAME" $(printf '%s' "$NODE_ANNOTATIONS" | tr ',' ' ') --overwrite
fi
if [ -n "${NODE_LABELS:-}" ]; then
kubectl --kubeconfig /etc/kubernetes/admin.conf label node "$NODE_NAME" $(printf '%s' "$NODE_LABELS" | tr ',' ' ') --overwrite
fi
}
# Toggle CRI-O's bundled bridge CNI by renaming its conflist: "none"
# disables it (a real cluster CNI is expected later), "bridge" re-enables.
install_cni_if_requested() {
case "${CNI_PLUGIN}" in
none)
if [ -f /etc/cni/net.d/10-crio-bridge.conflist ]; then
mv /etc/cni/net.d/10-crio-bridge.conflist \
/etc/cni/net.d/10-crio-bridge.conflist.disabled
fi
log "bootstrap bridge CNI disabled; install a cluster CNI (e.g., flannel) for pod networking"
;;
bridge)
if [ -f /etc/cni/net.d/10-crio-bridge.conflist.disabled ]; then
mv /etc/cni/net.d/10-crio-bridge.conflist.disabled \
/etc/cni/net.d/10-crio-bridge.conflist
fi
log "bootstrap bridge CNI enabled"
;;
*)
fail "unsupported CNI_PLUGIN: ${CNI_PLUGIN}"
;;
esac
}
# Single-node convenience: remove the control-plane taint so workloads can
# schedule here. Best-effort ("|| true"): the taint may already be absent.
allow_single_node_scheduling() {
if [ "$ALLOW_SCHEDULING_ON_CONTROL_PLANE" != "yes" ]; then
log "leaving control-plane taint in place"
return 0
fi
log "removing control-plane taint so this single node can schedule workloads..."
kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes "$NODE_NAME" node-role.kubernetes.io/control-plane- >/dev/null 2>&1 || true
}
# Operator-facing epilogue; content depends on the bootstrap mode.
print_next_steps() {
echo
echo "[monok8s] bootstrap complete"
echo
case "$BOOTSTRAP_MODE" in
init)
cat <<EOF
Notes:
- On a fresh cluster without a CNI, nodes may stay NotReady.
- If you want pods to run on this same node, keep ALLOW_SCHEDULING_ON_CONTROL_PLANE=yes.
- kube-proxy nodePortAddresses is set to: ${KUBE_PROXY_NODEPORT_ADDRESSES:-<unset>}
EOF
;;
join)
cat <<EOF
This node has attempted to join the cluster.
Check from the control-plane node:
kubectl get nodes -o wide
kubectl describe node ${NODE_NAME}
Notes:
- This node can join without a CNI.
- Without a cluster CNI, the node may remain NotReady.
EOF
;;
esac
}
# Entry point: validate and prepare the runtime, pick an action via
# decide_bootstrap_action (published in $BOOTSTRAP_ACTION), then run it.
main() {
load_config
validate_config
check_prereqs
validate_network_requirements
install_cni_if_requested
start_crio
check_crio_running
wait_for_existing_cluster_if_needed
decide_bootstrap_action
case "$BOOTSTRAP_ACTION" in
# Fresh control-plane bring-up.
init)
check_required_images
generate_kubeadm_config
run_kubeadm_init
restart_kubelet
apply_local_node_metadata_if_possible
allow_single_node_scheduling
;;
# Existing control plane already at target version.
reconcile-control-plane)
reconcile_control_plane
;;
# Existing control plane on a different version.
upgrade-control-plane)
check_upgrade_prereqs
check_required_images
generate_kubeadm_config
run_kubeadm_upgrade_apply
restart_kubelet
apply_local_node_metadata_if_possible
allow_single_node_scheduling
;;
# Fresh node join.
join)
run_kubeadm_join
;;
# Joined node already at target version.
reconcile-node)
reconcile_node
;;
# Joined node on a different version.
upgrade-node)
check_upgrade_prereqs
run_kubeadm_upgrade_node
restart_kubelet
;;
*)
fail "unsupported BOOTSTRAP_ACTION: $BOOTSTRAP_ACTION"
;;
esac
print_next_steps
}
main "$@"