Added kubeadm init

This commit is contained in:
2026-03-30 01:31:38 +08:00
parent 5fbc2846a1
commit 210fabdcc6
7 changed files with 419 additions and 102 deletions

View File

@@ -22,29 +22,29 @@ func NewRegistry(ctx *node.NodeContext) *Registry {
return &Registry{
steps: map[string]node.Step{
"validate_network_requirements": node.ValidateNetworkRequirements,
"detect_local_cluster_state": node.DetectLocalClusterState,
"classify_bootstrap_action": node.ClassifyBootstrapAction,
"configure_default_cni": node.ConfigureDefaultCNI,
"start_crio": node.StartCRIO,
"wait_for_existing_cluster_if_needed": node.WaitForExistingClusterIfNeeded,
"validate_required_images": node.ValidateRequiredImagesPresent,
"generate_kubeadm_config": node.GenerateKubeadmConfig,
"run_kubeadm_init": node.RunKubeadmInit,
"apply_local_node_metadata_if_possible": node.ApplyLocalNodeMetadataIfPossible,
"allow_single_node_scheduling": node.AllowSingleNodeScheduling,
"ensure_ip_forward": node.EnsureIPForward,
"configure_hostname": node.ConfigureHostname(netCfg),
"configure_mgmt_interface": node.ConfigureMgmtInterface(netCfg),
"configure_dns": node.ConfigureDNS(netCfg),
"set_hostname_if_needed": node.SetHostnameIfNeeded,
"print_summary": node.PrintSummary,
"reconcile_control_plane": node.ReconcileControlPlane,
"check_upgrade_prereqs": node.CheckUpgradePrereqs,
"run_kubeadm_upgrade_apply": node.RunKubeadmUpgradeApply,
"run_kubeadm_join": node.RunKubeadmJoin,
"reconcile_node": node.ReconcileNode,
"run_kubeadm_upgrade_node": node.RunKubeadmUpgradeNode,
"AllowSingleNodeScheduling": node.AllowSingleNodeScheduling,
"ApplyLocalNodeMetadataIfPossible": node.ApplyLocalNodeMetadataIfPossible,
"CheckUpgradePrereqs": node.CheckUpgradePrereqs,
"ClassifyBootstrapAction": node.ClassifyBootstrapAction,
"ConfigureDNS": node.ConfigureDNS(netCfg),
"ConfigureDefaultCNI": node.ConfigureDefaultCNI,
"ConfigureHostname": node.ConfigureHostname(netCfg),
"ConfigureMgmtInterface": node.ConfigureMgmtInterface(netCfg),
"DetectLocalClusterState": node.DetectLocalClusterState,
"EnsureIPForward": node.EnsureIPForward,
"GenerateKubeadmConfig": node.GenerateKubeadmConfig,
"PrintSummary": node.PrintSummary,
"ReconcileControlPlane": node.ReconcileControlPlane,
"ReconcileNode": node.ReconcileNode,
"RunKubeadmInit": node.RunKubeadmInit,
"RunKubeadmJoin": node.RunKubeadmJoin,
"RunKubeadmUpgradeApply": node.RunKubeadmUpgradeApply,
"RunKubeadmUpgradeNode": node.RunKubeadmUpgradeNode,
"SetHostnameIfNeeded": node.SetHostnameIfNeeded,
"StartCRIO": node.StartCRIO,
"ValidateNodeIPAndAPIServerReachability": node.ValidateNodeIPAndAPIServerReachability,
"ValidateRequiredImagesPresent": node.ValidateRequiredImagesPresent,
"WaitForExistingClusterIfNeeded": node.WaitForExistingClusterIfNeeded,
},
}
}

View File

@@ -37,112 +37,107 @@ func NewRunner(cfg *monov1alpha1.MonoKSConfig) *Runner {
Registry: NewRegistry(nctx),
initSteps: []StepInfo{
{
RegKey: "configure_hostname",
RegKey: "ConfigureHostname",
Name: "Configure hostname",
Desc: "Set system hostname according to cluster configuration",
},
{
RegKey: "configure_mgmt_interface",
RegKey: "ConfigureMgmtInterface",
Name: "Configure management interface",
Desc: "Configure management network interface, IP address, and gateway",
},
{
RegKey: "configure_dns",
RegKey: "ConfigureDNS",
Name: "Configure DNS",
Desc: "Set system DNS resolver configuration for cluster and external access",
},
{
RegKey: "ensure_ip_forward",
RegKey: "EnsureIPForward",
Name: "Ensure IP forwarding",
Desc: "Enable kernel IP forwarding required for pod networking",
},
{
RegKey: "configure_default_cni",
RegKey: "ConfigureDefaultCNI",
Name: "Configure default CNI",
Desc: "Install or configure default container networking (CNI bridge, IPAM, etc.)",
},
{
RegKey: "start_crio",
RegKey: "StartCRIO",
Name: "Start CRI-O runtime",
Desc: "Start container runtime and verify it is ready for Kubernetes workloads",
},
{
RegKey: "validate_required_images",
RegKey: "ValidateRequiredImagesPresent",
Name: "Validate required images",
Desc: "Ensure all required Kubernetes images are present or available locally",
},
{
RegKey: "validate_network_requirements",
Name: "Validate network requirements",
Desc: "Ensure required kernel networking features (iptables/nftables, bridge, forwarding) are available",
RegKey: "ValidateNodeIPAndAPIServerReachability",
Name: "Validate Node IP and wether API Server is available",
Desc: "Verify the local ip address with the api server advertise address. Contact remote api server",
},
{
RegKey: "detect_local_cluster_state",
RegKey: "DetectLocalClusterState",
Name: "Detect local cluster state",
Desc: "Inspect local node to determine existing Kubernetes membership and configuration",
},
{
RegKey: "classify_bootstrap_action",
RegKey: "ClassifyBootstrapAction",
Name: "Classify bootstrap action",
Desc: "Decide whether to init, join, upgrade, or reconcile based on local state and desired version",
},
{
RegKey: "wait_for_existing_cluster_if_needed",
RegKey: "WaitForExistingClusterIfNeeded",
Name: "Wait for existing cluster",
Desc: "Block until control plane is reachable when joining or reconciling an existing cluster",
},
{
RegKey: "generate_kubeadm_config",
Name: "Generate kubeadm config",
Desc: "Render kubeadm configuration for init, join, or upgrade operations",
},
{
RegKey: "apply_local_node_metadata_if_possible",
Name: "Apply node metadata",
Desc: "Apply labels/annotations to the local node if API server is reachable",
},
{
RegKey: "allow_single_node_scheduling",
Name: "Allow single-node scheduling",
Desc: "Remove control-plane taints to allow workloads on single-node clusters",
},
{
RegKey: "reconcile_control_plane",
RegKey: "ReconcileControlPlane",
Name: "Reconcile control plane",
Desc: "Ensure control plane components match desired state without full reinitialization",
},
{
RegKey: "check_upgrade_prereqs",
RegKey: "CheckUpgradePrereqs",
Name: "Check upgrade prerequisites",
Desc: "Validate cluster state and version compatibility before upgrade",
},
{
RegKey: "run_kubeadm_upgrade_apply",
RegKey: "RunKubeadmUpgradeApply",
Name: "Run kubeadm upgrade apply",
Desc: "Upgrade control plane components using kubeadm",
},
{
RegKey: "run_kubeadm_init",
RegKey: "RunKubeadmInit",
Name: "Run kubeadm init",
Desc: "Initialize a new Kubernetes control plane using kubeadm",
},
{
RegKey: "run_kubeadm_join",
RegKey: "RunKubeadmJoin",
Name: "Run kubeadm join",
Desc: "Join node to existing cluster as worker or control-plane",
},
{
RegKey: "reconcile_node",
RegKey: "ReconcileNode",
Name: "Reconcile node state",
Desc: "Ensure node configuration matches desired state after join or upgrade",
},
{
RegKey: "run_kubeadm_upgrade_node",
RegKey: "RunKubeadmUpgradeNode",
Name: "Run kubeadm upgrade node",
Desc: "Upgrade node components (kubelet, config) to match control plane",
},
{
RegKey: "print_summary",
RegKey: "ApplyLocalNodeMetadataIfPossible",
Name: "Apply node metadata",
Desc: "Apply labels/annotations to the local node if API server is reachable",
},
{
RegKey: "AllowSingleNodeScheduling",
Name: "Allow single-node scheduling",
Desc: "Remove control-plane taints to allow workloads on single-node clusters",
},
{
RegKey: "PrintSummary",
Name: "Print summary",
Desc: "Output final bootstrap summary and detected state",
},

View File

@@ -20,14 +20,27 @@ func NewCmdInit(_ *genericclioptions.ConfigFlags) *cobra.Command {
cmd := &cobra.Command{
Use: "init [list|STEPSEL]",
Short: "Start the bootstrap process for this node",
Example: strings.TrimSpace(`
Long: `Run the node bootstrap process.
STEPSEL allows running specific steps instead of the full sequence.
It supports:
3 Run step 3
1-3 Run steps 1 through 3
-3 Run steps from start through 3
3- Run steps from 3 to the end
1,3,5 Run specific steps
9-10,15 Combine ranges and individual steps
`,
Example: `
ctl init
ctl init list
ctl init 1-3
ctl init -3
ctl init 3-
ctl init 1,3,5
`),
ctl init 9-10,15
`,
Args: cobra.MaximumNArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
path, err := (config.Loader{}).ResolvePath(configPath)
@@ -68,6 +81,7 @@ func NewCmdInit(_ *genericclioptions.ConfigFlags) *cobra.Command {
return err
}
klog.InfoS("Running selected init steps", "steps", sel.Indices)
return runner.InitSelected(cmd.Context(), sel)
},
}
@@ -87,26 +101,27 @@ func parseStepSelection(raw string, max int) (bootstrap.StepSelection, error) {
selected := map[int]struct{}{}
parts := strings.Split(raw, ",")
for _, part := range parts {
part = strings.TrimSpace(part)
if part == "" {
for _, item := range strings.Split(raw, ",") {
item = strings.TrimSpace(item)
if item == "" {
return bootstrap.StepSelection{}, fmt.Errorf("invalid empty selector in %q", raw)
}
if strings.Contains(part, "-") {
if strings.Count(part, "-") != 1 {
return bootstrap.StepSelection{}, fmt.Errorf("invalid range %q", part)
// Range or open-ended range
if strings.Contains(item, "-") {
if strings.Count(item, "-") != 1 {
return bootstrap.StepSelection{}, fmt.Errorf("invalid range %q", item)
}
bounds := strings.SplitN(part, "-", 2)
left := strings.TrimSpace(bounds[0])
right := strings.TrimSpace(bounds[1])
parts := strings.SplitN(item, "-", 2)
left := strings.TrimSpace(parts[0])
right := strings.TrimSpace(parts[1])
var start, end int
switch {
case left == "" && right == "":
return bootstrap.StepSelection{}, fmt.Errorf("invalid range %q", part)
return bootstrap.StepSelection{}, fmt.Errorf("invalid range %q", item)
case left == "":
n, err := parseStepNumber(right, max)
@@ -131,11 +146,10 @@ func parseStepSelection(raw string, max int) (bootstrap.StepSelection, error) {
if err != nil {
return bootstrap.StepSelection{}, err
}
start, end = a, b
if a > b {
return bootstrap.StepSelection{}, fmt.Errorf("invalid descending range %q", item)
}
if start > end {
return bootstrap.StepSelection{}, fmt.Errorf("invalid descending range %q", part)
start, end = a, b
}
for i := start; i <= end; i++ {
@@ -144,20 +158,21 @@ func parseStepSelection(raw string, max int) (bootstrap.StepSelection, error) {
continue
}
n, err := parseStepNumber(part, max)
// Single step
n, err := parseStepNumber(item, max)
if err != nil {
return bootstrap.StepSelection{}, err
}
selected[n] = struct{}{}
}
out := make([]int, 0, len(selected))
indices := make([]int, 0, len(selected))
for n := range selected {
out = append(out, n)
indices = append(indices, n)
}
sort.Ints(out)
sort.Ints(indices)
return bootstrap.StepSelection{Indices: out}, nil
return bootstrap.StepSelection{Indices: indices}, nil
}
func parseStepNumber(raw string, max int) (int, error) {

View File

@@ -1,6 +1,7 @@
package node
import (
"bytes"
"context"
"errors"
"fmt"
@@ -9,16 +10,20 @@ import (
"strings"
"time"
"gopkg.in/yaml.v3"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/discovery"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/klog/v2"
system "undecided.project/monok8s/pkg/system"
)
const (
adminKubeconfigPath = "/etc/kubernetes/admin.conf"
kubeletKubeconfigPath = "/etc/kubernetes/kubelet.conf"
tmpKubeadmInitConf = "/tmp/kubeadm-init.yaml"
)
func DetectLocalClusterState(ctx context.Context, nctx *NodeContext) error {
@@ -64,11 +69,16 @@ func DetectLocalClusterState(ctx context.Context, nctx *NodeContext) error {
}
func WaitForExistingClusterIfNeeded(ctx context.Context, nctx *NodeContext) error {
if nctx.LocalClusterState == nil {
return errors.New("LocalClusterState is nil, please run dependency step first")
}
switch nctx.LocalClusterState.MembershipKind {
case LocalMembershipFresh:
klog.V(4).Infof("Nothing to to do LocalMembershipFresh")
return nil
case LocalMembershipExistingWorker:
klog.V(4).Infof("Starting Kubelet in LocalMembershipExistingWorker")
if err := StartKubelet(ctx, nctx); err != nil {
return fmt.Errorf("start kubelet: %w", err)
}
@@ -79,6 +89,7 @@ func WaitForExistingClusterIfNeeded(ctx context.Context, nctx *NodeContext) erro
return fmt.Errorf("start kubelet: %w", err)
}
klog.V(4).Infof("Waiting for local apiserver in LocalMembershipExistingControlPlane")
// Existing local control-plane state: wait for local apiserver if this
// machine is meant to be a control-plane node.
if strings.TrimSpace(nctx.Config.Spec.ClusterRole) == "control-plane" {
@@ -92,7 +103,6 @@ func WaitForExistingClusterIfNeeded(ctx context.Context, nctx *NodeContext) erro
return nil
case LocalMembershipPartial:
// Be strict here. Partial state is suspicious.
return fmt.Errorf("partial local cluster state detected: admin=%t kubelet=%t",
nctx.LocalClusterState.HasAdminKubeconfig,
nctx.LocalClusterState.HasKubeletKubeconfig,
@@ -543,20 +553,224 @@ func normalizeKubeVersion(v string) string {
return v
}
func GenerateKubeadmConfig(context.Context, *NodeContext) error {
klog.Info("generate_kubeadm_config: TODO render kubeadm v1beta4 config from MonoKSConfig")
// requiredField trims raw and returns it, or an error of the form
// "<name> is required" when the trimmed value is empty.
func requiredField(name, raw string) (string, error) {
	v := strings.TrimSpace(raw)
	if v == "" {
		return "", fmt.Errorf("%s is required", name)
	}
	return v, nil
}

// GenerateKubeadmConfig renders the kubeadm v1beta4 configuration
// (InitConfiguration + ClusterConfiguration + KubeletConfiguration) from
// nctx.Config.Spec as a multi-document YAML file and writes it to
// tmpKubeadmInitConf with 0600 permissions. It returns an error when any
// required spec field is empty, when YAML marshalling fails, or when the
// file cannot be written.
func GenerateKubeadmConfig(_ context.Context, nctx *NodeContext) error {
	if nctx == nil {
		return fmt.Errorf("node context is nil")
	}
	spec := nctx.Config.Spec

	// Validate every required field up front so the caller gets one precise
	// error before anything is rendered or written.
	advertiseAddress, err := requiredField("api server advertise address", spec.APIServerAdvertiseAddress)
	if err != nil {
		return err
	}
	nodeName, err := requiredField("node name", spec.NodeName)
	if err != nil {
		return err
	}
	criSocket, err := requiredField("container runtime endpoint", spec.ContainerRuntimeEndpoint)
	if err != nil {
		return err
	}
	clusterName, err := requiredField("cluster name", spec.ClusterName)
	if err != nil {
		return err
	}
	kubernetesVersion, err := requiredField("kubernetes version", spec.KubernetesVersion)
	if err != nil {
		return err
	}
	podSubnet, err := requiredField("pod subnet", spec.PodSubnet)
	if err != nil {
		return err
	}
	serviceSubnet, err := requiredField("service subnet", spec.ServiceSubnet)
	if err != nil {
		return err
	}
	clusterDomain, err := requiredField("cluster domain", spec.ClusterDomain)
	if err != nil {
		return err
	}

	// Certificate SANs: the advertise address always comes first, followed by
	// configured SubjectAltNames, trimmed and de-duplicated in input order.
	certSANs := []string{advertiseAddress}
	seen := map[string]struct{}{
		advertiseAddress: {},
	}
	for _, raw := range spec.SubjectAltNames {
		san := strings.TrimSpace(raw)
		if san == "" {
			continue
		}
		if _, ok := seen[san]; ok {
			continue
		}
		seen[san] = struct{}{}
		certSANs = append(certSANs, san)
	}

	// Local shadow types for the three kubeadm/kubelet documents; only the
	// fields this generator sets are modelled.
	type kubeadmInitConfiguration struct {
		APIVersion       string `yaml:"apiVersion"`
		Kind             string `yaml:"kind"`
		LocalAPIEndpoint struct {
			AdvertiseAddress string `yaml:"advertiseAddress"`
			BindPort         int    `yaml:"bindPort"`
		} `yaml:"localAPIEndpoint"`
		NodeRegistration struct {
			Name             string `yaml:"name"`
			CRISocket        string `yaml:"criSocket"`
			ImagePullPolicy  string `yaml:"imagePullPolicy"`
			KubeletExtraArgs []struct {
				Name  string `yaml:"name"`
				Value string `yaml:"value"`
			} `yaml:"kubeletExtraArgs"`
		} `yaml:"nodeRegistration"`
	}
	type kubeadmClusterConfiguration struct {
		APIVersion        string `yaml:"apiVersion"`
		Kind              string `yaml:"kind"`
		ClusterName       string `yaml:"clusterName"`
		KubernetesVersion string `yaml:"kubernetesVersion"`
		Networking        struct {
			PodSubnet     string `yaml:"podSubnet"`
			ServiceSubnet string `yaml:"serviceSubnet"`
			DNSDomain     string `yaml:"dnsDomain"`
		} `yaml:"networking"`
		APIServer struct {
			CertSANs []string `yaml:"certSANs"`
		} `yaml:"apiServer"`
	}
	type kubeletConfiguration struct {
		APIVersion               string `yaml:"apiVersion"`
		Kind                     string `yaml:"kind"`
		CgroupDriver             string `yaml:"cgroupDriver"`
		ContainerRuntimeEndpoint string `yaml:"containerRuntimeEndpoint"`
	}

	initCfg := kubeadmInitConfiguration{
		APIVersion: "kubeadm.k8s.io/v1beta4",
		Kind:       "InitConfiguration",
	}
	initCfg.LocalAPIEndpoint.AdvertiseAddress = advertiseAddress
	initCfg.LocalAPIEndpoint.BindPort = 6443
	initCfg.NodeRegistration.Name = nodeName
	initCfg.NodeRegistration.CRISocket = criSocket
	initCfg.NodeRegistration.ImagePullPolicy = "IfNotPresent"
	initCfg.NodeRegistration.KubeletExtraArgs = []struct {
		Name  string `yaml:"name"`
		Value string `yaml:"value"`
	}{
		{Name: "hostname-override", Value: nodeName},
		{Name: "node-ip", Value: advertiseAddress},
		{Name: "pod-manifest-path", Value: "/etc/kubernetes/manifests"},
	}

	clusterCfg := kubeadmClusterConfiguration{
		APIVersion:        "kubeadm.k8s.io/v1beta4",
		Kind:              "ClusterConfiguration",
		ClusterName:       clusterName,
		KubernetesVersion: kubernetesVersion,
	}
	clusterCfg.Networking.PodSubnet = podSubnet
	clusterCfg.Networking.ServiceSubnet = serviceSubnet
	clusterCfg.Networking.DNSDomain = clusterDomain
	clusterCfg.APIServer.CertSANs = certSANs

	kubeletCfg := kubeletConfiguration{
		APIVersion:               "kubelet.config.k8s.io/v1beta1",
		Kind:                     "KubeletConfiguration",
		CgroupDriver:             "cgroupfs",
		ContainerRuntimeEndpoint: criSocket,
	}

	// Marshal each document, then join them with YAML document separators.
	var docs [][]byte
	for _, doc := range []any{initCfg, clusterCfg, kubeletCfg} {
		b, err := yaml.Marshal(doc)
		if err != nil {
			return fmt.Errorf("marshal kubeadm config document: %w", err)
		}
		docs = append(docs, bytes.TrimSpace(b))
	}
	var buf bytes.Buffer
	for i, doc := range docs {
		if i > 0 {
			buf.WriteString("\n---\n")
		}
		buf.Write(doc)
		buf.WriteByte('\n')
	}
	rendered := buf.String()
	if err := os.WriteFile(tmpKubeadmInitConf, []byte(rendered), 0o600); err != nil {
		return fmt.Errorf("write kubeadm config to %s: %w", tmpKubeadmInitConf, err)
	}
	klog.V(4).Infof("generated kubeadm config at %s:\n%s", tmpKubeadmInitConf, rendered)
	return nil
}
func RunKubeadmInit(context.Context, *NodeContext) error {
klog.Info("run_kubeadm_init: TODO implement kubeadm init --config <file>")
return nil
// RunKubeadmInit renders the kubeadm configuration and then executes
// `kubeadm init --config <file>`, streaming every stdout/stderr line into
// the log with a "[kubeadm]" prefix.
//
// The 10-minute timeout bounds image pulls plus control-plane bring-up.
func RunKubeadmInit(ctx context.Context, nctx *NodeContext) error {
	if err := GenerateKubeadmConfig(ctx, nctx); err != nil {
		return err
	}
	logLine := func(line string) { klog.Infof("[kubeadm] %s", line) }
	if _, err := nctx.SystemRunner.RunWithOptions(
		ctx,
		"kubeadm",
		[]string{"init", "--config", tmpKubeadmInitConf},
		system.RunOptions{
			Timeout:      10 * time.Minute,
			OnStdoutLine: logLine,
			OnStderrLine: logLine,
		},
	); err != nil {
		// Wrap so callers can tell which bootstrap step failed.
		return fmt.Errorf("kubeadm init: %w", err)
	}
	return nil
}
// RunKubeadmUpgradeApply upgrades the control plane via `kubeadm upgrade
// apply`. Currently a stub: it only logs the TODO and reports success.
func RunKubeadmUpgradeApply(context.Context, *NodeContext) error {
	klog.Info("run_kubeadm_upgrade_apply: TODO implement kubeadm upgrade apply")
	return nil
}
// RunKubeadmJoin joins this node to an existing cluster via `kubeadm join`.
// Currently a stub: it only logs the TODO and reports success. The commented
// shell script below is kept as the reference behavior to be ported
// (worker vs control-plane join flags).
func RunKubeadmJoin(context.Context, *NodeContext) error {
	klog.Info("run_kubeadm_join: TODO implement kubeadm join")
	/*
		run_kubeadm_join() {
			log "running kubeadm join..."
			case "$JOIN_KIND" in
			worker)
				kubeadm join "${API_SERVER_ENDPOINT}" \
					--token "${BOOTSTRAP_TOKEN}" \
					--discovery-token-ca-cert-hash "${DISCOVERY_TOKEN_CA_CERT_HASH}" \
					--node-name "${NODE_NAME}" \
					--cri-socket "${CONTAINER_RUNTIME_ENDPOINT}"
				;;
			control-plane)
				kubeadm join "${API_SERVER_ENDPOINT}" \
					--token "${BOOTSTRAP_TOKEN}" \
					--discovery-token-ca-cert-hash "${DISCOVERY_TOKEN_CA_CERT_HASH}" \
					--control-plane \
					--certificate-key "${CONTROL_PLANE_CERT_KEY}" \
					--apiserver-advertise-address "${APISERVER_ADVERTISE_ADDRESS}" \
					--node-name "${NODE_NAME}" \
					--cri-socket "${CONTAINER_RUNTIME_ENDPOINT}"
				;;
			esac
		}
	*/
	return nil
}
func RunKubeadmUpgradeNode(context.Context, *NodeContext) error {

View File

@@ -10,7 +10,7 @@ import (
"k8s.io/klog/v2"
)
func ValidateNetworkRequirements(ctx context.Context, nct *NodeContext) error {
func ValidateNodeIPAndAPIServerReachability(ctx context.Context, nct *NodeContext) error {
requireLocalIP := func(wantedIP string) error {
wantedIP = strings.TrimSpace(wantedIP)
if wantedIP == "" {

View File

@@ -1,6 +1,7 @@
package system
import (
"bufio"
"bytes"
"context"
"errors"
@@ -54,6 +55,10 @@ type RunOptions struct {
Stderr io.Writer
Quiet bool
RedactEnv []string
// Optional line hooks. Called for each complete line seen on stdout/stderr.
OnStdoutLine func(line string)
OnStderrLine func(line string)
}
type RetryOptions struct {
@@ -86,23 +91,30 @@ func (r *Runner) RunWithOptions(ctx context.Context, name string, args []string,
var stdoutBuf bytes.Buffer
var stderrBuf bytes.Buffer
stdoutW := io.Writer(&stdoutBuf)
stderrW := io.Writer(&stderrBuf)
stdoutDst := io.Writer(&stdoutBuf)
stderrDst := io.Writer(&stderrBuf)
if opt.Stdout != nil {
stdoutW = io.MultiWriter(stdoutW, opt.Stdout)
stdoutDst = io.MultiWriter(stdoutDst, opt.Stdout)
} else if r.cfg.StreamOutput && !opt.Quiet {
stdoutW = io.MultiWriter(stdoutW, os.Stdout)
stdoutDst = io.MultiWriter(stdoutDst, os.Stdout)
}
if opt.Stderr != nil {
stderrW = io.MultiWriter(stderrW, opt.Stderr)
stderrDst = io.MultiWriter(stderrDst, opt.Stderr)
} else if r.cfg.StreamOutput && !opt.Quiet {
stderrW = io.MultiWriter(stderrW, os.Stderr)
stderrDst = io.MultiWriter(stderrDst, os.Stderr)
}
cmd.Stdout = stdoutW
cmd.Stderr = stderrW
if opt.OnStdoutLine != nil {
stdoutDst = newLineHookWriter(stdoutDst, opt.OnStdoutLine)
}
if opt.OnStderrLine != nil {
stderrDst = newLineHookWriter(stderrDst, opt.OnStderrLine)
}
cmd.Stdout = stdoutDst
cmd.Stderr = stderrDst
start := time.Now()
if r.cfg.Logger != nil {
@@ -110,6 +122,13 @@ func (r *Runner) RunWithOptions(ctx context.Context, name string, args []string,
}
err := cmd.Run()
if hw, ok := stdoutDst.(interface{ Flush() }); ok {
hw.Flush()
}
if hw, ok := stderrDst.(interface{ Flush() }); ok {
hw.Flush()
}
end := time.Now()
res := &Result{
@@ -344,3 +363,61 @@ func (l *StdLogger) Printf(format string, args ...any) {
defer l.mu.Unlock()
fmt.Fprintf(os.Stderr, format+"\n", args...)
}
type lineHookWriter struct {
dst io.Writer
fn func(string)
mu sync.Mutex
buf bytes.Buffer
}
func newLineHookWriter(dst io.Writer, fn func(string)) *lineHookWriter {
return &lineHookWriter{
dst: dst,
fn: fn,
}
}
func (w *lineHookWriter) Write(p []byte) (int, error) {
w.mu.Lock()
defer w.mu.Unlock()
n, err := w.dst.Write(p)
// Keep line processing separate from dst write result.
w.buf.Write(p)
w.flushCompleteLinesLocked()
return n, err
}
func (w *lineHookWriter) flushCompleteLinesLocked() {
r := bufio.NewReader(&w.buf)
var pending bytes.Buffer
for {
line, err := r.ReadString('\n')
if err == io.EOF {
pending.WriteString(line)
break
}
line = strings.TrimRight(line, "\r\n")
w.fn(line)
}
w.buf.Reset()
_, _ = w.buf.Write(pending.Bytes())
}
// Flush emits the final unterminated line, if any.
// Not strictly required for kubeadm, but useful and correct.
func (w *lineHookWriter) Flush() {
w.mu.Lock()
defer w.mu.Unlock()
if w.buf.Len() == 0 {
return
}
w.fn(strings.TrimRight(w.buf.String(), "\r\n"))
w.buf.Reset()
}

View File

@@ -1,11 +1,23 @@
package templates
import (
"os"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "undecided.project/monok8s/pkg/apis/monok8s/v1alpha1"
buildinfo "undecided.project/monok8s/pkg/buildinfo"
)
var ValHostname string = "monoks-master-1"
var ValBootstrapToken string = "abcd12.ef3456789abcdef0"
var ValDiscoveryTokenCACertHash string = "sha256:9f1c2b3a4d5e6f7890abc1234567890abcdef1234567890abcdef1234567890ab"
func init() {
ValBootstrapToken = os.Getenv("HOSTNAME")
ValBootstrapToken = os.Getenv("BOOTSTRAP_TOKEN")
ValDiscoveryTokenCACertHash = os.Getenv("TOKEN_CACERT_HASH")
}
func DefaultMonoKSConfig() types.MonoKSConfig {
return types.MonoKSConfig{
TypeMeta: metav1.TypeMeta{
@@ -18,7 +30,7 @@ func DefaultMonoKSConfig() types.MonoKSConfig {
},
Spec: types.MonoKSConfigSpec{
KubernetesVersion: buildinfo.Version,
NodeName: "monok8s-master-1",
NodeName: ValHostname,
ClusterRole: "control-plane",
InitControlPlane: true,
@@ -32,6 +44,10 @@ func DefaultMonoKSConfig() types.MonoKSConfig {
APIServerAdvertiseAddress: "10.0.0.10",
APIServerEndpoint: "10.0.0.10:6443",
// Fake token and hash for placeholder purpose
BootstrapToken: ValBootstrapToken,
DiscoveryTokenCACertHash: ValDiscoveryTokenCACertHash,
ContainerRuntimeEndpoint: "unix:///var/run/crio/crio.sock",
CNIPlugin: "default",
@@ -44,17 +60,17 @@ func DefaultMonoKSConfig() types.MonoKSConfig {
},
SubjectAltNames: []string{
"10.0.0.10",
"10.0.0.10", "localhost", ValHostname,
},
NodeLabels: map[string]string{
"node-role.kubernetes.io/control-plane": "",
"monok8s.io/label": "label",
},
NodeAnnotations: map[string]string{},
Network: types.NetworkSpec{
Hostname: "monok8s-master-1",
Hostname: "monok8s-worker-1",
ManagementIface: "eth1",
ManagementCIDR: "10.0.0.10/24",
ManagementGW: "10.0.0.1",
@@ -85,7 +101,7 @@ func DefaultOSUpgrade() types.OSUpgrade {
ImageURL: "https://example.invalid/images/monok8s-v0.0.1.img.zst",
TargetPartition: "B",
NodeSelector: []string{
"monok8s-master-1",
ValHostname,
},
Force: false,
},