Preload images directly into the CRI-O image store at build time, so no image load is needed at boot

This commit is contained in:
2026-03-24 21:16:21 +08:00
parent 58da0aada6
commit 81d192d577
3 changed files with 148 additions and 125 deletions

View File

@@ -4,103 +4,117 @@ set -euo pipefail
# Fail fast if any required build input is missing.
: "${KUBE_IMG_CACHE:?KUBE_IMG_CACHE is required}"
: "${KUBE_VERSION:?KUBE_VERSION is required}"
: "${ARCH:?ARCH is required}"
: "${ROOTFS:?ROOTFS is required}"
OS=linux
# IMPORTANT:
# This must match what the target system will use at runtime.
# If your target CRI-O uses overlay, leave this as overlay.
# If overlay seeding fails in your build environment, fix the build env;
# do not silently switch to vfs unless the target will also use vfs.
STORAGE_DRIVER="${STORAGE_DRIVER:-overlay}"
# Optional. Needed by many overlay-in-container setups.
FUSE_OVERLAYFS="${FUSE_OVERLAYFS:-/usr/bin/fuse-overlayfs}"
# Keep archive cache version/arch scoped so downloads do not get mixed.
ARCHIVE_DIR="${KUBE_IMG_CACHE}/archives/${ARCH}/${KUBE_VERSION}"
# Seed directly into the target rootfs image store.
TARGET_GRAPHROOT="${ROOTFS}/var/lib/containers/storage"
# Build-host temporary runtime state for containers/storage.
TMPDIR="$(mktemp -d)"
RUNROOT="${TMPDIR}/runroot"
STORAGE_CONF="${TMPDIR}/storage.conf"
cleanup() {
  rm -rf "${TMPDIR}"
}
trap cleanup EXIT
mkdir -p "${ARCHIVE_DIR}" "${TARGET_GRAPHROOT}" "${RUNROOT}"
echo "============================================================"
echo "Preparing Kubernetes CRI-O image store"
echo " KUBE_VERSION = ${KUBE_VERSION}"
echo " ARCH = ${ARCH}"
echo " ARCHIVE_DIR = ${ARCHIVE_DIR}"
echo " TARGET_GRAPHROOT = ${TARGET_GRAPHROOT}"
echo " STORAGE_DRIVER = ${STORAGE_DRIVER}"
echo "============================================================"
# Build a temporary containers/storage config that writes directly into
# the target rootfs image store.
if [ "${STORAGE_DRIVER}" = "overlay" ]; then
cat > "${STORAGE_CONF}" <<EOF
[storage]
driver = "overlay"
runroot = "${RUNROOT}"
graphroot = "${TARGET_GRAPHROOT}"
[storage.options]
mount_program = "${FUSE_OVERLAYFS}"
EOF
else
cat > "${STORAGE_CONF}" <<EOF
[storage]
driver = "${STORAGE_DRIVER}"
runroot = "${RUNROOT}"
graphroot = "${TARGET_GRAPHROOT}"
EOF
fi
export CONTAINERS_STORAGE_CONF="${STORAGE_CONF}"
# Sanity check: list required images using the target kubeadm binary.
mapfile -t kube_images < <(
  qemu-aarch64-static \
    "${ROOTFS}/usr/local/bin/kubeadm" \
    config images list \
    --kubernetes-version "${KUBE_VERSION}"
)
if [ "${#kube_images[@]}" -eq 0 ]; then
  echo "No kubeadm images returned" >&2
  exit 1
fi
# Cache and seed all kubeadm-required images.
for img in "${kube_images[@]}"; do
  [ -n "${img}" ] || continue
  # Flatten the image reference into a filesystem-safe archive name.
  safe_name="$(printf '%s' "${img}" | sed 's#/#_#g; s#:#__#g')"
  archive="${ARCHIVE_DIR}/${safe_name}.tar"
  if [ ! -f "${archive}" ]; then
    echo "Caching archive: ${img}"
    skopeo copy \
      --override-os "${OS}" \
      --override-arch "${ARCH}" \
      "docker://${img}" \
      "oci-archive:${archive}"
  else
    echo "Archive hit: ${img}"
  fi
  # If already present in the target containers/storage, skip.
  if skopeo inspect "containers-storage:${img}" >/dev/null 2>&1; then
    echo "Store hit: ${img}"
    continue
  fi
  echo "Seeding CRI-O store: ${img}"
  skopeo copy \
    --override-os "${OS}" \
    --override-arch "${ARCH}" \
    "oci-archive:${archive}" \
    "containers-storage:${img}"
done
echo
echo "Done."
echo "Archives: ${ARCHIVE_DIR}"
echo "CRI-O storage: ${TARGET_GRAPHROOT}"

View File

@@ -1,56 +0,0 @@
#!/bin/sh
# Import kubeadm-required images from a locally-served registry into the
# CRI-O containers/storage so they are visible without network pulls.
#
# Env (all optional, defaults shown):
#   KUBE_VERSION  - kubernetes version whose image list is imported
#   REGISTRY_ADDR - address the local registry listens on
set -eu
KUBE_VERSION="${KUBE_VERSION:-v1.35.3}"
REGISTRY_ADDR="${REGISTRY_ADDR:-127.0.0.1:5000}"
mkdir -p /opt/registry
# NOTE: the distribution registry requires properly nested YAML; a flat
# key list is rejected at startup.
cat >/opt/registry/config.yml <<EOF
version: 0.1
log:
  level: info
storage:
  filesystem:
    rootdirectory: /var/lib/registry
http:
  addr: ${REGISTRY_ADDR}
EOF
cat >/opt/crictl.yaml <<'EOF'
runtime-endpoint: unix:///var/run/crio/crio.sock
image-endpoint: unix:///var/run/crio/crio.sock
timeout: 10
debug: false
EOF
# crictl only reads /etc/crictl.yaml or $CRICTL_CONFIG_FILE by default;
# without this export the config written above would be silently ignored.
CRICTL_CONFIG_FILE=/opt/crictl.yaml
export CRICTL_CONFIG_FILE
registry serve /opt/registry/config.yml >/var/log/registry.log 2>&1 &
REG_PID=$!
cleanup() {
  kill "$REG_PID" 2>/dev/null || true
}
trap cleanup EXIT INT TERM
# Wait up to ~30s for the registry to answer (POSIX counter; seq is not
# guaranteed to exist under /bin/sh).
i=0
while [ "$i" -lt 30 ]; do
  if curl -fsS "http://${REGISTRY_ADDR}/v2/_catalog" >/dev/null; then
    break
  fi
  i=$((i + 1))
  sleep 1
done
# Hard failure (set -e) if the registry never came up.
curl -fsS "http://${REGISTRY_ADDR}/v2/_catalog" >/dev/null
# Import every kubeadm-required image into the CRI-O image store.
for img in $(kubeadm config images list --kubernetes-version "${KUBE_VERSION}"); do
  # The local registry stores images without the upstream host prefix.
  name="${img#registry.k8s.io/}"
  echo "Importing ${img} from ${REGISTRY_ADDR}/${name}"
  skopeo copy --src-tls-verify=false \
    "docker://${REGISTRY_ADDR}/${name}" \
    "containers-storage:${img}"
done
echo "Imported images now visible to CRI-O:"
crictl images
# Stop the temporary registry and disarm the trap on the success path.
kill "$REG_PID" 2>/dev/null || true
wait "$REG_PID" 2>/dev/null || true
trap - EXIT INT TERM

View File

@@ -0,0 +1,65 @@
#!/bin/bash
# Smoke-test CRI-O: run the kubeadm pause image in a pod sandbox via
# crictl, verify it is visible, then tear everything down.
#
# Env (optional): RUNTIME_ENDPOINT - CRI socket to talk to.
set -euo pipefail
RUNTIME_ENDPOINT="${RUNTIME_ENDPOINT:-unix:///var/run/crio/crio.sock}"
# 1. Get pause image (first match only, in case several lines contain "pause").
PAUSE_IMAGE="$(kubeadm config images list | grep -m1 pause)"
echo "Using pause image: $PAUSE_IMAGE"
# Write configs into a private temp dir so we never clobber files in the
# caller's working directory.
WORKDIR="$(mktemp -d)"
POD_ID=""
CONTAINER_ID=""
# Clean up on EVERY exit path — under set -e a failing crictl step would
# otherwise leak the sandbox, the container, and the temp configs.
cleanup() {
  if [ -n "$CONTAINER_ID" ]; then
    crictl --runtime-endpoint "$RUNTIME_ENDPOINT" stop "$CONTAINER_ID" || true
    crictl --runtime-endpoint "$RUNTIME_ENDPOINT" rm "$CONTAINER_ID" || true
  fi
  if [ -n "$POD_ID" ]; then
    crictl --runtime-endpoint "$RUNTIME_ENDPOINT" stopp "$POD_ID" || true
    crictl --runtime-endpoint "$RUNTIME_ENDPOINT" rmp "$POD_ID" || true
  fi
  rm -rf "$WORKDIR"
}
trap cleanup EXIT
# 2. Create pod sandbox config
cat > "$WORKDIR/pod.json" <<EOF
{
    "metadata": {
        "name": "pause-test",
        "namespace": "default",
        "attempt": 1
    },
    "log_directory": "/tmp",
    "linux": {}
}
EOF
# 3. Create container config
cat > "$WORKDIR/container.json" <<EOF
{
    "metadata": {
        "name": "pause-container"
    },
    "image": {
        "image": "$PAUSE_IMAGE"
    },
    "log_path": "pause.log",
    "linux": {}
}
EOF
# 4. Run pod sandbox
POD_ID=$(crictl --runtime-endpoint "$RUNTIME_ENDPOINT" runp "$WORKDIR/pod.json")
echo "Pod ID: $POD_ID"
# 5. Create container
CONTAINER_ID=$(crictl --runtime-endpoint "$RUNTIME_ENDPOINT" create "$POD_ID" "$WORKDIR/container.json" "$WORKDIR/pod.json")
echo "Container ID: $CONTAINER_ID"
# 6. Start container
crictl --runtime-endpoint "$RUNTIME_ENDPOINT" start "$CONTAINER_ID"
# 7. Verify
echo "=== Pods ==="
crictl --runtime-endpoint "$RUNTIME_ENDPOINT" pods
echo "=== Containers ==="
crictl --runtime-endpoint "$RUNTIME_ENDPOINT" ps
# 8. Cleanup happens in the EXIT trap.
echo "Cleaning up..."
echo "Done."