1. Stage A: build the offline bundle on an Internet-connected packaging machine
The packaging machine is ideally also Rocky 9, but any x86_64 Linux works. The steps below assume dnf for packages, and either ctr or docker (pick one) for pulling images.
1.1 Directories and variables
export K8S_VER="1.27.16"
export K8S_MINOR="v1.27"
export WORK="/opt/k8s-offline-${K8S_VER}"
sudo mkdir -p $WORK/{rpms,images,cni,calico,tools}
1.2 Configure the Kubernetes 1.27 RPM repository (temporary, packaging machine only)
# /etc/yum.repos.d/kubernetes-1.27.repo
[kubernetes-1.27]
name=Kubernetes 1.27
baseurl=https://pkgs.k8s.io/core:/stable:/v1.27/rpm/
enabled=1
gpgcheck=1
gpgkey=https://pkgs.k8s.io/core:/stable:/v1.27/rpm/repodata/repomd.xml.key
Since 2023, Kubernetes packages live on pkgs.k8s.io in per-minor-version repositories; the repo above is the 1.27-specific source.
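Before downloading anything, an optional quick check (a small sketch) confirms the repo is active and actually carries the pinned build:
sudo dnf repolist enabled | grep kubernetes-1.27
sudo dnf list --showduplicates kubeadm | grep "${K8S_VER}"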
1.3 Download the RPMs (with dependencies, for installation on the offline nodes)
sudo dnf -y install dnf-plugins-core
# containerd / runc / common dependencies
sudo dnf -y download --resolve --destdir=$WORK/rpms \
containerd runc conntrack-tools iptables iproute-tc ethtool socat \
tar openssl curl bash-completion
# Rocky's default repos have no package named containerd, so the strict-mode dnf download above exits with an error; the next step fixes that.
# Install the dnf plugin and add the Docker CE repo (works on RHEL/EL9)
sudo dnf -y install dnf-plugins-core
sudo dnf config-manager --add-repo https://download.docker.com/linux/rhel/docker-ce.repo
# Refresh the metadata
sudo dnf clean all && sudo dnf makecache
# kube components (pinned to 1.27.16)
#sudo dnf -y download --resolve --destdir=$WORK/rpms \
#kubelet-${K8S_VER} kubeadm-${K8S_VER} kubectl-${K8S_VER} \
#kubernetes-cni cri-tools
# Do not use the --resolve variant above for the kube packages.
# Download only, without resolving dependencies:
sudo dnf -y download --destdir="$WORK/rpms" \
kubelet-${K8S_VER} kubeadm-${K8S_VER} kubectl-${K8S_VER} \
kubernetes-cni cri-tools
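A minimal sanity check before moving on: confirm the kube packages actually landed in $WORK/rpms at the pinned version.
ls "$WORK/rpms" | grep -E 'kubelet|kubeadm|kubectl|kubernetes-cni|cri-tools'
ls "$WORK"/rpms/*.rpm | wc -l   # rough total; dependencies from the earlier download are included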
1.4 Download the CNI plugins and the crictl tool
# CNI plugins (official binary tarball; goes to /opt/cni/bin later)
curl -L -o $WORK/cni/cni-plugins-linux-amd64-v1.3.0.tgz \
https://github.com/containernetworking/plugins/releases/download/v1.3.0/cni-plugins-linux-amd64-v1.3.0.tgz
# crictl (from cri-tools)
CRICTL_VER="v1.27.0" # any version compatible with the cluster works
curl -L -o $WORK/tools/crictl-${CRICTL_VER}-linux-amd64.tar.gz \
https://github.com/kubernetes-sigs/cri-tools/releases/download/${CRICTL_VER}/crictl-${CRICTL_VER}-linux-amd64.tar.gz
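Optional: verify both archives list cleanly before they go into the bundle; a corrupt download is much cheaper to catch here than on the air-gapped side.
tar -tzf $WORK/cni/cni-plugins-linux-amd64-v1.3.0.tgz >/dev/null && echo "cni tarball OK"
tar -tzf $WORK/tools/crictl-${CRICTL_VER}-linux-amd64.tar.gz >/dev/null && echo "crictl tarball OK"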
1.5 Download the Calico manifest and images
curl -L -o $WORK/calico/calico-v3.26.4.yaml \
https://raw.githubusercontent.com/projectcalico/calico/v3.26.4/manifests/calico.yaml
# Extract the image names (or list them by hand)
grep -E "image: .*calico" $WORK/calico/calico-v3.26.4.yaml | awk '{print $2}' | sort -u > $WORK/images/calico-images.txt
[root@localhost ~]# cat $WORK/images/calico-images.txt
docker.io/calico/cni:v3.26.4
docker.io/calico/kube-controllers:v3.26.4
docker.io/calico/node:v3.26.4
1.6 Generate the image list kubeadm needs (pinned to v1.27.16)
# Temporarily install kubeadm on this machine (or use a container) to print the image list
sudo dnf -y install kubeadm-${K8S_VER}
kubeadm config images list --kubernetes-version v${K8S_VER} > $WORK/images/k8s-images.txt
# 'kubeadm config images list' is the officially recommended way to get the offline image list; it also accepts --config to point at a custom registry.
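For reference, a sketch of the --config variant, assuming a hypothetical private registry at 192.168.30.150:5000 (substitute your own):
cat > /tmp/images-config.yaml <<'EOF'
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
kubernetesVersion: v1.27.16
imageRepository: 192.168.30.150:5000
EOF
kubeadm config images list --config /tmp/images-config.yaml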
1.7 Pull and export the images (pick one: Docker or containerd)
# Option A: Docker
while read -r img; do docker pull "$img"; done < $WORK/images/k8s-images.txt
while read -r img; do docker pull "$img"; done < $WORK/images/calico-images.txt
docker save $(cat $WORK/images/k8s-images.txt $WORK/images/calico-images.txt) \
-o $WORK/images/k8s-${K8S_VER}-and-calico-v3.26.4.tar
# Option B: containerd (ctr)
sudo systemctl enable --now containerd || true
while read -r img; do sudo ctr -n k8s.io i pull "$img"; done < $WORK/images/k8s-images.txt
while read -r img; do sudo ctr -n k8s.io i pull "$img"; done < $WORK/images/calico-images.txt
sudo ctr -n k8s.io i export $WORK/images/k8s-${K8S_VER}-and-calico-v3.26.4.tar $(cat $WORK/images/k8s-images.txt $WORK/images/calico-images.txt)
1.8 Create the final bundle
cd $(dirname $WORK)
sudo tar czf k8s-offline-${K8S_VER}-rocky9.tar.gz $(basename $WORK)
# Copy this tar.gz to every offline node (control plane and workers)
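Optional: record a checksum alongside the bundle so its integrity can be re-verified after the copy:
sha256sum k8s-offline-${K8S_VER}-rocky9.tar.gz > k8s-offline-${K8S_VER}-rocky9.tar.gz.sha256
# on the offline node, in the same directory: sha256sum -c k8s-offline-${K8S_VER}-rocky9.tar.gz.sha256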
2. Docker offline installation bundle
2.1 Build the offline bundle on an online machine
# 0) Variables
export WORK="/opt/docker-offline-$(date +%F)"
sudo mkdir -p "$WORK"/{rpms,images,scripts}
ARCH=$(uname -m) # usually x86_64; aarch64 on ARM64
# 1) Add the official Docker repo (generic RHEL/EL, works on Rocky 9)
sudo dnf -y install dnf-plugins-core
sudo dnf config-manager --add-repo https://download.docker.com/linux/rhel/docker-ce.repo
sudo dnf clean all && sudo dnf makecache
# 2) Download the RPMs needed for a full-featured install (with dependencies)
PKGS="docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin docker-ce-rootless-extras"
# --resolve pulls the full dependency tree; strict=0 skips temporarily unavailable packages instead of aborting
sudo dnf -y download --resolve --setopt=strict=0 \
--destdir="$WORK/rpms" --arch="$ARCH" $PKGS
# Also bundle the common rootless-mode dependencies (in case the step above missed them)
sudo dnf -y download --resolve --setopt=strict=0 \
--destdir="$WORK/rpms" --arch="$ARCH" \
slirp4netns fuse-overlayfs container-selinux
# 3) (Optional) bundle some basic test images
docker pull hello-world:latest
docker pull alpine:latest
docker pull busybox:stable
docker save hello-world:latest alpine:latest busybox:stable -o "$WORK/images/docker-base-images.tar"
# 4) Generate local repo metadata + the install script
sudo dnf -y install createrepo_c
createrepo_c "$WORK/rpms"
cat > "$WORK/scripts/install-offline.sh" <<"EOF"
#!/usr/bin/env bash
set -euo pipefail
DIR="$(cd "$(dirname "$0")"/.. && pwd)"
# Temporary local-repo install method (the more robust option)
sudo dnf -y install createrepo_c || true
sudo createrepo_c "$DIR/rpms"
sudo tee /etc/yum.repos.d/docker-offline.repo >/dev/null <<REPO
[docker-offline]
name=Docker Offline
baseurl=file://$DIR/rpms
enabled=1
gpgcheck=0
REPO
# Install
sudo dnf -y install docker-ce docker-ce-cli containerd.io \
docker-buildx-plugin docker-compose-plugin docker-ce-rootless-extras
# Start now and enable at boot
sudo systemctl enable --now docker
# Optional: add the invoking user to the docker group (takes effect after re-login)
# (guard against an unset SUDO_USER, which would abort the script under set -u)
if [ -n "${SUDO_USER:-}" ] && id -u "$SUDO_USER" &>/dev/null; then
sudo usermod -aG docker "$SUDO_USER" || true
fi
# Load the base images (if present)
if [ -f "$DIR/images/docker-base-images.tar" ]; then
sudo docker load -i "$DIR/images/docker-base-images.tar"
fi
echo "Done. Check: docker version && docker compose version && docker buildx version"
EOF
chmod +x "$WORK/scripts/install-offline.sh"
# 5) Create the final bundle
sudo tar -C "$(dirname "$WORK")" -czf "${WORK}.tar.gz" "$(basename "$WORK")"
echo "离线包已生成:${WORK}.tar.gz"
2.2 Install on the offline machine
# Copy ${WORK}.tar.gz to the offline host, extract it, and run the script:
sudo tar -C /opt -xzf /path/to/docker-offline-*.tar.gz
cd /opt/docker-offline-*/scripts
# Either run the install script:
#sudo ./install-offline.sh
# ...or install the RPMs straight from the extracted directory:
sudo dnf -y --disablerepo='*' --nogpgcheck install \
/opt/docker-offline-2025-09-01/rpms/*.rpm
# Verify after re-logging in
docker version
[root@localhost opt]# docker version
Client: Docker Engine - Community
 Version:           28.3.3
 API version:       1.51
 Go version:        go1.24.5
 Git commit:        980b856
 Built:             Fri Jul 25 11:36:28 2025
 OS/Arch:           linux/amd64
 Context:           default

Server: Docker Engine - Community
 Engine:
  Version:          28.3.3
  API version:      1.51 (minimum version 1.24)
  Go version:       go1.24.5
  Git commit:       bea959c
  Built:            Fri Jul 25 11:33:28 2025
  OS/Arch:          linux/amd64
  Experimental:     false
 containerd:
  Version:          1.7.27
  GitCommit:        05044ec0a9a75232cad458027ca83437aae3f4da
 runc:
  Version:          1.2.5
  GitCommit:        v1.2.5-0-g59923ef
 docker-init:
  Version:          0.19.0
  GitCommit:        de40ad0
docker compose version # note: this is "docker compose" (the v2 plugin), not the legacy docker-compose
docker run --rm hello-world
3. Stage B: install and initialize on the offline nodes
3.1 System preparation (all nodes)
sudo tar xzf k8s-offline-1.27.16-rocky9.tar.gz -C /opt   # extract under /opt so $OFF below resolves
OFF="/opt/k8s-offline-1.27.16"
hostnamectl set-hostname k8s-01
echo "192.168.30.150 k8s-01" >> /etc/hosts
ping -c1 k8s-01
swapoff -a
sed -ri 's/^\s*([^#].*\sswap\s)/#\1/' /etc/fstab
cat >/etc/sysctl.d/k8s.conf <<'EOF'
net.ipv4.ip_forward=1
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
EOF
sysctl --system
# Load the IPVS kernel modules first
cat >/etc/modules-load.d/ipvs.conf <<'EOF'
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
EOF
for m in ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack; do modprobe $m; done
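A quick check that the modules actually loaded:
lsmod | grep -E '^ip_vs|^nf_conntrack'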
3.1.1 Disable swap (including zram)
# Rocky 9 enables zram by default, and kubelet requires swap to be off:
sudo swapoff -a
# Permanent fix: remove the zram generator, or disable its unit
sudo dnf -y remove zram-generator-defaults || true
# Comment out any swap entries in /etc/fstab, then confirm:
lsblk | grep -E 'SWAP|zram' || true
# On RHEL 9 / systemd-based distros swap usually comes from zram-generator; disabling or removing it is one of the officially suggested approaches.
3.1.2 Kernel modules and sysctl (bridge/overlay/IP forwarding)
# /etc/modules-load.d/k8s.conf
echo -e "overlay\nbr_netfilter" | sudo tee /etc/modules-load.d/k8s.conf
sudo modprobe overlay && sudo modprobe br_netfilter
# /etc/sysctl.d/k8s.conf
cat <<'EOF' | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF
sudo sysctl --system
# (These settings are explicitly required by the Kubernetes docs and the Fedora/Rocky guides.)
3.1.3 SELinux and firewall
1. Prefer keeping SELinux in Enforcing mode (drop to Permissive temporarily if you hit container labeling issues while troubleshooting).
2. Either open the required firewall ports or stop firewalld temporarily; see the official "Ports and Protocols" page for the full list. At minimum (example below):
Control plane: 6443/TCP (API), 2379-2380/TCP (etcd), 10250/10257/10259 TCP
All nodes: 10250/TCP; plus CNI ports (e.g. Calico VXLAN defaults to 4789/UDP) per your CNI's docs.
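If you keep firewalld running, a sketch of opening the ports listed above on a control-plane node (extend per your CNI; 4789/UDP shown for Calico VXLAN):
sudo firewall-cmd --permanent --add-port=6443/tcp
sudo firewall-cmd --permanent --add-port=2379-2380/tcp
sudo firewall-cmd --permanent --add-port=10250/tcp
sudo firewall-cmd --permanent --add-port=10257/tcp
sudo firewall-cmd --permanent --add-port=10259/tcp
sudo firewall-cmd --permanent --add-port=4789/udp
sudo firewall-cmd --reload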
3.2 Install the RPMs (straight from the offline directory)
cd $OFF/rpms
sudo dnf -y --disablerepo='*' install ./*.rpm
sudo systemctl enable --now containerd
# (--disablerepo='*' keeps dnf from fetching online metadata, which is exactly what you want offline)
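Confirm the pinned versions actually got installed:
rpm -q kubelet kubeadm kubectl
containerd --version && runc --version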
3.2.1 Install the CNI plugins and crictl
sudo mkdir -p /opt/cni/bin
sudo tar -xzf $OFF/cni/cni-plugins-linux-amd64-v1.3.0.tgz -C /opt/cni/bin
sudo tar -xzf $OFF/tools/crictl-v1.27.0-linux-amd64.tar.gz -C /usr/local/bin
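Optional but tidy: point crictl at containerd once, so later commands don't need --runtime-endpoint every time:
cat <<'EOF' | sudo tee /etc/crictl.yaml
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
EOF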
3.3 Configure containerd (systemd cgroup & pause image)
# Generate the default config, then edit it
mkdir -p /etc/containerd
containerd config default > /etc/containerd/config.toml
# Key points: set SystemdCgroup=true and make sure sandbox_image uses the pause:3.9 we already imported
sudo sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
sudo sed -i 's@sandbox_image = .*@sandbox_image = "registry.k8s.io/pause:3.9"@' /etc/containerd/config.toml
# Open /etc/containerd/config.toml and confirm/adjust the following (all in the same file):

# Top of file: do not disable CRI
disabled_plugins = []   # ← if io.containerd.grpc.v1.cri (or ["cri"]) is listed here, remove it or empty the list
version = 2             # add this line if the template lacks it

# The CRI plugin section must exist and be enabled (the default template has it,
# along with many sub-settings that can stay at their defaults):
[plugins."io.containerd.grpc.v1.cri"]
sandbox_image = "registry.k8s.io/pause:3.9"   # pause 3.9 pairs with 1.27.x and matches the image we imported offline
# for an offline/private registry, point it there instead, e.g.:
# sandbox_image = "192.168.30.150:5000/pause:3.9"

[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
runtime_type = "io.containerd.runc.v2"

[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
SystemdCgroup = true    # kubelet requires the systemd cgroup driver
# Restart and self-check
systemctl daemon-reload
systemctl enable --now containerd
# kubelet does not need to be started yet; kubeadm will start it
systemctl status containerd --no-pager -l
# Confirm the CRI plugin is loaded (any one of these producing output is enough):
ctr plugins ls | grep cri
# expect to see: io.containerd.grpc.v1.cri   <OK>
# or
crictl --runtime-endpoint unix:///run/containerd/containerd.sock info
# output showing runtimeName etc. means it's OK; skip this if crictl isn't installed
sudo systemctl restart containerd
# (On RHEL 9 / cgroup v2, Kubernetes recommends the systemd cgroup driver; containerd must enable it explicitly.)
3.4 Preload the images (offline import)
sudo ctr -n k8s.io images import $OFF/images/k8s-1.27.16-and-calico-v3.26.4.tar
sudo ctr -n k8s.io images ls | grep -E 'kube-|coredns|etcd|pause|calico'
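A small sketch to verify nothing is missing: check every ref from the two image lists against what containerd now holds.
for img in $(cat $OFF/images/k8s-images.txt $OFF/images/calico-images.txt); do
  sudo ctr -n k8s.io images ls -q | grep -qxF "$img" || echo "MISSING: $img"
done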
3.5 kubeadm init (control-plane node)
Create the kubeadm config (adjust advertiseAddress and the Pod/Service subnets as needed; Calico's conventional default pool is 192.168.0.0/16, though this guide uses 172.20.0.0/16):
[root@k8s-01 ~]# cat kubeadm.yaml
# kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.30.150
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
clusterName: kubernetes
kubernetesVersion: v1.27.16
imageRepository: registry.k8s.io
networking:
  serviceSubnet: 10.96.0.0/12
  podSubnet: 172.20.0.0/16
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd
The annotated variant below additionally tunes IPVS (scheduler choice):
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.30.151   # ← change to this control-plane node's IP
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
clusterName: kubernetes
kubernetesVersion: v1.27.16
imageRepository: registry.k8s.io   # point at your private registry for offline/intranet installs
networking:
  serviceSubnet: 10.96.0.0/12
  podSubnet: 172.20.0.0/16   # must match the CIDR Calico uses (which is this one here)
# note: no dns.type field here; kubeadm v1beta3 removed it, CoreDNS being the only supported DNS
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
ipvs:
  scheduler: rr   # optional: rr / wrr / wlc / sh / mh, etc.
  # strictARP: true   # enable later if you deploy MetalLB in L2 mode
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd
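Before the real init, a dry run is a cheap way to catch config or image problems without changing the node (assuming the file above is saved as kubeadm.yaml):
sudo kubeadm init --config kubeadm.yaml --dry-run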
# 0) Hostname resolution (avoids the earlier hostname warning)
hostnamectl set-hostname k8s-01
grep -q '192.168.30.150 k8s-01' /etc/hosts || echo '192.168.30.150 k8s-01' >> /etc/hosts
# 1) Disable swap (if not done yet)
swapoff -a
sed -ri 's/^\s*([^#].*\sswap\s)/#\1/' /etc/fstab
# 2) Required kernel modules & sysctl (common kubelet blockers)
modprobe br_netfilter || true
cat >/etc/modules-load.d/k8s.conf <<'EOF'
br_netfilter
EOF
cat >/etc/sysctl.d/k8s.conf <<'EOF'
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
EOF
sysctl --system
# 3) (Optional) rule out policy blockers: SELinux/firewall (relax them first on an offline/intranet setup)
setenforce 0 2>/dev/null || true
sed -i 's/^SELINUX=enforcing/SELINUX=permissive/' /etc/selinux/config 2>/dev/null || true
systemctl disable --now firewalld 2>/dev/null || true
# 4) Restart the key services
systemctl restart containerd
systemctl restart kubelet
# 5) Observe again
crictl --runtime-endpoint /run/containerd/containerd.sock ps -a | egrep 'kube-(apiserver|controller-manager|scheduler)|etcd'
journalctl -u kubelet -e --no-pager | tail -n 200
# Run the initialization:
sudo kubeadm init --config kubeadm.yaml
# Offline initialization (no Internet access), verbose, with certificate upload:
kubeadm init --config kubeadm.yaml --upload-certs -v=5
# On success, configure kubectl
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
sudo systemctl disable --now firewalld || true
# Load the modules immediately
sudo modprobe overlay && sudo modprobe br_netfilter
# Persist them
echo -e "overlay\nbr_netfilter" | sudo tee /etc/modules-load.d/k8s.conf
# Required sysctl
sudo tee /etc/sysctl.d/k8s.conf >/dev/null <<'EOF'
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
sudo sysctl --system
# Quickly confirm all three values are 1
sysctl net.ipv4.ip_forward
sysctl net.bridge.bridge-nf-call-iptables
sysctl net.bridge.bridge-nf-call-ip6tables
Install Calico (from the offline file):
kubectl apply -f $OFF/calico/calico-v3.26.4.yaml
kubectl -n kube-system get pods -w
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
kubectl get pods -n kube-system -o wide
kubectl get nodes -o wide
# Enable kubelet at boot (kubeadm started it for now; enabling it explicitly is cleaner)
systemctl enable --now kubelet
# Configure kubectl and verify the control plane
# If admin.conf exists (kubeadm has already written it):
[ -f /etc/kubernetes/admin.conf ] && {
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
}
kubectl cluster-info
kubectl get pods -n kube-system -o wide
kubectl get nodes -o wide # the control plane is up, but the node may show NotReady until the CNI is installed
# In the rare case /etc/kubernetes/admin.conf is missing, regenerate it:
kubeadm init phase kubeconfig admin
# Load the IPVS kernel modules (kube-proxy is configured for ipvs)
modprobe ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack
cat >/etc/modules-load.d/ipvs.conf <<'EOF'
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
EOF
# Install your CNI (Calico, offline)
Make sure CALICO_IPV4POOL_CIDR in the manifest matches kubeadm's podSubnet of 172.20.0.0/16.
The calico/node, cni, and kube-controllers v3.26.4 images are already imported locally, so applying the offline calico.yaml is enough:
kubectl apply -f /path/to/calico.yaml
kubectl -n kube-system get pods -w # wait for calico-*, coredns, and kube-proxy to all be Running
kubectl get nodes # the node should now report Ready
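To confirm kube-proxy really came up in IPVS mode (works offline), check its ConfigMap and logs; the log line to look for mentions the ipvs Proxier:
kubectl -n kube-system get cm kube-proxy -o yaml | grep 'mode:'
kubectl -n kube-system logs -l k8s-app=kube-proxy --tail=50 | grep -i ipvs || true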
3.6 Join the worker nodes
# Repeat the system prep / RPM install / image import steps on each worker, then generate the join command on the control plane:
[root@k8s-01 ~]# kubeadm token create --print-join-command
kubeadm join 192.168.30.150:6443 --token fnturx.ph8jg99zgdmze81w --discovery-token-ca-cert-hash sha256:1ef5e1f3558c8f9336dd4785c0207cb837cceb37c253179e9988f03dc0c00146
# Run the printed kubeadm join ... command on each worker to join it to the cluster.
# To add extra control-plane nodes later, run:
kubeadm init phase upload-certs --upload-certs
kubeadm token create --print-join-command --certificate-key <key printed by the previous step>
# Persist the services
systemctl enable --now kubelet
systemctl enable --now containerd