I. Preparation
Three hosts
Ubuntu 22.04.4, switched to Aliyun's apt mirrors (the "jammy" entries below are the 22.04 series)
Back up the original list first: sudo cp /etc/apt/sources.list /etc/apt/sources.list.bak
vi /etc/apt/sources.list
deb http://mirrors.aliyun.com/ubuntu/ jammy main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ jammy main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ jammy-updates main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ jammy-updates main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ jammy-backports main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ jammy-backports main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ jammy-security main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ jammy-security main restricted universe multiverse
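After saving the new list, refresh the package index so the mirror switch takes effect:
sudo apt update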
Host configuration
cat >> /etc/hosts << "EOF"
192.168.110.88 k8s-master-01 m1
192.168.110.70 k8s-node-01 n1
192.168.110.176 k8s-node-02 n2
EOF
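You can confirm the short names resolve before moving on; getent reads /etc/hosts directly:
getent hosts m1 n1 n2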
Cluster communication (passwordless SSH)
ssh-keygen
ssh-copy-id m1
ssh-copy-id n1
ssh-copy-id n2
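Assuming the commands above were run from the master, a one-line check that the keys landed on every node:
for h in m1 n1 n2; do ssh $h hostname; done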
Disable the system swap partition
Run on every host in the cluster:
sed -ri 's/^([^#].*swap.*)$/#\1/' /etc/fstab && grep swap /etc/fstab && swapoff -a && free -h
Time synchronization
On the master node. Ubuntu's stock /etc/chrony/chrony.conf loads *.conf snippets from the /etc/chrony/conf.d directory, so the settings must go into a file inside that directory (not into a file named conf.d):
sudo apt install chrony -y
mv /etc/chrony/conf.d /etc/chrony/conf.d.bak
mkdir -p /etc/chrony/conf.d
cat > /etc/chrony/conf.d/aliyun.conf << EOF
server ntp1.aliyun.com iburst minpoll 4 maxpoll 10
server ntp2.aliyun.com iburst minpoll 4 maxpoll 10
server ntp3.aliyun.com iburst minpoll 4 maxpoll 10
server ntp4.aliyun.com iburst minpoll 4 maxpoll 10
server ntp5.aliyun.com iburst minpoll 4 maxpoll 10
server ntp6.aliyun.com iburst minpoll 4 maxpoll 10
server ntp7.aliyun.com iburst minpoll 4 maxpoll 10
driftfile /var/lib/chrony/drift
makestep 10 3
rtcsync
allow 0.0.0.0/0
local stratum 10
keyfile /etc/chrony/chrony.keys
logdir /var/log/chrony
stratumweight 0.05
noclientlog
logchange 0.5
EOF
systemctl restart chronyd.service  # restart rather than start, so the new config is loaded whether or not the service was already running
systemctl enable chronyd.service
systemctl status chronyd.service
On the worker nodes (they sync from the master at 192.168.110.88; the snippet file name is arbitrary):
sudo apt install chrony -y
mv /etc/chrony/conf.d /etc/chrony/conf.d.bak
mkdir -p /etc/chrony/conf.d
cat > /etc/chrony/conf.d/master.conf << EOF
server 192.168.110.88 iburst
driftfile /var/lib/chrony/drift
makestep 10 3
rtcsync
local stratum 10
keyfile /etc/chrony/chrony.keys
logdir /var/log/chrony
stratumweight 0.05
noclientlog
logchange 0.5
EOF
systemctl restart chronyd.service
systemctl enable chronyd.service
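On any node you can verify that chrony has picked a source and is tracking it (exact output varies by chrony version):
chronyc sources -v
chronyc tracking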
Set kernel parameters
Run on every host in the cluster (the original listed net.ipv4.tcp_max_syn_backlog twice; it appears once below):
cat > /etc/sysctl.d/k8s.conf << EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.ip_conntrack_max = 65536
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF
# apply immediately
sysctl --system
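Note that the two net.bridge.* keys only exist once the br_netfilter module is loaded, and containerd's default snapshotter needs overlay. The upstream kubeadm prerequisites load both at boot; a minimal sketch, run on every host:
cat > /etc/modules-load.d/containerd.conf << EOF
overlay
br_netfilter
EOF
modprobe overlay
modprobe br_netfilter
sysctl --system  # re-run so the bridge keys now apply cleanly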
Install common tools
sudo apt update
sudo apt install -y expect wget jq psmisc vim net-tools telnet lvm2 git ntpdate chrony bind9-utils rsync unzip
Install ipvsadm
sudo apt install -y ipvsadm ipset sysstat conntrack
# libseccomp is preinstalled
dpkg -l | grep libseccomp
On Ubuntu 22.04.4 the /etc/sysconfig/modules/ directory normally does not exist: Ubuntu uses systemd as its init system rather than traditional SysVinit, so it does not manage module loading through /etc/sysconfig/modules/.
To make sure the IPVS modules are loaded automatically at boot, do the following:
Create /etc/modules-load.d/ipvs.conf: list in it every module that should be loaded at startup, and systemd-modules-load will load them automatically at boot.
cat > /etc/modules-load.d/ipvs.conf << EOF
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
nf_conntrack
EOF
Load the modules: use modprobe to load them immediately, or let the system load them automatically at the next reboot.
for m in ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_fo ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack; do sudo modprobe $m; done
Verify the modules are loaded with lsmod:
lsmod | grep ip_vs
II. Install containerd (on all three nodes)
# any libseccomp version above 2.4 is sufficient; no reinstall needed
root@k8s-master-01:/etc/modules-load.d# dpkg -l | grep libseccomp
ii libseccomp2:amd64 2.5.3-2ubuntu2 amd64 high level interface to Linux seccomp filter
Install:
apt install containerd* -y
containerd --version  # check the version
Configuration
mkdir -pv /etc/containerd
containerd config default > /etc/containerd/config.toml  # generate a default config file for containerd
vi /etc/containerd/config.toml
Point the sandbox (pause) image at your own mirrored repository:
sandbox_image = "registry.cn-guangzhou.aliyuncs.com/xingcangku/eeeee:3.8"
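If you prefer not to edit the file by hand, the same change can be made with sed, assuming the single default sandbox_image line generated above (the registry path is the author's own mirror):
sed -i 's|sandbox_image = ".*"|sandbox_image = "registry.cn-guangzhou.aliyuncs.com/xingcangku/eeeee:3.8"|' /etc/containerd/config.toml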
# configure systemd as the cgroup driver for containers
grep SystemdCgroup /etc/containerd/config.toml
sed -i 's/SystemdCgroup \= false/SystemdCgroup \= true/' /etc/containerd/config.toml
grep SystemdCgroup /etc/containerd/config.toml
Configure registry mirrors (required: without them the CNI plugin images cannot be pulled from docker.io later)
# reference: https://github.com/containerd/containerd/blob/main/docs/cri/config.md#registry-configuration
# set config_path = "/etc/containerd/certs.d"
sed -i 's/config_path\ =.*/config_path = \"\/etc\/containerd\/certs.d\"/g' /etc/containerd/config.toml
mkdir -p /etc/containerd/certs.d/docker.io
cat > /etc/containerd/certs.d/docker.io/hosts.toml << EOF
server = "https://docker.io"
[host."https://dockerproxy.com"]
  capabilities = ["pull", "resolve"]
[host."https://docker.m.daocloud.io"]
  capabilities = ["pull", "resolve"]
[host."https://docker.chenby.cn"]
  capabilities = ["pull", "resolve"]
[host."https://registry.docker-cn.com"]
  capabilities = ["pull", "resolve"]
[host."http://hub-mirror.c.163.com"]
  capabilities = ["pull", "resolve"]
EOF
# start containerd and enable it at boot
systemctl daemon-reload && systemctl restart containerd
systemctl enable --now containerd
# check containerd status
systemctl status containerd
# check the containerd version
ctr version
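As a quick end-to-end check of the mirror setup, you can try pulling a small image through ctr; the --hosts-dir flag makes ctr honor the same certs.d directory the CRI plugin uses (busybox is just an arbitrary small test image):
ctr images pull --hosts-dir /etc/containerd/certs.d docker.io/library/busybox:latest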
III. Install the latest kubeadm, kubelet, and kubectl
1. Prepare Kubernetes on all three machines
Configure the package source
apt-get update && apt-get install -y apt-transport-https
mkdir -p /etc/apt/keyrings
curl -fsSL https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.30/deb/Release.key |
gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.30/deb/ /" |
tee /etc/apt/sources.list.d/kubernetes.list
apt-get update
apt-get install -y kubelet kubeadm kubectl
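Optionally pin the three packages so a routine apt upgrade cannot move the cluster to a new Kubernetes version behind your back:
apt-mark hold kubelet kubeadm kubectl
kubeadm version  # confirm what was installed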
2. Master-node steps (do not run these on the worker nodes)
Initialize the master node (master only)
# you can list the required images with kubeadm config images list
[root@k8s-master-01 ~]# kubeadm config images list
registry.k8s.io/kube-apiserver:v1.30.0
registry.k8s.io/kube-controller-manager:v1.30.0
registry.k8s.io/kube-scheduler:v1.30.0
registry.k8s.io/kube-proxy:v1.30.0
registry.k8s.io/coredns/coredns:v1.11.1
registry.k8s.io/pause:3.9
registry.k8s.io/etcd:3.5.12-0
kubeadm config print init-defaults > kubeadm.yaml
Edit the generated kubeadm.yaml. Compared with the defaults, the changes are: advertiseAddress and the node name, criSocket, imageRepository (Aliyun mirror), kubernetesVersion, podSubnet, plus the KubeProxyConfiguration (ipvs mode) and KubeletConfiguration (systemd cgroup driver) documents appended at the end:
root@k8s-master-01:~# cat kubeadm.yaml
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.110.88
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  name: k8s-master-01
  taints: null
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: 1.30.3
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  podSubnet: 10.244.0.0/16
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd
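Before running init you can pre-pull all control-plane images using the same config file, which surfaces registry problems early:
kubeadm config images pull --config kubeadm.yaml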
Deploy Kubernetes
kubeadm init --config=kubeadm.yaml --ignore-preflight-errors=SystemVerification --ignore-preflight-errors=Swap
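If init succeeds, kubeadm prints two things you need next: the kubeconfig setup for kubectl on the master, and a kubeadm join command for the workers. The shape is always the following; the actual token and CA hash come from your own init output:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# on n1 and n2, paste the join command printed by your init, e.g.:
# kubeadm join 192.168.110.88:6443 --token <token> --discovery-token-ca-cert-hash sha256:<hash>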
Deploy the network plugin
Download the network plugin
wget https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
[root@k8s-master-01 ~]# grep -i image kube-flannel.yml
image: docker.io/flannel/flannel:v0.25.5
image: docker.io/flannel/flannel-cni-plugin:v1.5.1-flannel1
image: docker.io/flannel/flannel:v0.25.5
Change them to the following (mirror the images into your own Aliyun registry first):
root@k8s-master-01:~# grep -i image kube-flannel.yml
image: registry.cn-guangzhou.aliyuncs.com/xingcangku/cccc:0.25.5
image: registry.cn-guangzhou.aliyuncs.com/xingcangku/ddd:1.5.1
image: registry.cn-guangzhou.aliyuncs.com/xingcangku/cccc:0.25.5
Applying it on the master is sufficient:
kubectl apply -f kube-flannel.yml
kubectl delete -f kube-flannel.yml  # run this only if you want to remove the network plugin
Check the status
kubectl -n kube-flannel get pods
kubectl -n kube-flannel get pods -w
[root@k8s-master-01 ~]# kubectl get nodes  # all nodes should be Ready
[root@k8s-master-01 ~]# kubectl -n kube-system get pods  # both coredns pods should be Ready as well
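A quick smoke test once everything is Ready (nginx here is an arbitrary image; remove the deployment afterwards):
kubectl create deployment nginx --image=nginx --replicas=2
kubectl get pods -o wide
kubectl delete deployment nginx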
Set up kubectl command completion (on all nodes; this is Ubuntu, so apt rather than yum, and ~/.bashrc rather than ~/.bash_profile)
apt install -y bash-completion
mkdir -p $HOME/.kube
kubectl completion bash > $HOME/.kube/completion.bash.inc
echo "source '$HOME/.kube/completion.bash.inc'" >> $HOME/.bashrc
source $HOME/.bashrc
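Optionally add a short alias that reuses the same completion (the complete hookup is the one documented in kubectl's own completion instructions):
echo "alias k=kubectl" >> $HOME/.bashrc
echo "complete -o default -F __start_kubectl k" >> $HOME/.bashrc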