Objective#
- Install and deploy a highly available Kubernetes (k8s) cluster
Platform#
- Hardware: physical servers
- OS: openEuler 22.03 LTS SP2
Deployment Guide#
Cluster topology diagram
Task 1: Preparation#
- Rename the hostnames
# Set the hostname of 10.10.3.121 to future-k8s-node0
hostnamectl set-hostname future-k8s-node0 && bash
# Set the hostname of 10.10.3.122 to future-k8s-node1
hostnamectl set-hostname future-k8s-node1 && bash
# Set the hostname of 10.10.3.123 to future-k8s-node2
hostnamectl set-hostname future-k8s-node2 && bash
# Set the hostname of 10.10.3.124 to future-k8s-node3
hostnamectl set-hostname future-k8s-node3 && bash
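If it is more convenient to rename all four nodes from a single admin host, a minimal sketch (assuming root SSH access to each node; this loop is illustrative and not part of the original procedure):
# Hypothetical helper: set every node's hostname over SSH in one pass
for entry in "10.10.3.121 future-k8s-node0" "10.10.3.122 future-k8s-node1" \
             "10.10.3.123 future-k8s-node2" "10.10.3.124 future-k8s-node3"; do
  ip=${entry%% *}; name=${entry##* }
  ssh root@"$ip" "hostnamectl set-hostname $name"
done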
- Pre-installation configuration changes
# Stop and disable the firewall
systemctl stop firewalld
systemctl disable firewalld
firewall-cmd --state
# Set SELinux to permissive now and make it persistent
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
cat /etc/selinux/config
# Disable swap now and make it persistent
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab
cat /etc/fstab
# Add host entries
cat >> /etc/hosts << EOF
10.10.3.121 future-k8s-node0
10.10.3.122 future-k8s-node1
10.10.3.123 future-k8s-node2
10.10.3.124 future-k8s-node3
10.10.3.125 future-k8s-vip
EOF
# Verify
cat /etc/hosts
# Add bridge filtering and kernel forwarding settings
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
# Verify
cat /etc/sysctl.d/k8s.conf
# Load the br_netfilter module
modprobe br_netfilter
# Confirm the module is loaded
lsmod | grep br_netfilter
# Apply the bridge filtering and forwarding settings
sysctl -p /etc/sysctl.d/k8s.conf
# Time synchronization (chrony is used here; running ntpd and chronyd together would conflict)
yum install chrony -y
systemctl start chronyd
systemctl enable chronyd
# Append the local NTP server and the subnet that is allowed to sync from this host
echo "
server 10.10.3.70 iburst
allow 10.10.3.0/24
" >> /etc/chrony.conf
timedatectl set-ntp true
systemctl restart chronyd
timedatectl status
date
- Install ipset and ipvsadm
# Install ipset and ipvsadm
yum -y install ipset ipvsadm
Configure how the ipvs modules are loaded
# Add the modules that need to be loaded
cat > /etc/sysconfig/modules/ipvs.modules << 'EOF'
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF
# Verify
cat /etc/sysconfig/modules/ipvs.modules
# Make the script executable, run it, and check that the modules are loaded
chmod 755 /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules
lsmod | grep -e ip_vs -e nf_conntrack
# Reboot
reboot
Once the preparation is complete, reboot all nodes.
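After the nodes come back up, it is worth confirming that the changes survived the reboot. A minimal check (expected results noted in the comments); note that modprobe alone is not persistent, so a missing module can be added to systemd's modules-load configuration:
# Quick post-reboot verification (run on every node)
getenforce                              # expected: Permissive
swapon --show                           # expected: no output (swap disabled)
sysctl net.ipv4.ip_forward              # expected: net.ipv4.ip_forward = 1
lsmod | grep br_netfilter               # expected: br_netfilter listed
lsmod | grep -e ip_vs -e nf_conntrack   # expected: the ipvs modules listed
# If br_netfilter did not survive the reboot, make it persistent (assumes systemd modules-load)
echo br_netfilter > /etc/modules-load.d/k8s.conf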
Task 2: Install Docker#
- Configure the Docker CE yum repository. Create the docker-ce.repo file and copy the following content into it:
echo '[docker-ce-stable]
name=Docker CE Stable - $basearch
baseurl=https://download.docker.com/linux/centos/7/$basearch/stable
enabled=1
gpgcheck=1
gpgkey=https://download.docker.com/linux/centos/gpg' > /etc/yum.repos.d/docker-ce.repo
Save and exit the file.
- Install Docker CE with the following command:
# docker-compose-plugin provides the "docker compose" subcommand checked below
yum -y install docker-ce docker-ce-cli containerd.io docker-compose-plugin
# Start docker and enable it at boot
systemctl start docker
systemctl enable docker
# Check the versions
docker -v
docker compose version
- Modify the Docker configuration: set the cgroup driver to systemd, as follows.
# Write the configuration to daemon.json
echo '{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "data-root": "/data/docker"
}' > /etc/docker/daemon.json
# Verify
cat /etc/docker/daemon.json
systemctl daemon-reload
systemctl restart docker
docker info
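To confirm the cgroup driver change took effect without scanning the full docker info output, a one-line check (CgroupDriver is a field of docker info's Go template output):
# Should print "systemd" after the restart above
docker info --format '{{.CgroupDriver}}'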
- Create the required directories (later steps assume these live under /data)
mkdir -p /data/{cri-dockerd,calico,dashboard,docker,metrics-server,script,ingress-nginx}
Task 3: Install cri-dockerd (required for k8s 1.24 and later)#
cd /data/cri-dockerd
# Download the cri-dockerd package
wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.4/cri-dockerd-0.3.4-3.el8.x86_64.rpm
# Install cri-dockerd
rpm -ivh cri-dockerd-0.3.4-3.el8.x86_64.rpm
docker pull registry.aliyuncs.com/google_containers/pause:3.9
# Point the pause image at a domestic mirror; otherwise kubelet cannot pull it and fails to start
sed -i.bak 's|ExecStart=.*$|ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.9|g' /usr/lib/systemd/system/cri-docker.service
cat /usr/lib/systemd/system/cri-docker.service
# Start cri-dockerd
systemctl daemon-reload
systemctl start cri-docker.service
systemctl enable cri-docker.service
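A quick sanity check that cri-dockerd is running and that the CRI socket used by the later kubeadm commands exists:
systemctl is-active cri-docker.service   # expected: active
ls -l /var/run/cri-dockerd.sock          # the socket referenced by --cri-socket below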
Task 4: Install the high-availability components#
Deploying a highly available cluster requires **keepalived and haproxy** to provide failover for the master nodes. Perform the following on each master node.
- Install keepalived and haproxy
yum install keepalived haproxy -y
- Back up the keepalived and haproxy configuration files
cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak
cp /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg.bak
- Edit /etc/keepalived/keepalived.conf on each master node
- future-k8s-node0
echo 'global_defs {
    router_id k8s
}
vrrp_script check_haproxy {
    script "killall -0 haproxy"
    interval 3
    weight -2
    fall 10
    rise 2
}
vrrp_instance VI_1 {
    state MASTER              # MASTER on the primary node, BACKUP on the others
    interface ens192          # network interface name
    virtual_router_id 51
    priority 250              # priority
    nopreempt                 # non-preemptive mode
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass ceb1b3ec013d66163d6ab
    }
    virtual_ipaddress {
        10.10.3.125/24        # virtual IP
    }
    track_script {
        check_haproxy
    }
}' > /etc/keepalived/keepalived.conf
- future-k8s-node1
echo 'global_defs {
    router_id k8s
}
vrrp_script check_haproxy {
    script "killall -0 haproxy"
    interval 3
    weight -2
    fall 10
    rise 2
}
vrrp_instance VI_1 {
    state BACKUP              # MASTER on the primary node, BACKUP on the others
    interface ens192          # network interface name
    virtual_router_id 51
    priority 200              # priority
    nopreempt                 # non-preemptive mode
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass ceb1b3ec013d66163d6ab
    }
    virtual_ipaddress {
        10.10.3.125/24        # virtual IP
    }
    track_script {
        check_haproxy
    }
}' > /etc/keepalived/keepalived.conf
- future-k8s-node2
echo 'global_defs {
    router_id k8s
}
vrrp_script check_haproxy {
    script "killall -0 haproxy"
    interval 3
    weight -2
    fall 10
    rise 2
}
vrrp_instance VI_1 {
    state BACKUP              # MASTER on the primary node, BACKUP on the others
    interface ens192          # network interface name
    virtual_router_id 51
    priority 150              # priority
    nopreempt                 # non-preemptive mode
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass ceb1b3ec013d66163d6ab
    }
    virtual_ipaddress {
        10.10.3.125/24        # virtual IP
    }
    track_script {
        check_haproxy
    }
}' > /etc/keepalived/keepalived.conf
- Edit /etc/haproxy/haproxy.cfg on each master node (the configuration is identical on all three masters)
echo "
#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
# to have these messages end up in /var/log/haproxy.log you will
# need to:
# 1) configure syslog to accept network log events. This is done
# by adding the '-r' option to the SYSLOGD_OPTIONS in
# /etc/sysconfig/syslog
# 2) configure local2 events to go to the /var/log/haproxy.log
# file. A line like the following can be added to
# /etc/sysconfig/syslog
#
# local2.* /var/log/haproxy.log
#
log 127.0.0.1 local2
chroot /var/lib/haproxy
pidfile /var/run/haproxy.pid
maxconn 4000
user haproxy
group haproxy
daemon
# turn on stats unix socket
stats socket /var/lib/haproxy/stats
#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
mode http
log global
option httplog
option dontlognull
option http-server-close
option forwardfor except 127.0.0.0/8
option redispatch
retries 3
timeout http-request 10s
timeout queue 1m
timeout connect 10s
timeout client 1m
timeout server 1m
timeout http-keep-alive 10s
timeout check 10s
maxconn 3000
#---------------------------------------------------------------------
# kubernetes apiserver frontend which proxys to the backends
#---------------------------------------------------------------------
frontend kubernetes-apiserver
mode tcp
bind *:16443 # HA entry port; used when initializing the k8s cluster
option tcplog
default_backend kubernetes-apiserver
#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
backend kubernetes-apiserver
mode tcp
balance roundrobin
server future-k8s-node0 10.10.3.121:6443 check
server future-k8s-node1 10.10.3.122:6443 check
server future-k8s-node2 10.10.3.123:6443 check
#---------------------------------------------------------------------
# collection haproxy statistics message
#---------------------------------------------------------------------
listen stats
bind *:1080
stats auth admin:awesomePassword
stats refresh 5s
stats realm HAProxy\ Statistics
stats uri /admin?stats
" > /etc/haproxy/haproxy.cfg
- Start the services (on each master node, in order)
# Start keepalived
systemctl enable keepalived && systemctl start keepalived
# Start haproxy
systemctl enable haproxy && systemctl start haproxy
systemctl status keepalived
systemctl status haproxy
- On future-k8s-node0, verify that the VIP is bound
ip add
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: ens192: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 00:50:56:9a:eb:48 brd ff:ff:ff:ff:ff
    inet 10.10.3.121/24 brd 10.10.3.255 scope global noprefixroute ens192
       valid_lft forever preferred_lft forever
    inet 10.10.3.125/24 scope global ens192
       valid_lft forever preferred_lft forever
    inet6 fe80::250:56ff:fe9a/64 scope link noprefixroute
       valid_lft forever preferred_lft forever
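To verify that failover actually works before the cluster carries traffic, a rough test: stop keepalived on the node currently holding the VIP and watch the address move to the next-highest-priority master (this simulates a master outage; adjust node names to your setup):
# On future-k8s-node0, which currently holds the VIP
systemctl stop keepalived
# On future-k8s-node1, the VIP 10.10.3.125 should appear within a few seconds
ip addr show ens192 | grep 10.10.3.125
# Restore future-k8s-node0 afterwards
systemctl start keepalived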
Task 5: Deploy the k8s cluster#
Add the Kubernetes yum repository#
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
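With the repository in place, it can be worth confirming that the version pinned in the next step is actually available from the mirror (optional check; output format depends on the yum version):
yum makecache
yum list --showduplicates kubeadm --disableexcludes=kubernetes | grep 1.27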
Install kubeadm, kubelet and kubectl#
# Install kubelet, kubeadm and kubectl
yum install -y kubelet-1.27.0 kubeadm-1.27.0 kubectl-1.27.0 --disableexcludes=kubernetes
# Set the kubelet cgroup driver to systemd
echo 'KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"' > /etc/sysconfig/kubelet
# Verify
cat /etc/sysconfig/kubelet
# Start kubelet and enable it at boot (it will keep restarting until the cluster is initialized; that is expected)
systemctl start kubelet.service
systemctl enable kubelet.service
systemctl status kubelet.service
# Check the versions
kubeadm version
kubelet --version
kubectl version
Initialize the k8s cluster (on future-k8s-node0)#
Method 1: initialize with a configuration file#
- Export the default configuration file (optional); the command is roughly:
kubeadm config print init-defaults > /data/script/kubeadm-config.yaml
- Configuration file
echo 'apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 10.10.3.125   # virtual IP
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/cri-dockerd.sock
---
apiServer:
  certSANs:   # master node names and addresses
  - future-k8s-node0
  - future-k8s-node1
  - future-k8s-node2
  - future-k8s-vip
  - 10.10.3.121
  - 10.10.3.122
  - 10.10.3.123
  - 10.10.3.125
  - 127.0.0.1
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "future-k8s-vip:16443"   # virtual IP and the HA port configured above
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: 1.27.0   # matches the installed kubeadm/kubelet version
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.96.0.0/12
scheduler: {}' > /data/script/kubeadm-config.yaml
- Initialize the cluster
# Run from /data/script (where kubeadm-config.yaml was written above)
kubeadm init --config kubeadm-config.yaml --upload-certs
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Method 2: initialize with command-line flags#
- Deploy the first master node: run the following on 10.10.3.121
# --control-plane-endpoint points at the virtual IP and the HA port (16443) configured in haproxy
kubeadm init \
  --apiserver-advertise-address=10.10.3.121 \
  --image-repository registry.aliyuncs.com/google_containers \
  --kubernetes-version v1.27.0 \
  --control-plane-endpoint=future-k8s-vip:16443 \
  --service-cidr=10.96.0.0/12 \
  --pod-network-cidr=10.244.0.0/16 \
  --cri-socket=unix:///var/run/cri-dockerd.sock \
  --ignore-preflight-errors=all
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
- Set up passwordless SSH
# Generate a key pair on 10.10.3.121 and copy the public key to the other master nodes
ssh-keygen -t rsa
ssh-copy-id 10.10.3.122
ssh-copy-id 10.10.3.123
- Copy the certificates from 10.10.3.121 to the other master nodes
# On each of the other master nodes, create the directories that will hold the certificates
cd /root && mkdir -p /etc/kubernetes/pki/etcd && mkdir -p ~/.kube/
# Copy the certificates from future-k8s-node0 to future-k8s-node1
scp /etc/kubernetes/pki/ca.crt 10.10.3.122:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/ca.key 10.10.3.122:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/sa.key 10.10.3.122:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/sa.pub 10.10.3.122:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/front-proxy-ca.crt 10.10.3.122:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/front-proxy-ca.key 10.10.3.122:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/etcd/ca.crt 10.10.3.122:/etc/kubernetes/pki/etcd/
scp /etc/kubernetes/pki/etcd/ca.key 10.10.3.122:/etc/kubernetes/pki/etcd/
# Copy the certificates from future-k8s-node0 to future-k8s-node2
scp /etc/kubernetes/pki/ca.crt 10.10.3.123:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/ca.key 10.10.3.123:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/sa.key 10.10.3.123:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/sa.pub 10.10.3.123:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/front-proxy-ca.crt 10.10.3.123:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/front-proxy-ca.key 10.10.3.123:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/etcd/ca.crt 10.10.3.123:/etc/kubernetes/pki/etcd/
scp /etc/kubernetes/pki/etcd/ca.key 10.10.3.123:/etc/kubernetes/pki/etcd/
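The same copy can also be written as a short loop, which is easier to keep consistent if the node list or certificate list changes. A sketch that assumes the passwordless SSH configured above (the CERTS variable is introduced here purely for illustration):
# Hypothetical helper: push the control-plane certificates to the other masters
CERTS="pki/ca.crt pki/ca.key pki/sa.key pki/sa.pub pki/front-proxy-ca.crt pki/front-proxy-ca.key pki/etcd/ca.crt pki/etcd/ca.key"
for node in 10.10.3.122 10.10.3.123; do
  for cert in $CERTS; do
    scp "/etc/kubernetes/$cert" "$node:/etc/kubernetes/$cert"
  done
done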
Initialize the other master nodes#
kubeadm join future-k8s-vip:16443 --token ysl0xr.knx79yu06cldwiy1 --discovery-token-ca-cert-hash sha256:5dd8de94e08a560c7c2424dde0719a9f4e6ac4e5e5fe538ebbab0cbc5866b000 --control-plane --cri-socket=unix:///var/run/cri-dockerd.sock
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
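The token and certificate key embedded in a join command expire (bootstrap tokens after 24 hours, uploaded certificates after 2 hours by default). If they have lapsed, fresh values can be generated on future-k8s-node0:
# Print a new worker join command (creates a new bootstrap token)
kubeadm token create --print-join-command
# Re-upload the control-plane certificates and print the new certificate key;
# append it as --certificate-key when joining an additional master
kubeadm init phase upload-certs --upload-certs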
Initialize the worker node#
kubeadm join 10.10.3.121:6443 --token pzyo37.oaaqt1nrw3u7ijuj --discovery-token-ca-cert-hash sha256:b8067f74af04b63399af1de28644223178e5d63e8258c25d465e78aca515e887 --cri-socket=unix:///var/run/cri-dockerd.sock
Allow scheduling Pods on master nodes (optional)#
By default, Kubernetes does not schedule Pods onto master (control-plane) nodes. To use a master node as a worker as well, remove its taint to allow scheduling.
# Check the default taint
kubectl describe node future-k8s-node2 | grep Taints
Taints: node-role.kubernetes.io/control-plane:NoSchedule
# Remove the taint
kubectl taint nodes future-k8s-node2 node-role.kubernetes.io/control-plane-
Add the worker label
# Add the worker label
kubectl label nodes future-k8s-node2 node-role.kubernetes.io/worker=
# Remove the worker label
kubectl label nodes future-k8s-node2 node-role.kubernetes.io/worker-
Task 6: Install the network plugin (on the master)#
Install calico
mkdir -p /data/calico
cd /data/calico
wget https://docs.tigera.io/archive/v3.25/manifests/calico.yaml
# Edit calico.yaml: find CALICO_IPV4POOL_CIDR, uncomment it if necessary, and set it to the pod subnet
vi calico.yaml
############## change ###################
value: "10.244.0.0/16"
############## change ###################
# Install calico on the master node
kubectl apply -f calico.yaml
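Calico needs a minute or two to pull images and start. A quick way to watch it come up (pod names can vary slightly between calico versions):
# Wait until calico-node and calico-kube-controllers are Running
kubectl get pods -n kube-system | grep calico
kubectl get pods -n kube-system -w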
Check the node status
# List all nodes
kubectl get nodes
kubectl get nodes -o wide
# Check cluster component health
kubectl get cs
Task 7: Install nginx as a test#
# Create an Nginx deployment
kubectl create deployment nginx --image=nginx
# Expose port 80 (the original command is truncated here; the usual form is shown)
kubectl expose deployment nginx --port=80 --type=NodePort
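To confirm the test deployment works end to end, a short check (the NodePort is assigned by Kubernetes, so it is looked up first; the node IP below is one of the masters):
# Look up the NodePort assigned to the nginx service
NODE_PORT=$(kubectl get svc nginx -o jsonpath='{.spec.ports[0].nodePort}')
# Request the default nginx welcome page through any node
curl http://10.10.3.121:${NODE_PORT}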