Task Objectives#
- Complete the installation and deployment of a highly available k8s cluster.
Task Platform#
- Physical devices
- Operating System: openEuler 22.03 LTS SP2
Deployment Guide#
Cluster Topology Diagram
1: Deploy Ceph Cluster#
Task 1: Configuration Preparation#
- Rename hostname
# Change the hostname of the host at 10.10.1.80 to future-k8s-master0
hostnamectl set-hostname future-k8s-master0 && bash
# Change the hostname of the host at 10.10.1.81 to future-k8s-master1
hostnamectl set-hostname future-k8s-master1 && bash
# Change the hostname of the host at 10.10.1.82 to future-k8s-master2
hostnamectl set-hostname future-k8s-master2 && bash
# Change the hostname of the host at 10.10.1.16 to k8s-ceph-node0
hostnamectl set-hostname k8s-ceph-node0 && bash
# Change the hostname of the host at 10.10.1.17 to k8s-ceph-node1
hostnamectl set-hostname k8s-ceph-node1 && bash
# Change the hostname of the host at 10.10.1.18 to k8s-ceph-node2
hostnamectl set-hostname k8s-ceph-node2 && bash
# Change the hostname of the host at 10.10.1.15 to k8s-ceph-node3
hostnamectl set-hostname k8s-ceph-node3 && bash
- Pre-installation configuration modifications
# Disable the firewall
systemctl stop firewalld
systemctl disable firewalld
firewall-cmd --state
# Permanently set SELinux to permissive mode
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
cat /etc/selinux/config
# Permanently disable swap
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab
cat /etc/fstab
# Add hosts
cat >> /etc/hosts << EOF
10.10.1.80 future-k8s-master0
10.10.1.81 future-k8s-master1
10.10.1.82 future-k8s-master2
10.10.1.16 k8s-ceph-node0
10.10.1.17 k8s-ceph-node1
10.10.1.18 k8s-ceph-node2
10.10.1.15 k8s-ceph-node3
10.10.1.83 future-k8s-vip
EOF
# View
cat /etc/hosts
# Add bridge filtering and kernel forwarding configuration file
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
# View
cat /etc/sysctl.d/k8s.conf
# Load br_netfilter module
modprobe br_netfilter
# Check if loaded
lsmod | grep br_netfilter
# Load bridge filtering and kernel forwarding configuration file
sysctl -p /etc/sysctl.d/k8s.conf
# Synchronize time (chrony is used; it replaces ntpd)
yum install chrony -y
systemctl start chronyd
systemctl enable chronyd
# Modify configuration, add content
echo "
server 10.10.3.70 iburst
allow 10.10.3.0/24
allow 10.10.1.0/24
" >> /etc/chrony.conf
timedatectl set-ntp true
systemctl restart chronyd
timedatectl status
date
- Install ipset and ipvsadm
# Install ipset and ipvsadm
yum -y install ipset ipvsadm
Configure the ipvs kernel modules to load:
# Add modules to load
echo '#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
' > /etc/sysconfig/modules/ipvs.modules
# View
cat /etc/sysconfig/modules/ipvs.modules
# Authorize, run, check if loaded
chmod 755 /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules
lsmod | grep -e ip_vs -e nf_conntrack
# Restart
reboot
After the configuration preparation is complete, all nodes need to restart.
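After the reboot, it is worth verifying that the settings above persisted; a minimal check sketch (expected values follow from the steps in this task):
# Quick post-reboot verification on each node
getenforce                          # expect Permissive (or Disabled)
free -m | grep -i swap              # swap total should be 0
sysctl net.ipv4.ip_forward          # should be 1
lsmod | grep br_netfilter           # reload with modprobe br_netfilter if empty
lsmod | grep -e ip_vs -e nf_conntrack
chronyc sources                     # the configured time server should be listed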
Task 2: Configure Python Environment#
Download Python 2
- Install the zlib library; otherwise pip installation will fail and Python would need to be recompiled.
yum -y install zlib*
- Install GCC package. If GCC is not installed, please use the following command to install it.
yum -y install gcc openssl-devel bzip2-devel
- Download Python-2.7.18
cd /usr/src
yum -y install wget tar
wget https://www.python.org/ftp/python/2.7.18/Python-2.7.18.tgz
tar xzf Python-2.7.18.tgz
- Before compiling, uncomment the zlib line in the Modules/Setup.dist file of the source tree:
sed -i 's/#zlib zlibmodule.c -I$(prefix)/zlib zlibmodule.c -I$(prefix)/' Python-2.7.18/Modules/Setup.dist
- Compile and install Python-2.7.18 (make altinstall is used to avoid replacing the default Python binary /usr/bin/python):
cd /usr/src/Python-2.7.18
./configure --enable-optimizations
yum install -y make
make altinstall
Do not overwrite or link the original Python binary file, as this may damage the system.
- Set environment variables
echo "
export PYTHON_HOME=/usr/local/
PATH=\$PATH:\$PYTHON_HOME/bin
" >> /etc/profile
cat /etc/profile
source /etc/profile
- Install pip for Python 2.7:
curl "https://bootstrap.pypa.io/pip/2.7/get-pip.py" -o "get-pip.py"
python2.7 get-pip.py
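A quick check that the interpreter and pip are usable (the versions shown are what the steps above should produce):
python2.7 -V        # expect Python 2.7.18
pip2 -V             # pip should report the Python 2.7 site-packages path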
Download Ceph
# k8s-ceph-node0 download
# Method 1: Use pip to download
pip2 install ceph-deploy
yum install -y ceph ceph-radosgw
# Download on other nodes
yum install -y ceph ceph-radosgw
# Check if the installation package is complete
rpm -qa |egrep -i "ceph|rados|rbd"
Task 3: Deploy Ceph Cluster#
- On k8s-ceph-node0, create the initial Ceph configuration file:
mkdir /etc/ceph/
touch /etc/ceph/ceph.conf
- Generate an FSID for the cluster:
uuidgen
# Example output (this FSID is used throughout the guide)
30912204-0c26-413f-8e00-6d55c9c0af03
- Create a keyring for the cluster and generate a key for the Monitor service:
ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'
- Create an admin keyring, generate a client.admin user, and add this user to the keyring:
ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'
- Create a bootstrap-osd keyring and add the client.bootstrap-osd user to this keyring:
ceph-authtool --create-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring --gen-key -n client.bootstrap-osd --cap mon 'profile bootstrap-osd'
- Import the generated keys into ceph.mon.keyring.
ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring
ceph-authtool /tmp/ceph.mon.keyring --import-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring
- Use the hostname and IP address along with the FSID to generate the monitor map:
monmaptool --create --add k8s-ceph-node0 10.10.1.16 --fsid 30912204-0c26-413f-8e00-6d55c9c0af03 /tmp/monmap
- Create the directory for the monitor, using the format cluster-name-hostname:
mkdir /var/lib/ceph/mon/ceph-k8s-ceph-node0
- Fill in the information for the first monitor daemon:
ceph-mon --mkfs -i k8s-ceph-node0 --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring
- Configure the /etc/ceph/ceph.conf file:
cat /etc/ceph/ceph.conf
################################################
[global]
fsid = 30912204-0c26-413f-8e00-6d55c9c0af03 # Generated FSID
mon initial members =k8s-ceph-node0
mon host = 10.10.1.16
public network = 10.10.1.0/24
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd journal size = 1024
osd pool default size = 3
osd pool default min size = 2
osd pool default pg num = 333
osd pool default pgp num = 333
osd crush chooseleaf type = 1
################################################
- Since we are operating as root, change ownership to the ceph user (alternatively, modify the systemd unit to run as root), then start the Monitor.
chown -R ceph:ceph /var/lib/ceph
systemctl start ceph-mon@k8s-ceph-node0
systemctl enable ceph-mon@k8s-ceph-node0
- Confirm that the service has started normally:
ceph -s
yum install -y net-tools
netstat -lntp|grep ceph-mon
Deploy Manager#
Once we have configured the ceph-mon service, we need to configure the ceph-mgr service.
- Generate an authentication key (ceph-mgr is a custom name):
# 10.10.1.16
ceph auth get-or-create mgr.ceph-mgr mon 'allow profile mgr' osd 'allow *' mds 'allow *'
[mgr.ceph-mgr]
key = AQANDD9lfWg2LBAAHY0mprdbuKFBPJDkE7/I5Q==
# 10.10.1.17
ceph auth get-or-create mgr.ceph-mgr1 mon 'allow profile mgr' osd 'allow *' mds 'allow *'
[mgr.ceph-mgr1]
key = AQDbRTZlgjXWBBAAGew4Xta+t9vgIWPCWC8EVg==
- Create a directory to store this key file.
# 10.10.1.16
sudo -u ceph mkdir /var/lib/ceph/mgr/ceph-ceph-mgr
# Store the generated key file in this directory and name it keyring
vi /var/lib/ceph/mgr/ceph-ceph-mgr/keyring
[mgr.ceph-mgr]
key = AQANDD9lfWg2LBAAHY0mprdbuKFBPJDkE7/I5Q==
# 10.10.1.17
sudo -u ceph mkdir /var/lib/ceph/mgr/ceph-ceph-mgr1
# Store the generated key file in this directory and name it keyring
vi /var/lib/ceph/mgr/ceph-ceph-mgr1/keyring
[mgr.ceph-mgr1]
key = AQDbRTZlgjXWBBAAGew4Xta+t9vgIWPCWC8EVg==
- Start the ceph-mgr service.
# On 10.10.1.16
ceph-mgr -i ceph-mgr
# On 10.10.1.17
ceph-mgr -i ceph-mgr1
systemctl enable ceph-mgr@k8s-ceph-node0
systemctl enable ceph-mgr@k8s-ceph-node1
# Check if the service has started, view ceph status
ceph -s
# View available modules in the current mgr
ceph mgr module ls
Create OSD#
ceph-volume lvm create --data /dev/sda8
# View the current lvm logical volume
ceph-volume lvm list
# View ceph status
ceph -s
Install and Configure Ceph Dashboard#
- Enable dashboard functionality.
ceph mgr module enable dashboard
- Create a certificate.
ceph dashboard create-self-signed-cert
- Configure the username and password for web login.
# Create /etc/ceph/dashboard.key and write the password into it
echo "qishi#09319" >/etc/ceph/dashboard.key
ceph dashboard ac-user-create k8s administrator -i /etc/ceph/dashboard.key
- Modify the default port of the dashboard (optional).
Configure the port, the default port is 8443, change it to 18443, and restart mgr for the change to take effect.
ceph config set mgr mgr/dashboard/server_port 18443
systemctl restart ceph-mgr.target
- View the published service address and log in.
ceph mgr services
{
"dashboard": "https://k8s-ceph-node0:8443/"
}
- To expand the Monitor cluster, update /etc/ceph/ceph.conf on the master node so it lists all monitor members:
vi /etc/ceph/ceph.conf
[global]
fsid = 30912204-0c26-413f-8e00-6d55c9c0af03 # Generated FSID
mon initial members =k8s-ceph-node0,k8s-ceph-node1,k8s-ceph-node2,k8s-ceph-node3 # Hostnames
mon host = 10.10.1.16,10.10.1.17,10.10.1.18,10.10.1.15 # Corresponding IPs
public network = 10.10.1.0/24
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd journal size = 1024
osd pool default size = 3
osd pool default min size = 2
osd pool default pg num = 333
osd pool default pgp num = 333
osd crush chooseleaf type = 1
[mon]
mon allow pool delete = true
[mds.k8s-ceph-node0]
host = k8s-ceph-node0
- Distribute the configuration and key files to the other nodes (run on the master node).
# Generate a public key and copy it to the other nodes
ssh-keygen -t rsa
ssh-copy-id 10.10.1.17
ssh-copy-id 10.10.1.18
ssh-copy-id 10.10.1.15
# Copy authentication keys
scp /etc/ceph/* 10.10.1.17:/etc/ceph/
scp /etc/ceph/* 10.10.1.18:/etc/ceph/
scp /etc/ceph/* 10.10.1.15:/etc/ceph/
- Create the ceph-related directories on the other nodes and set permissions:
mkdir -p /var/lib/ceph/{bootstrap-mds,bootstrap-mgr,bootstrap-osd,bootstrap-rbd,bootstrap-rgw,mds,mgr,mon,osd}
chown -R ceph:ceph /var/lib/ceph
sudo -u ceph mkdir /var/lib/ceph/mon/ceph-k8s-ceph-node1
sudo -u ceph mkdir /var/lib/ceph/mon/ceph-k8s-ceph-node2
- Modify the configuration file on each of the other nodes, taking node1 as an example (the rest are similar).
[global]
fsid = 30912204-0c26-413f-8e00-6d55c9c0af03 # Generated FSID
mon initial members =k8s-ceph-node0,k8s-ceph-node1,k8s-ceph-node2,k8s-ceph-node3 # Hostnames
mon host = 10.10.1.16,10.10.1.17,10.10.1.18,10.10.1.15 # Corresponding IPs
public network = 10.10.1.0/24
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd journal size = 1024
osd pool default size = 3
osd pool default min size = 2
osd pool default pg num = 333
osd pool default pgp num = 333
osd crush chooseleaf type = 1
[mon]
mon allow pool delete = true
[mon.k8s-ceph-node1]
mon_addr = 10.10.1.17:6789
host = k8s-ceph-node1
- Obtain the keys and map from the cluster, taking node1 as an example (other nodes are similar).
ceph auth get mon. -o /tmp/monkeyring
ceph mon getmap -o /tmp/monmap
- Use the existing keys and map to add a new Monitor, specifying the hostname, taking node1 as an example (other nodes are similar).
sudo -u ceph ceph-mon --mkfs -i k8s-ceph-node1 --monmap /tmp/monmap --keyring /tmp/monkeyring
- Start the service, taking node1 as an example (other nodes are similar).
systemctl start ceph-mon@k8s-ceph-node1
systemctl enable ceph-mon@k8s-ceph-node1
# Check mon status
ceph -s
ceph mon stat
Add OSD#
Copy the OSD bootstrap keyring from the master node to the other nodes.
scp -p /var/lib/ceph/bootstrap-osd/ceph.keyring 10.10.1.17:/var/lib/ceph/bootstrap-osd/
scp -p /var/lib/ceph/bootstrap-osd/ceph.keyring 10.10.1.18:/var/lib/ceph/bootstrap-osd/
scp -p /var/lib/ceph/bootstrap-osd/ceph.keyring 10.10.1.15:/var/lib/ceph/bootstrap-osd/
Add an OSD on each of the other nodes.
ceph-volume lvm create --data /dev/sdb
systemctl enable ceph-osd@k8s-ceph-node1
# Check status
ceph -s
Add MDS (taking node0 as an example)#
# Create directory
sudo -u ceph mkdir -p /var/lib/ceph/mds/ceph-k8s-ceph-node0
# Create key
ceph-authtool --create-keyring /var/lib/ceph/mds/ceph-k8s-ceph-node0/keyring --gen-key -n mds.k8s-ceph-node0
# Import key and set caps
ceph auth add mds.k8s-ceph-node0 osd "allow rwx" mds "allow" mon "allow profile mds" -i /var/lib/ceph/mds/ceph-k8s-ceph-node0/keyring
# Manually start the service
ceph-mds --cluster ceph -i k8s-ceph-node0 -m k8s-ceph-node0:6789
chown -R ceph:ceph /var/lib/ceph/mds/
systemctl start ceph-mds@k8s-ceph-node0
systemctl enable ceph-mds@k8s-ceph-node0
# Check if the service has started
ps -ef|grep ceph-mds
# Check ceph cluster status
ceph -s
Create CephFS#
Create pools.
# Store data
ceph osd pool create cephfs_data 64
# Store metadata
ceph osd pool create cephfs_metadata 64
# Enable cephfs file system
ceph fs new cephfs cephfs_metadata cephfs_data
# View file system status
ceph fs ls
ceph mds stat
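Optionally, the new file system can be smoke-tested with a kernel-client mount from a host that has the admin keyring; this is only a sketch, and the mount point /mnt/cephfs is an arbitrary choice:
# Mount CephFS with the admin key, check capacity, then unmount
mkdir -p /mnt/cephfs
mount -t ceph 10.10.1.16:6789:/ /mnt/cephfs -o name=admin,secret=$(ceph auth get-key client.admin)
df -h /mnt/cephfs
umount /mnt/cephfs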
Create RBD Pool#
# Create RBD pool
ceph osd pool create rbd-k8s 64 64
# Enable
ceph osd pool application enable rbd-k8s rbd
# Initialize
rbd pool init rbd-k8s
# View
ceph osd lspools
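An optional smoke test for the new pool; the image name test-img is just an example and is removed afterwards:
rbd create rbd-k8s/test-img --size 1024   # 1 GiB test image
rbd info rbd-k8s/test-img
rbd rm rbd-k8s/test-img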
2: Deploy Highly Available K8s Cluster#
Task 1: Configuration Preparation (same as Ceph cluster)#
Task 2: Install Docker#
- Configure the yum repository for Docker CE by writing the following content to the docker-ce.repo file:
echo '
[docker-ce-stable]
name=Docker CE Stable - $basearch
baseurl=https://download.docker.com/linux/centos/7/$basearch/stable
enabled=1
gpgcheck=1
gpgkey=https://download.docker.com/linux/centos/gpg ' > /etc/yum.repos.d/docker-ce.repo
# Verify the repo file
cat /etc/yum.repos.d/docker-ce.repo
- Install Docker CE. Run the following command to install Docker CE:
yum -y install docker-ce docker-ce-cli containerd.io docker-compose-plugin
# Start docker and set it to start on boot
systemctl start docker
systemctl enable docker
# Check version
docker -v
docker compose version
- Modify the Docker configuration to set the cgroup driver to systemd (and move the data root) as follows.
# Write the configuration to the daemon.json file
echo '{
"exec-opts": ["native.cgroupdriver=systemd"],
"data-root": "/data/docker"
} ' > /etc/docker/daemon.json
# View
cat /etc/docker/daemon.json
systemctl daemon-reload
systemctl restart docker
docker info
- Create required directories.
cd /data
mkdir cri-dockerd calico dashboard metrics-server script ingress-nginx
Task 3: Install cri-dockerd (for k8s version 1.24 and above)#
cd /data/cri-dockerd
# Download cri-dockerd installation package
wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.4/cri-dockerd-0.3.4-3.el8.x86_64.rpm
# Install cri-dockerd
rpm -ivh cri-dockerd-0.3.4-3.el8.x86_64.rpm
docker pull registry.aliyuncs.com/google_containers/pause:3.9
# Point the pause image at a domestic (Aliyun) mirror; otherwise kubelet cannot pull it and fails to start
sed -i.bak 's|ExecStart=.*$|ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.9|g' /usr/lib/systemd/system/cri-docker.service
cat /usr/lib/systemd/system/cri-docker.service
# Start cri-dockerd
systemctl daemon-reload
systemctl start cri-docker.service
systemctl enable cri-docker.service
Task 4: Install High Availability Components#
A highly available cluster requires keepalived and haproxy for master-node failover; install and configure them on each master node.
- Install keepalived and haproxy.
yum install keepalived haproxy -y
- Backup keepalived and haproxy configuration files.
cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.bak
cp /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg.bak
- Modify the /etc/keepalived/keepalived.conf file on each master node.
- future-k8s-master0
echo '
global_defs {
   router_id k8s
}
vrrp_script check_haproxy {
    script "killall -0 haproxy"
    interval 3
    weight -2
    fall 10
    rise 2
}
vrrp_instance VI_1 {
    state MASTER              # MASTER on the master node, BACKUP on the others
    interface ens192          # Network card name
    virtual_router_id 51
    priority 250              # Priority
    nopreempt                 # Set non-preemptive mode
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass ceb1b3ec013d66163d6ab
    }
    virtual_ipaddress {
        10.10.1.83/24         # Virtual IP
    }
    track_script {
        check_haproxy
    }
}
' > /etc/keepalived/keepalived.conf
cat /etc/keepalived/keepalived.conf
- future-k8s-master1
echo '
global_defs {
   router_id k8s
}
vrrp_script check_haproxy {
    script "killall -0 haproxy"
    interval 3
    weight -2
    fall 10
    rise 2
}
vrrp_instance VI_1 {
    state BACKUP              # MASTER on the master node, BACKUP on the others
    interface ens192          # Network card name
    virtual_router_id 51
    priority 200              # Priority
    nopreempt                 # Set non-preemptive mode
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass ceb1b3ec013d66163d6ab
    }
    virtual_ipaddress {
        10.10.1.83/24         # Virtual IP
    }
    track_script {
        check_haproxy
    }
}
' > /etc/keepalived/keepalived.conf
cat /etc/keepalived/keepalived.conf
- future-k8s-master2
echo '
global_defs {
   router_id k8s
}
vrrp_script check_haproxy {
    script "killall -0 haproxy"
    interval 3
    weight -2
    fall 10
    rise 2
}
vrrp_instance VI_1 {
    state BACKUP              # MASTER on the master node, BACKUP on the others
    interface ens192          # Network card name
    virtual_router_id 51
    priority 150              # Priority
    nopreempt                 # Set non-preemptive mode
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass ceb1b3ec013d66163d6ab
    }
    virtual_ipaddress {
        10.10.1.83/24         # Virtual IP
    }
    track_script {
        check_haproxy
    }
}
' > /etc/keepalived/keepalived.conf
cat /etc/keepalived/keepalived.conf
- Modify the /etc/haproxy/haproxy.cfg file on each master node (the configuration is identical on all three master nodes).
echo "
#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
# To have these messages end up in /var/log/haproxy.log you will
# need to:
# 1) configure syslog to accept network log events. This is done
# by adding the '-r' option to the SYSLOGD_OPTIONS in
# /etc/sysconfig/syslog
# 2) configure local2 events to go to the /var/log/haproxy.log
# file. A line like the following can be added to
# /etc/sysconfig/syslog
#
# local2.* /var/log/haproxy.log
#
log 127.0.0.1 local2
chroot /var/lib/haproxy
pidfile /var/run/haproxy.pid
maxconn 4000
user haproxy
group haproxy
daemon
# Turn on stats unix socket
stats socket /var/lib/haproxy/stats
#---------------------------------------------------------------------
# Common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
mode http
log global
option httplog
option dontlognull
option http-server-close
option forwardfor except 127.0.0.0/8
option redispatch
retries 3
timeout http-request 10s
timeout queue 1m
timeout connect 10s
timeout client 1m
timeout server 1m
timeout http-keep-alive 10s
timeout check 10s
maxconn 3000
#---------------------------------------------------------------------
# Kubernetes apiserver frontend which proxys to the backends
#---------------------------------------------------------------------
frontend kubernetes-apiserver
mode tcp
bind *:16443 # High availability monitoring port, used when initializing k8s cluster
option tcplog
default_backend kubernetes-apiserver
#---------------------------------------------------------------------
# Round robin balancing between the various backends
#---------------------------------------------------------------------
backend kubernetes-apiserver
mode tcp
balance roundrobin
server future-k8s-master0 10.10.1.80:6443 check
server future-k8s-master1 10.10.1.81:6443 check
server future-k8s-master2 10.10.1.82:6443 check
#---------------------------------------------------------------------
# Collection haproxy statistics message
#---------------------------------------------------------------------
listen stats
bind *:1080
stats auth admin:awesomePassword
stats refresh 5s
stats realm HAProxy\ Statistics
stats uri /admin?stats
" > /etc/haproxy/haproxy.cfg
cat /etc/haproxy/haproxy.cfg
- Start the services (bring up each master node in order).
# Start keepalived
systemctl enable keepalived && systemctl start keepalived
# Start haproxy
systemctl enable haproxy && systemctl start haproxy
systemctl status keepalived
systemctl status haproxy
- On future-k8s-master0, check the bound VIP address.
ip add
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: ens192: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 00:50:56:9a:eb:48 brd ff:ff:ff:ff:ff:ff
    inet 10.10.1.80/24 brd 10.10.3.255 scope global noprefixroute ens192
       valid_lft forever preferred_lft forever
    inet 10.10.1.83/24 scope global ens192
       valid_lft forever preferred_lft forever
    inet6 fe80::250:56ff:fe9a/64 scope link noprefixroute
       valid_lft forever preferred_lft forever
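To confirm failover works, the VIP can be forced to move; a sketch of the check (with nopreempt set, the VIP stays on the new holder after master0 recovers):
# On future-k8s-master0
systemctl stop keepalived
# On future-k8s-master1 or future-k8s-master2: the VIP should appear within a few seconds
ip addr show ens192 | grep 10.10.1.83
# Restore master0 afterwards
systemctl start keepalived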
Task 5: Deploy K8s Cluster#
Add Yum Software Source#
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
Install kubeadm, kubelet, and kubectl#
# Install kubelet, kubeadm, kubectl
yum install -y kubelet-1.28.0 kubeadm-1.28.0 kubectl-1.28.0 --disableexcludes=kubernetes
# Change cgroup to systemd
echo 'KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"' > /etc/sysconfig/kubelet
# View
cat /etc/sysconfig/kubelet
# Set to start on boot
systemctl start kubelet.service
systemctl enable kubelet.service
# Check version
kubeadm version
kubelet --version
kubectl version
Initialize K8s Cluster (on future-k8s-master0 node)#
Method 1: Initialize using configuration file#
- Export the default configuration file (optional)
kubeadm config print init-defaults > kubeadm-config.yaml
- Configuration file
echo '
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 10.10.1.83              # Virtual IP
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/cri-dockerd.sock
---
apiServer:
  certSANs:                                 # Master nodes and corresponding hostnames
    - future-k8s-master0
    - future-k8s-master1
    - future-k8s-master2
    - future-k8s-vip
    - 10.10.1.80
    - 10.10.1.81
    - 10.10.1.82
    - 10.10.1.83
    - 127.0.0.1
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "future-k8s-vip:16443"   # Virtual IP and port for high availability
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: 1.28.0
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.96.0.0/12
scheduler: {}
' > /data/script/kubeadm-config.yaml
cat /data/script/kubeadm-config.yaml
- Cluster initialization
kubeadm init --config kubeadm-config.yaml --upload-certs
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Method 2: Initialize using command#
- Deploy the master node, execute on 10.10.1.80 to initialize the master node.
# --control-plane-endpoint points at the virtual IP and the haproxy port 16443
kubeadm init \
  --apiserver-advertise-address=10.10.1.80 \
  --image-repository registry.aliyuncs.com/google_containers \
  --kubernetes-version v1.28.0 \
  --control-plane-endpoint=future-k8s-vip:16443 \
  --service-cidr=10.96.0.0/12 \
  --pod-network-cidr=10.244.0.0/16 \
  --cri-socket=unix:///var/run/cri-dockerd.sock \
  --ignore-preflight-errors=all
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
- Configure SSH passwordless login.
# Generate a public key on 10.10.1.80 and copy it to the other master nodes
ssh-keygen -t rsa
ssh-copy-id 10.10.1.81
ssh-copy-id 10.10.1.82
- Copy the certificates from 10.10.1.80 to other master nodes.
# Create the certificate storage directories on the other master nodes
cd /root && mkdir -p /etc/kubernetes/pki/etcd && mkdir -p ~/.kube/
# Copy certificates from future-k8s-master0 to future-k8s-master1
scp /etc/kubernetes/pki/ca.crt 10.10.1.81:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/ca.key 10.10.1.81:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/sa.key 10.10.1.81:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/sa.pub 10.10.1.81:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/front-proxy-ca.crt 10.10.1.81:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/front-proxy-ca.key 10.10.1.81:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/etcd/ca.crt 10.10.1.81:/etc/kubernetes/pki/etcd/
scp /etc/kubernetes/pki/etcd/ca.key 10.10.1.81:/etc/kubernetes/pki/etcd/
# Copy certificates from future-k8s-master0 to future-k8s-master2
scp /etc/kubernetes/pki/ca.crt 10.10.1.82:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/ca.key 10.10.1.82:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/sa.key 10.10.1.82:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/sa.pub 10.10.1.82:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/front-proxy-ca.crt 10.10.1.82:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/front-proxy-ca.key 10.10.1.82:/etc/kubernetes/pki/
scp /etc/kubernetes/pki/etcd/ca.crt 10.10.1.82:/etc/kubernetes/pki/etcd/
scp /etc/kubernetes/pki/etcd/ca.key 10.10.1.82:/etc/kubernetes/pki/etcd/
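The same copy can be expressed as a loop, assuming passwordless SSH to both masters is already in place:
for master in 10.10.1.81 10.10.1.82; do
  scp /etc/kubernetes/pki/{ca.crt,ca.key,sa.key,sa.pub,front-proxy-ca.crt,front-proxy-ca.key} $master:/etc/kubernetes/pki/
  scp /etc/kubernetes/pki/etcd/{ca.crt,ca.key} $master:/etc/kubernetes/pki/etcd/
done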
Initialize Other Master Nodes#
kubeadm join future-k8s-vip:16443 --token yjphdh.guefcomqw3am4ask \
--discovery-token-ca-cert-hash sha256:ed44c7deada0ea0fe5a54212ab4e5aa6fc34672ffe2a2c87a31ba73306e75c21 \
--control-plane --certificate-key 4929b83577eafcd5933fc0b6506cb6d82e7bc481751e442888c4c2b32b5d0c9c --cri-socket=unix:///var/run/cri-dockerd.sock
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Initialize Node Nodes#
kubeadm join future-k8s-vip:16443 --token yjphdh.guefcomqw3am4ask \
--discovery-token-ca-cert-hash sha256:ed44c7deada0ea0fe5a54212ab4e5aa6fc34672ffe2a2c87a31ba73306e75c21 --cri-socket=unix:///var/run/cri-dockerd.sock
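If the bootstrap token above has expired (the default lifetime is 24 hours), a fresh worker join command can be printed on any master:
kubeadm token create --print-join-command
# Append --cri-socket=unix:///var/run/cri-dockerd.sock to the printed command when using cri-dockerd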
Set Master Nodes to Allow Scheduling PODs (optional)#
By default, Kubernetes will not schedule Pods on Master nodes. If you want to use k8s-master as a Node, you need to remove the taint to enable scheduling.
# View default taint configuration
kubectl describe node future-k8s-master2 |grep Taints
Taints: node-role.kubernetes.io/control-plane
# Remove taint
kubectl taint nodes future-k8s-master2 node-role.kubernetes.io/control-plane-
Add worker label.
# Add worker label
kubectl label nodes future-k8s-master2 node-role.kubernetes.io/worker=
# Remove worker label
kubectl label nodes future-k8s-master2 node-role.kubernetes.io/worker-
Task 6: Install Network Plugin (Master)#
Install Calico.
mkdir -p /data/calico
cd /data/calico
wget https://docs.tigera.io/archive/v3.25/manifests/calico.yaml
# Edit calico.yaml, find CALICO_IPV4POOL_CIDR, and set it to the pod subnet
vi calico.yaml
############## Modify Content ###################
            - name: CALICO_IPV4POOL_CIDR
              value: "10.244.0.0/16"
############## Modify Content ###################
# Install calico on master node
kubectl apply -f calico.yaml
Check node status.
# View all nodes
kubectl get nodes
kubectl get nodes -o wide
# Check cluster health
kubectl get cs
Task 7: Install Nginx for Testing#
# Create Nginx deployment
kubectl create deployment nginx --image=nginx
# Expose port 80
kubectl expose deployment nginx --port=80 --type=NodePort
# Check pod status
kubectl get pod
# Check service status
kubectl get service
##########################################################################
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 5d1h
nginx NodePort 10.98.221.224 <none> 80:32743/TCP 23s
##########################################################################
# Access the web page for testing (port number based on service status)
http://10.10.1.80:32743/
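The same check from the command line (NodePort taken from the service output above):
curl -I http://10.10.1.80:32743/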
Task 8: Install Dashboard Interface#
- Download the yaml file.
# Enter the storage directory (created earlier under /data)
cd /data/dashboard/
# 2.7
wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.7.0/aio/deploy/recommended.yaml
- Modify the yaml file.
vi recommended.yaml
# Expose the Service as a NodePort (optionally also set replicas to 2 in the Deployment)
################# Modify Content #######################
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
ports:
- port: 443
targetPort: 8443
nodePort: 32009 # Add this line, pay attention to indentation
selector:
k8s-app: kubernetes-dashboard
type: NodePort # Add this line, pay attention to indentation
################# Modify Content #######################
- Apply installation, check pod and svc.
# Install
kubectl apply -f recommended.yaml
# Check pod and svc
kubectl get pod,svc -o wide -n kubernetes-dashboard
#########################################################
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod/dashboard-metrics-scraper-5cb4f4bb9c-mg569 0/1 ContainerCreating 0 9s <none> node1 <none> <none>
pod/kubernetes-dashboard-6967859bff-2968p 0/1 ContainerCreating 0 9s <none> node1 <none> <none>
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
service/dashboard-metrics-scraper ClusterIP 10.100.129.191 <none> 8000/TCP 9s k8s-app=dashboard-metrics-scraper
service/kubernetes-dashboard NodePort 10.106.130.53 <none> 443:31283/TCP 9s k8s-app=kubernetes-dashboard
########################################################
Use the NodePort shown in the svc output to access the Dashboard over HTTPS.
- Create a dashboard service account.
# Create a service account named admin-user and bind it to the cluster
vi dashboard-adminuser.yaml
################## Content ####################
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin-user
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: admin-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
namespace: kubernetes-dashboard
---
# Create a secret to obtain the long-term holder token for the service account
apiVersion: v1
kind: Secret
metadata:
name: admin-user
namespace: kubernetes-dashboard
annotations:
kubernetes.io/service-account.name: "admin-user"
type: kubernetes.io/service-account-token
################## Content ####################
# Execute to take effect
kubectl apply -f dashboard-adminuser.yaml
- Login methods.
Option 1: Obtain a long-term available token.
# Save it in the /data/dashboard/admin-user.token file
cd /data/dashboard/
kubectl get secret admin-user -n kubernetes-dashboard -o jsonpath={".data.token"} | base64 -d > admin-user.token
Obtain long-term available token script.
#!/bin/bash
# Author: Yun
############# Description #############
:<<!
Obtain long-term available token script
Store the token in the admin-user.token file
!
############# Description #############
kubectl get secret admin-user -n kubernetes-dashboard -o jsonpath={".data.token"} | base64 -d > admin-user.token
echo -e "\033[1;32mToken created successfully, please check in the admin-user.token file\033[m"
Option 2: Use the Kubeconfig file to log in.
# Define the token variable
DASH_TOKEN=$(kubectl get secret admin-user -n kubernetes-dashboard -o jsonpath={".data.token"} | base64 -d)
# Set the kubeconfig cluster entry
kubectl config set-cluster kubernetes --server=https://10.10.1.80:6443 --kubeconfig=/root/dashbord-admin.conf
# Set the kubeconfig user entry
kubectl config set-credentials admin-user --token=$DASH_TOKEN --kubeconfig=/root/dashbord-admin.conf
# Set kubeconfig context entry
kubectl config set-context admin-user@kubernetes --cluster=kubernetes --user=admin-user --kubeconfig=/root/dashbord-admin.conf
# Set kubeconfig current context
kubectl config use-context admin-user@kubernetes --kubeconfig=/root/dashbord-admin.conf
Copy the generated dashbord-admin.conf file to the local host, select the Kubeconfig option on the login page, and choose this file to log in.
Task 9: Install Metrics Server#
Download deployment file.
wget https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml -O metrics-server-components.yaml
Modify the Deployment content in the yaml file.
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: metrics-server
strategy:
rollingUpdate:
maxUnavailable: 0
template:
metadata:
labels:
k8s-app: metrics-server
spec:
containers:
- args:
- --cert-dir=/tmp
- --secure-port=4443
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --kubelet-use-node-status-port
- --metric-resolution=15s
- --kubelet-insecure-tls # Add
image: registry.cn-hangzhou.aliyuncs.com/google_containers/metrics-server:v0.6.4 # Modify
imagePullPolicy: IfNotPresent
# Install
kubectl apply -f metrics-server-components.yaml
Check the pod status of the metrics server.
kubectl get pods --all-namespaces | grep metrics
Wait a moment, then check whether the monitoring graphs are displayed correctly in the Dashboard.
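Once the metrics-server pod is Running, resource metrics should also be available from the command line:
kubectl top nodes
kubectl top pods -A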
Task 10: Automatic Completion of kubectl Commands#
yum -y install bash-completion
source /usr/share/bash-completion/bash_completion
echo 'source <(kubectl completion bash)' >> ~/.bashrc
bash
Task 11: Install Ingress-Nginx Controller#
# Download yaml file
wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.0.0/deploy/static/provider/baremetal/deploy.yaml
# Modify the image address in the yaml file
##################### Modify Content ######################
willdockerhub/ingress-nginx-controller:v1.0.0
hzde0128/kube-webhook-certgen:v1.0
##################### Modify Content ######################
# Change Deployment to DaemonSet
# Change network mode to host network
##################### Modify Content ######################
template:
spec:
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
tolerations: # Use affinity configuration to deploy on all nodes
- key: node-role.kubernetes.io/control-plane
operator: Exists
effect: NoSchedule
nodeSelector:
kubernetes.io/os: linux
custem/ingress-controller-ready: 'true'
containers:
- name: controller
##################### Modify Content ######################
# Set labels for worker nodes (required)
kubectl label nodes future-k8s-master0 custem/ingress-controller-ready=true
kubectl label nodes future-k8s-master1 custem/ingress-controller-ready=true
kubectl label nodes future-k8s-master2 custem/ingress-controller-ready=true
kubectl label nodes future-k8s-node3 custem/ingress-controller-ready=true
# Install
kubectl apply -f deploy.yaml
# Check status
kubectl get pods -n ingress-nginx
################ Status ##################
NAME READY STATUS RESTARTS AGE
ingress-nginx-admission-create-2lz4v 0/1 Completed 0 5m46s
ingress-nginx-admission-patch-c6896 0/1 Completed 0 5m46s
ingress-nginx-controller-7575fb546-q29qn 1/1 Running 0 5m46s
Task 12: Configure Dashboard Proxy#
echo '
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: k8s-dashboard
namespace: kubernetes-dashboard
labels:
ingress: k8s-dashboard
annotations:
nginx.ingress.kubernetes.io/rewrite-target: / # Rewrite path
nginx.ingress.kubernetes.io/force-ssl-redirect: "true" # Automatically redirect http to https
nginx.ingress.kubernetes.io/use-regex: "true"
nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
spec:
ingressClassName: nginx
rules:
- host: k8s.yjs.51xueweb.cn
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: kubernetes-dashboard
port:
number: 443
' > /data/dashboard/dashboard-ingress.yaml
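Apply the Ingress and confirm it was admitted (resolving k8s.yjs.51xueweb.cn to a node IP is assumed to be handled by DNS or /etc/hosts):
kubectl apply -f /data/dashboard/dashboard-ingress.yaml
kubectl get ingress -n kubernetes-dashboard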
3: Integrate K8s Cluster with Ceph Cluster#
Task 1: Install Ceph Client (ceph-common)#
Install ceph-common on each node of the k8s cluster.
yum install ceph-common -y
Task 2: Synchronize Ceph Cluster Configuration Files#
Synchronize the /etc/ceph/{ceph.conf,ceph.client.admin.keyring} files from the Ceph cluster to all nodes in k8s.
# Configure SSH passwordless login
ssh-keygen -t rsa
ssh-copy-id 10.10.1.80
ssh-copy-id 10.10.1.81
ssh-copy-id 10.10.1.82
# Copy files
scp -r /etc/ceph/{ceph.conf,ceph.client.admin.keyring} 10.10.1.80:/etc/ceph
scp -r /etc/ceph/{ceph.conf,ceph.client.admin.keyring} 10.10.1.81:/etc/ceph
scp -r /etc/ceph/{ceph.conf,ceph.client.admin.keyring} 10.10.1.82:/etc/ceph
Task 3: Deploy Ceph-CSI (using RBD)#
- Download the ceph-csi component (on one of the master nodes in k8s).
# Download file
wget https://github.com/ceph/ceph-csi/archive/refs/tags/v3.9.0.tar.gz
# Rename and extract
mv v3.9.0.tar.gz ceph-csi-v3.9.0.tar.gz
tar -xzf ceph-csi-v3.9.0.tar.gz
# Enter directory
cd ceph-csi-3.9.0/deploy/rbd/kubernetes
mkdir -p /data/cephfs/csi
# Copy the yaml files into the csi directory (6 files in total)
cp * /data/cephfs/csi
- Pull the images required by the csi component.
# View required images
grep image csi-rbdplugin-provisioner.yaml
grep image csi-rbdplugin.yaml
Pull the required images on all k8s nodes.
cd /data/script
./pull-images.sh registry.k8s.io/sig-storage/csi-provisioner:v3.5.0
./pull-images.sh registry.k8s.io/sig-storage/csi-resizer:v1.8.0
./pull-images.sh registry.k8s.io/sig-storage/csi-snapshotter:v6.2.2
docker pull quay.io/cephcsi/cephcsi:v3.9.0
./pull-images.sh registry.k8s.io/sig-storage/csi-attacher:v4.3.0
./pull-images.sh registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.8.0
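The pull-images.sh helper used above is not included in this guide; a minimal sketch of what such a script might look like, assuming it pulls registry.k8s.io images through a mirror and retags them locally (the mirror prefix is an assumption, replace it with one reachable from your network):
#!/bin/bash
# Hypothetical pull-images.sh: pull a registry.k8s.io image via a mirror, then retag it
IMAGE=$1                                                   # e.g. registry.k8s.io/sig-storage/csi-provisioner:v3.5.0
MIRROR_PREFIX="registry.aliyuncs.com/google_containers"    # assumed mirror, replace as needed
NAME_TAG=$(echo "$IMAGE" | awk -F/ '{print $NF}')          # e.g. csi-provisioner:v3.5.0
docker pull "$MIRROR_PREFIX/$NAME_TAG"
docker tag "$MIRROR_PREFIX/$NAME_TAG" "$IMAGE"
docker rmi "$MIRROR_PREFIX/$NAME_TAG"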
- Create a cephfs namespace.
echo '
apiVersion: v1
kind: Namespace
metadata:
labels:
kubernetes.io/metadata.name: cephfs
name: cephfs
' > ceph-namespace.yaml
# Execute
kubectl apply -f ceph-namespace.yaml
- Create a secret file to connect to the ceph cluster, csi-rbd-secret.yaml.
echo '
apiVersion: v1
kind: Secret
metadata:
name: csi-rbd-secret
namespace: cephfs
stringData:
adminID: admin
adminKey: AQANDD9lfWg2LBAAHY0mprdbuKFBPJDkE7/I5Q==
userID: admin
userKey: AQANDD9lfWg2LBAAHY0mprdbuKFBPJDkE7/I5Q==
' > csi-rbd-secret.yaml
# Execute
kubectl apply -f csi-rbd-secret.yaml
- Create ceph-config-map.yaml.
echo '
apiVersion: v1
kind: ConfigMap
data:
ceph.conf: |
[global]
fsid = 30912204-0c26-413f-8e00-6d55c9c0af03 # Generated FSID
mon initial members =k8s-ceph-node0,k8s-ceph-node1,k8s-ceph-node2 # Hostnames
mon host = 10.10.1.16,10.10.1.17,10.10.1.18 # Corresponding IPs
public network = 10.10.1.0/24
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd journal size = 1024
osd pool default size = 3
osd pool default min size = 2
osd pool default pg num = 333
osd pool default pgp num = 333
osd crush chooseleaf type = 1
[mon]
mon allow pool delete = true
[mds.k8s-ceph-node0]
host = k8s-ceph-node0
keyring: |
metadata:
name: ceph-config
namespace: cephfs
' > ceph-config-map.yaml
# Execute
kubectl apply -f ceph-config-map.yaml
- Modify csi-config-map.yaml to configure the information to connect to the ceph cluster.
echo '
apiVersion: v1
kind: ConfigMap
metadata:
name: ceph-csi-config
namespace: cephfs
labels:
addonmanager.kubernetes.io/mode: Reconcile
data:
config.json: |-
[{"clusterID":"30912204-0c26-413f-8e00-6d55c9c0af03","monitors":["10.10.1.16:6789","10.10.1.17:6789","10.10.1.18:6789"]}]
' > csi-config-map.yaml
- Modify the csi component configuration files.
- Change the namespace from default to cephfs in all yaml files in the /data/cephfs/csi directory.
cd /data/cephfs/csi
sed -i "s/namespace: default/namespace: cephfs/g" $(grep -rl "namespace: default" ./)
sed -i -e "/^kind: ServiceAccount/{N;N;a\ namespace: cephfs}" $(egrep -rl "^kind: ServiceAccount" ./)
- Comment out the kms part in csi-rbdplugin-provisioner.yaml and csi-rbdplugin.yaml.
# - name: KMS_CONFIGMAP_NAME
#   value: encryptionConfig
#- name: ceph-csi-encryption-kms-config
#  configMap:
#    name: ceph-csi-encryption-kms-config
# Execute to install csi components
kubectl apply -f csi-config-map.yaml
kubectl apply -f csi-nodeplugin-rbac.yaml
kubectl apply -f csidriver.yaml
kubectl apply -f csi-provisioner-rbac.yaml
kubectl apply -f csi-rbdplugin-provisioner.yaml
kubectl apply -f csi-rbdplugin.yaml
Task 4: Create StorageClass#
echo '
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
annotations:
k8s.kuboard.cn/storageType: cephfs_provisioner
name: csi-rbd-sc
provisioner: rbd.csi.ceph.com
parameters:
# fsName: cephfs (Used for cephfs)
clusterID: 30912204-0c26-413f-8e00-6d55c9c0af03
pool: rbd-k8s
imageFeatures: layering
csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret
csi.storage.k8s.io/provisioner-secret-namespace: cephfs
csi.storage.k8s.io/controller-expand-secret-name: csi-rbd-secret
csi.storage.k8s.io/controller-expand-secret-namespace: cephfs
csi.storage.k8s.io/node-stage-secret-name: csi-rbd-secret
csi.storage.k8s.io/node-stage-secret-namespace: cephfs
csi.storage.k8s.io/fstype: xfs
reclaimPolicy: Delete
volumeBindingMode: Immediate
allowVolumeExpansion: true
mountOptions:
- discard
' > storageclass.yaml
# Execute
kubectl apply -f storageclass.yaml
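Verify that the StorageClass exists:
kubectl get sc csi-rbd-sc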
Task 5: Create PVC#
echo '
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: rbd-pvc
namespace: cephfs
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
storageClassName: csi-rbd-sc
' > pvc.yaml
# Execute
kubectl apply -f pvc.yaml
# Check if PVC is created successfully
kubectl get pvc -n cephfs
# Check if PV is created successfully
kubectl get pv -n cephfs
# Check if an image has been created in the rbd-k8s storage pool of the ceph cluster
rbd ls -p rbd-k8s
Task 6: Create Pod for Testing and Verification#
echo '
apiVersion: v1
kind: Pod
metadata:
name: csi-rbd-demo-pod
namespace: cephfs
spec:
containers:
- name: web-server
image: nginx:latest
volumeMounts:
- name: mypvc
mountPath: /var/lib/www/html
volumes:
- name: mypvc
persistentVolumeClaim:
claimName: rbd-pvc
readOnly: false
' > pod.yaml
# Execute
kubectl apply -f pod.yaml
# Enter the container to check the mount information
kubectl exec -it csi-rbd-demo-pod -n cephfs -- bash
lsblk -l|grep rbd
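Still inside the container, a simple write and read confirms the RBD volume is usable (the file name is arbitrary):
df -h /var/lib/www/html                        # the rbd device should be mounted here
echo "hello ceph" > /var/lib/www/html/test.txt
cat /var/lib/www/html/test.txt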