天天看点

二进制方式部署K8S-v1.23.6--(上)

1、环境准备
IP地址 主机 角色 系统
192.168.100.101 master-101 K8S集群主节点 Ubuntu2004
192.168.100.102 master-102 K8S集群主节点 Ubuntu2004
192.168.100.103 master-103 K8S集群主节点 Ubuntu2004
192.168.100.104 node-104 K8S集群从节点 Ubuntu2004
192.168.100.105 node-105 K8S集群从节点 Ubuntu2004
192.168.100.111 wang.cluster.k8s VIP

1-1、关闭防火墙

~]#ufw disable
~]#ufw status      

1-2、时间同步

~]#apt install -y chrony
~]#systemctl restart chrony
~]#systemctl status chrony
~]#chronyc sources      

1-3、主机名互相解析

~]#vim /etc/hosts
192.168.100.101 master-101
192.168.100.102 master-102
192.168.100.103 master-103
192.168.100.104 node-104
192.168.100.105 node-105
192.168.100.111 wang.cluster.k8s      

1-4、禁用swap

~]#swapoff -a && sed -i 's|^/swap.img|#/swap.img|g' /etc/fstab      

1-5、验证每个节点上IP、MAC 地址和 product_uuid 的唯一性

~]#ifconfig -a

~]#sudo cat /sys/class/dmi/id/product_uuid       # 查看 product_uuid      

1-6、系统内核参数调整

#如果已经调整,请忽略:
~]#echo "vm.swappiness = 0" >> /etc/sysctl.conf 
~]#echo "net.ipv4.ip_forward = 1" >> /etc/sysctl.conf
~]#echo "net.bridge.bridge-nf-call-iptables = 1" >> /etc/sysctl.conf
~]#echo "net.bridge.bridge-nf-call-ip6tables = 1" >> /etc/sysctl.conf      

1-7、安装ipvs模块

~]#apt -y install ipvsadm ipset sysstat conntrack

# 锁定版本
~]#apt-mark hold ipvsadm      
#将模块加载到内核中(开机自动设置-需要重启机器生效)

~]#tee /etc/modules-load.d/k8s.conf <<'EOF'
br_netfilter
overlay
nf_conntrack
ip_vs
ip_vs_lc
ip_vs_lblc
ip_vs_lblcr
ip_vs_rr
ip_vs_wrr
ip_vs_sh
ip_vs_dh
ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
ip_tables
ip_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
xt_set
EOF      
#加载模块到内核中
~]#mkdir -pv /etc/modules.d
~]#tee /etc/modules.d/k8s.modules <<EOF
#!/bin/bash
# Load the kernel modules required by Kubernetes networking:
#   br_netfilter / overlay : container bridge traffic + overlayfs for the runtime
#   nf_conntrack           : connection tracking (4.19+ kernels; older use nf_conntrack_ipv4)
#   ip_vs_*                : all IPVS schedulers usable by kube-proxy in IPVS mode
#   ip*/xt_set/ipip        : iptables/ipset/tunnel helpers used by CNI plugins (e.g. Calico)
modprobe -- br_netfilter
modprobe -- overlay
modprobe -- nf_conntrack
modprobe -- ip_vs
modprobe -- ip_vs_lc
modprobe -- ip_vs_lblc
modprobe -- ip_vs_lblcr
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- ip_vs_dh
modprobe -- ip_vs_fo
modprobe -- ip_vs_nq
modprobe -- ip_vs_sed
modprobe -- ip_vs_ftp
modprobe -- ip_tables
modprobe -- ip_set
modprobe -- ipt_set
modprobe -- ipt_rpfilter
modprobe -- ipt_REJECT
modprobe -- ipip
modprobe -- xt_set
EOF

~]#chmod 755 /etc/modules.d/k8s.modules && bash /etc/modules.d/k8s.modules && lsmod | grep -e ip_vs -e nf_conntrack

~]#sysctl --system

温馨提示: 在 kernel 4.19 版本及以上将使用 nf_conntrack 模块, 则在 4.18 版本以下则需使用nf_conntrack_ipv4 模块      
2、安装Haproxy、Keepalived

描述: 由于是测试学习环境, 所以直接采用master节点机器,如果是正式环境建议独立出来。

2-1、下载安装

#所有master节点执行:
#安装下载 haproxy (HA代理健康检测) 与 keepalived (虚拟路由协议-主从)

[root@master-101 ~]#apt-cache madison haproxy keepalived
[root@master-101 ~]#apt install -y haproxy=2.0.13-2 keepalived
[root@master-101 ~]#apt-mark hold haproxy keepalived      
#所有master节点执行:
#配置HAproxy
[root@master-101 ~]#tee /etc/haproxy/haproxy.cfg<<'EOF'
global
  user haproxy
  group haproxy
  maxconn 2000
  daemon
  log /dev/log local0
  log /dev/log local1 err
  chroot /var/lib/haproxy
  stats socket /run/haproxy/admin.sock mode 660 level admin expose-fd listeners
  stats timeout 30s
  # Default SSL material locations

  ca-base /etc/ssl/certs
  crt-base /etc/ssl/private
  # See: https://ssl-config.mozilla.org/#server=haproxy&server-version=2.0.3&config=intermediate

  ssl-default-bind-ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384
  ssl-default-bind-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256
  ssl-default-bind-options ssl-min-ver TLSv1.2 no-tls-tickets

defaults
  log     global
  mode    http
  option  httplog
  option  dontlognull
  timeout connect 5000
  timeout client  50000
  timeout server  50000
  timeout http-request 15s
  timeout http-keep-alive 15s

# 注意: 管理HAproxy (可选)
# frontend monitor-in
#   bind *:33305
#   mode http
#   option httplog
#   monitor-uri /monitor

# 注意: 基于四层代理, 16443 为VIP的 ApiServer 控制平面端口, 由于是与master节点在一起所以不能使用6443端口.
frontend k8s-master
  bind 0.0.0.0:16443
  bind 127.0.0.1:16443
  mode tcp
  option tcplog
  tcp-request inspect-delay 5s
  default_backend k8s-master
# 注意: Master 节点的默认 Apiserver 是6443端口
backend k8s-master
  mode tcp
  option tcplog
  option tcp-check
  balance roundrobin
  default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
  server master-101 192.168.100.101:6443 check
  server master-102 192.168.100.102:6443 check
  server master-103 192.168.100.103:6443 check
EOF      
#所有master节点执行:
#配置Keepalived
[root@master-101 ~]#mkdir /etc/keepalived
[root@master-101 ~]#tee /etc/keepalived/keepalived.conf <<'EOF'
! Configuration File for keepalived
global_defs {
  router_id LVS_DEVEL
  script_user root
  enable_script_security
}
vrrp_script chk_apiserver {
  script "/etc/keepalived/check_apiserver.sh"
  interval 5
  weight -5
  fall 2
  rise 1
}
vrrp_instance VI_1 {
  state MASTER                  #另外两个节点是backup
  interface enp1s0              #注意网口名称
  mcast_src_ip 192.168.100.101  #本机IP,各节点需改为自己的IP(mcast_src_ip必须是单播源地址,不能是组播地址)
  virtual_router_id 51
  priority 101
  advert_int 2
  authentication {
    auth_type PASS
    auth_pass 123456
  }
  virtual_ipaddress {
    192.168.100.111             #各节点vip都一样
  }
}
EOF      
#所有master节点执行:
# KeepAlived 健康检查脚本
[root@master-101 ~]#tee /etc/keepalived/check_apiserver.sh <<'EOF'
#!/bin/bash
# Keepalived health-check script: probe up to 3 times (1s apart) for a
# running haproxy process; if haproxy never shows up, stop keepalived so
# the VIP fails over to another master node.
err=0
for _ in 1 2 3; do
  # -x matches the exact process name, avoiding false positives on
  # unrelated commands that merely contain "haproxy" in their name.
  if pgrep -x haproxy >/dev/null; then
    err=0
    break
  fi
  err=$((err + 1))
  sleep 1
done

if (( err != 0 )); then
  echo "systemctl stop keepalived"
  /usr/bin/systemctl stop keepalived
  exit 1
fi
exit 0
EOF

[root@master-101 ~]#chmod +x /etc/keepalived/check_apiserver.sh
[root@master-101 ~]#systemctl daemon-reload && systemctl enable --now haproxy keepalived
[root@master-101 ~]#systemctl status haproxy keepalived
#测试一下vip是否能漂移,此处略      
3、部署etcd

3-1、签发etcd证书

#master-101执行:
#创建一个配置与相关文件存放的目录, 以及下载获取cfssl工具进行CA证书制作与签发
[root@master-101 ~]#mkdir -pv /app/k8s-init
[root@master-101 ~]#cd /app/k8s-init
#windows下载cfssl相关工具,下载地址:
https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssl_1.6.1_linux_amd64

https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssljson_1.6.1_linux_amd64

https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssl-certinfo_1.6.1_linux_amd64

#把下载的文件上传到该目录:
[root@master-101 k8s-init]#cp cfssl_1.6.1_linux_amd64 /usr/local/bin/cfssl
[root@master-101 k8s-init]#cp cfssljson_1.6.1_linux_amd64 /usr/local/bin/cfssljson
[root@master-101 k8s-init]#cp cfssl-certinfo_1.6.1_linux_amd64 /usr/local/bin/cfssl-certinfo
[root@master-101 k8s-init]#chmod +x /usr/local/bin/cfssl*
[root@master-101 k8s-init]#cfssl version
温馨提示:
  cfssl : CFSSL 命令行工具
  cfssljson : 用于从cfssl程序中获取JSON输出并将证书、密钥、证书签名请求文件CSR和Bundle写入到文件中
  cfssl-certinfo: 可以查看证书信息      
#master-101执行:
#创建ca证书:
[root@master-101 k8s-init]#cfssl print-defaults csr > ca-csr.json

[root@master-101 k8s-init]#tee ca-csr.json <<'EOF'
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "beijing",
      "ST": "beijing",
      "O": "k8s",
      "OU": "System"
    }
  ],
  "ca": {
    "expiry": "87600h"
  }
}
EOF

#关键参数解析:
CN: Common Name,一般写的是域名,浏览器使用该字段验证网站是否合法,非常重要。
key:生成证书的算法
hosts:表示哪些主机名(域名)或者IP可以使用此csr申请的证书,为空或者""表示所有的都可以使用(本例中没有`"hosts": [""]`字段)
names:常见属性设置
  * C: Country, 国家
  * ST: State,州或者是省份
  * L: Locality Name,地区,城市
  * O: Organization Name,组织名称,公司名称(在k8s中常用于指定Group,进行RBAC绑定)
  * OU: Organization Unit Name,组织单位名称,公司部门
  
温馨提示: 如果将 expiry 设置为87600h 表示证书过期时间为十年。
==============================================================================================
#CA 证书策略配置文件
[root@master-101 k8s-init]#cfssl print-defaults config > ca-config.json
[root@master-101 k8s-init]#tee ca-config.json <<'EOF'
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "expiry": "87600h",
        "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      },
      "etcd": {
        "expiry": "87600h",
        "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      }
    }
  }
}
EOF
# 关键参数解析:
default 默认策略,指定了证书的默认有效期是10年(87600h)
profile 自定义策略配置
  * kubernetes:表示该配置(profile)的用途是为kubernetes生成证书及相关的校验工作
  * signing:表示该证书可用于签名其它证书;生成的 ca.pem 证书中 CA=TRUE
  * server auth:表示可以用该 CA 对 server 提供的证书进行验证
  * client auth:表示可以用该 CA 对 client 提供的证书进行验证
  * expiry:也表示过期时间,如果不写以default中的为准
  
======================================================================================== 
# 利用CA证书签名请求配置文件 ca-csr.json 生成CA证书和CA私钥和CSR(证书签名请求):
[root@master-101 k8s-init]#cfssl gencert -initca ca-csr.json | cfssljson -bare ca

#查看证书信息:
[root@master-101 k8s-init]#openssl x509 -in ca.pem -text -noout | grep "Not"      
#master-101执行:
#配置ETCD证书相关文件以及生成其证书
[root@master-101 k8s-init]#tee etcd-csr.json <<'EOF'
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "192.168.100.101",
    "192.168.100.102",
    "192.168.100.103",
    "etcd1",
    "etcd2",
    "etcd3"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "beijing",
      "ST": "beijing",
      "O": "etcd",
      "OU": "System"
    }
  ]
}
EOF

======================================================================================== 
#用ca证书签发生成etcd证书
[root@master-101 k8s-init]#cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=etcd etcd-csr.json | cfssljson -bare etcd

[root@master-101 k8s-init]#openssl x509 -in etcd.pem -text -noout | grep "X509v3" -A1      

3-2、部署etcd

#所有Master节点主机执行:
#下载etcd软件包
下载地址:https://github.com/etcd-io/etcd/releases/download/v3.5.4/etcd-v3.5.4-linux-amd64.tar.gz
#windows下载后上传

[root@master-101 ~]#tar xf etcd-v3.5.4-linux-amd64.tar.gz
[root@master-101 ~]#cd etcd-v3.5.4-linux-amd64/
[root@master-101 etcd-v3.5.4-linux-amd64]#cp -a etcd* /usr/local/bin/

[root@master-101 ~]#mkdir -pv /etc/etcd/pki
[root@master-101 ~]#cd /app/k8s-init/
[root@master-101 k8s-init]#cp *.pem /etc/etcd/pki/
#注意: 需先在 192.168.100.102、192.168.100.103 上执行 mkdir -pv /etc/etcd/pki
[root@master-101 k8s-init]#scp /etc/etcd/pki/* 192.168.100.102:/etc/etcd/pki/
[root@master-101 k8s-init]#scp /etc/etcd/pki/* 192.168.100.103:/etc/etcd/pki/      
#192.168.100.101执行:
[root@master-101 ~]#tee /etc/etcd/etcd.conf <<'EOF'
# [成员配置]
# member 名称
ETCD_NAME=etcd1
# 存储数据的目录(注意需要建立)
ETCD_DATA_DIR="/var/lib/etcd/data"
# 用于监听客户端etcdctl或者curl连接
ETCD_LISTEN_CLIENT_URLS="https://192.168.100.101:2379,https://127.0.0.1:2379"
# 用于监听集群中其它member的连接
ETCD_LISTEN_PEER_URLS="https://192.168.100.101:2380"

# [证书配置]
# ETCD_CERT_FILE=/etc/etcd/pki/etcd.pem
# ETCD_KEY_FILE=/etc/etcd/pki/etcd-key.pem
# ETCD_TRUSTED_CA_FILE=/etc/kubernetes/pki/ca.pem
# ETCD_CLIENT_CERT_AUTH=true
# ETCD_PEER_CLIENT_CERT_AUTH=true
# ETCD_PEER_CERT_FILE=/etc/etcd/pki/etcd.pem
# ETCD_PEER_KEY_FILE=/etc/etcd/pki/etcd-key.pem
# ETCD_PEER_TRUSTED_CA_FILE=/etc/kubernetes/pki/ca.pem

# [集群配置]
# 本机地址用于通知客户端,客户端通过此IPs与集群通信;
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.100.101:2379"
# 本机地址用于通知集群member与member通信
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.100.101:2380"
# 描述集群中所有节点的信息,本member根据此信息去联系其他member
ETCD_INITIAL_CLUSTER="etcd1=https://192.168.100.101:2380,etcd2=https://192.168.100.102:2380,etcd3=https://192.168.100.103:2380"
# 集群状态新建集群时候设置为new,若是想加入某个已经存在的集群设置为existing
ETCD_INITIAL_CLUSTER_STATE=new
EOF      
#192.168.100.102执行:
[root@master-102 ~]#tee /etc/etcd/etcd.conf <<'EOF'
# [成员配置]
# member 名称
ETCD_NAME=etcd2
# 存储数据的目录(注意需要建立)
ETCD_DATA_DIR="/var/lib/etcd/data"
# 用于监听客户端etcdctl或者curl连接
ETCD_LISTEN_CLIENT_URLS="https://192.168.100.102:2379,https://127.0.0.1:2379"
# 用于监听集群中其它member的连接
ETCD_LISTEN_PEER_URLS="https://192.168.100.102:2380"

# [集群配置]
# 本机地址用于通知客户端,客户端通过此IPs与集群通信;
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.100.102:2379"
# 本机地址用于通知集群member与member通信
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.100.102:2380"
# 描述集群中所有节点的信息,本member根据此信息去联系其他member
ETCD_INITIAL_CLUSTER="etcd1=https://192.168.100.101:2380,etcd2=https://192.168.100.102:2380,etcd3=https://192.168.100.103:2380"
# 集群状态新建集群时候设置为new,若是想加入某个已经存在的集群设置为existing
ETCD_INITIAL_CLUSTER_STATE=new
EOF      
#192.168.100.103执行:
[root@master-103 ~]#tee /etc/etcd/etcd.conf <<'EOF'
# [成员配置]
# member 名称
ETCD_NAME=etcd3
# 存储数据的目录(注意需要建立)
ETCD_DATA_DIR="/var/lib/etcd/data"
# 用于监听客户端etcdctl或者curl连接
ETCD_LISTEN_CLIENT_URLS="https://192.168.100.103:2379,https://127.0.0.1:2379"
# 用于监听集群中其它member的连接
ETCD_LISTEN_PEER_URLS="https://192.168.100.103:2380"

# [集群配置]
# 本机地址用于通知客户端,客户端通过此IPs与集群通信;
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.100.103:2379"
# 本机地址用于通知集群member与member通信
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.100.103:2380"
# 描述集群中所有节点的信息,本member根据此信息去联系其他member
ETCD_INITIAL_CLUSTER="etcd1=https://192.168.100.101:2380,etcd2=https://192.168.100.102:2380,etcd3=https://192.168.100.103:2380"
# 集群状态新建集群时候设置为new,若是想加入某个已经存在的集群设置为existing
ETCD_INITIAL_CLUSTER_STATE=new
EOF      
#所有master节点执行:
#创建etcd的service文件,便于systemd管理:

[root@master-101 ~]#mkdir -pv /var/lib/etcd
[root@master-101 ~]#cat > /usr/lib/systemd/system/etcd.service <<EOF
[Unit]
Description=Etcd Server
Documentation=https://github.com/etcd-io/etcd
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
EnvironmentFile=-/etc/etcd/etcd.conf
ExecStart=/usr/local/bin/etcd \
  --client-cert-auth \
  --trusted-ca-file /etc/etcd/pki/ca.pem \
  --cert-file /etc/etcd/pki/etcd.pem \
  --key-file /etc/etcd/pki/etcd-key.pem \
  --peer-client-cert-auth \
  --peer-trusted-ca-file /etc/etcd/pki/ca.pem \
  --peer-cert-file /etc/etcd/pki/etcd.pem \
  --peer-key-file /etc/etcd/pki/etcd-key.pem
Restart=on-failure
RestartSec=5
LimitNOFILE=65535
LimitNPROC=65535

[Install]
WantedBy=multi-user.target
EOF

[root@master-101 ~]#systemctl daemon-reload && systemctl enable --now etcd.service
[root@master-101 ~]#systemctl status etcd.service      
#查看etcd是否正常以及健康状态:

# 利用 etcdctl 工具查看集群成员信息
export ETCDCTL_API=3
[root@master-101 ~]#etcdctl --endpoints=https://192.168.100.101:2379,https://192.168.100.102:2379,https://192.168.100.103:2379 --cacert="/etc/etcd/pki/ca.pem" --cert="/etc/etcd/pki/etcd.pem" --key="/etc/etcd/pki/etcd-key.pem" --write-out=table member list

# 集群节点信息
[root@master-101 ~]#etcdctl --endpoints=https://192.168.100.101:2379,https://192.168.100.102:2379,https://192.168.100.103:2379 --cacert="/etc/etcd/pki/ca.pem" --cert="/etc/etcd/pki/etcd.pem" --key="/etc/etcd/pki/etcd-key.pem" --write-out=table endpoint status

# 集群节点健康状态
[root@master-101 ~]#etcdctl --endpoints=https://192.168.100.101:2379,https://192.168.100.102:2379,https://192.168.100.103:2379 --cacert="/etc/etcd/pki/ca.pem" --cert="/etc/etcd/pki/etcd.pem" --key="/etc/etcd/pki/etcd-key.pem"  --write-out=table endpoint health

# 集群节点性能测试
[root@master-101 ~]#etcdctl --endpoints=https://192.168.100.101:2379,https://192.168.100.102:2379,https://192.168.100.103:2379 --cacert="/etc/etcd/pki/ca.pem" --cert="/etc/etcd/pki/etcd.pem" --key="/etc/etcd/pki/etcd-key.pem" --write-out=table check perf      
4、部署containerd
#所有节点执行:
#下载地址:https://github.com/containerd/containerd/releases/download/v1.6.4/cri-containerd-cni-1.6.4-linux-amd64.tar.gz
#windows下载后上传
[root@master-101 local]#mkdir cri-containerd-cni
[root@master-101 local]#tar xf cri-containerd-cni-1.6.4-linux-amd64.tar.gz -C cri-containerd-cni
[root@master-101 local]#tree cri-containerd-cni
/usr/local/cri-containerd-cni/
├── etc
│   ├── cni
│   │   └── net.d
│   │       └── 10-containerd-net.conflist
│   ├── crictl.yaml
│   └── systemd
│       └── system
│           └── containerd.service
├── opt
│   ├── cni
│   │   └── bin
│   │       ├── bandwidth
│   │       ├── bridge
│   │       ├── dhcp
│   │       ├── firewall
│   │       ├── host-device
│   │       ├── host-local
│   │       ├── ipvlan
│   │       ├── loopback
│   │       ├── macvlan
│   │       ├── portmap
│   │       ├── ptp
│   │       ├── sbr
│   │       ├── static
│   │       ├── tuning
│   │       ├── vlan
│   │       └── vrf
│   └── containerd
│       └── cluster
│           ├── gce
│           │   ├── cloud-init
│           │   │   ├── master.yaml
│           │   │   └── node.yaml
│           │   ├── cni.template
│           │   ├── configure.sh
│           │   └── env
│           └── version
└── usr
    └── local
        ├── bin
        │   ├── containerd
        │   ├── containerd-shim
        │   ├── containerd-shim-runc-v1
        │   ├── containerd-shim-runc-v2
        │   ├── containerd-stress
        │   ├── crictl
        │   ├── critest
        │   ├── ctd-decoder
        │   └── ctr
        └── sbin
            └── runc


#在所有节点上复制到上述文件夹到对应目录中
[root@master-101 local]#cd cri-containerd-cni/
[root@master-101 cri-containerd-cni]#cp -r etc/ /
[root@master-101 cri-containerd-cni]#cp -r opt/ /
[root@master-101 cri-containerd-cni]#cp -r usr/ /      
#所有节点执行:
#配置创建并修改 config.toml
[root@master-101 ~]#mkdir -pv /etc/containerd
[root@master-101 ~]#containerd config default >/etc/containerd/config.toml
[root@master-101 ~]#ls /etc/containerd/config.toml

# pause 镜像源
[root@master-101 ~]#sed -i "s#k8s.gcr.io/pause#registry.cn-hangzhou.aliyuncs.com/google_containers/pause#g"  /etc/containerd/config.toml

# 使用 SystemdCgroup
[root@master-101 ~]#sed -i 's#SystemdCgroup = false#SystemdCgroup = true#g' /etc/containerd/config.toml

# docker.io mirror
[root@master-101 ~]#sed -i '/registry.mirrors]/a\ \ \ \ \ \ \ \ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]' /etc/containerd/config.toml
[root@master-101 ~]#sed -i '/registry.mirrors."docker.io"]/a\ \ \ \ \ \ \ \ \ \ endpoint = ["https://xlx9erfu.mirror.aliyuncs.com","https://docker.mirrors.ustc.edu.cn"]' /etc/containerd/config.toml

# gcr.io mirror
[root@master-101 ~]#sed -i '/registry.mirrors]/a\ \ \ \ \ \ \ \ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."gcr.io"]' /etc/containerd/config.toml
[root@master-101 ~]#sed -i '/registry.mirrors."gcr.io"]/a\ \ \ \ \ \ \ \ \ \ endpoint = ["https://gcr.mirrors.ustc.edu.cn"]' /etc/containerd/config.toml

# k8s.gcr.io mirror
[root@master-101 ~]#sed -i '/registry.mirrors]/a\ \ \ \ \ \ \ \ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."k8s.gcr.io"]' /etc/containerd/config.toml
[root@master-101 ~]#sed -i '/registry.mirrors."k8s.gcr.io"]/a\ \ \ \ \ \ \ \ \ \ endpoint = ["https://gcr.mirrors.ustc.edu.cn/google-containers/","https://registry.cn-hangzhou.aliyuncs.com/google_containers/"]' /etc/containerd/config.toml

# quay.io mirror
[root@master-101 ~]#sed -i '/registry.mirrors]/a\ \ \ \ \ \ \ \ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."quay.io"]' /etc/containerd/config.toml
[root@master-101 ~]#sed -i '/registry.mirrors."quay.io"]/a\ \ \ \ \ \ \ \ \ \ endpoint = ["https://quay.mirrors.ustc.edu.cn"]' /etc/containerd/config.toml      
#所有节点执行:
# 配置文件设置永久生效
[root@master-101 ~]#cat <<EOF > /etc/crictl.yaml
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
debug: false
EOF

[root@master-101 ~]#systemctl daemon-reload && systemctl enable --now containerd.service
[root@master-101 ~]#systemctl status containerd.service
[root@master-101 ~]#ctr version
[root@master-101 ~]#runc -v      
#温馨提示: 当默认 runc 执行提示 runc: symbol lookup error: runc: undefined symbol: seccomp_notify_respond 时,由于上述软件包中包含的runc对系统依赖过多,所以建议单独下载安装 runc 二进制项目(https://github.com/opencontainers/runc/),如下:
wget https://github.com/opencontainers/runc/releases/download/v1.1.1/runc.amd64
# 执行权限赋予
chmod +x runc.amd64
# 替换掉 /usr/local/sbin/ 路径原软件包中的 runc
mv runc.amd64 /usr/local/sbin/runc      

继续阅读