
Kubernetes 1.7.2 + Calico Deployment

System environment

OS

[root@kubernetes-master- ~]# cat /etc/redhat-release 
CentOS Linux release 7.x (Core) 
           

hosts

[root@kubernetes-master- ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.1.109 kubernetes-master-1
192.168.1.110 kubernetes-master-2
192.168.1.111 k8s-node-1
           

Create the certificates

Install cfssl

mkdir -p /opt/local/cfssl

cd /opt/local/cfssl

wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
mv cfssl_linux-amd64 cfssl

wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
mv cfssljson_linux-amd64 cfssljson

wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
mv cfssl-certinfo_linux-amd64 cfssl-certinfo

chmod +x *

ls -l
total ...
-rwxr-xr-x 1 root root ... cfssl
-rwxr-xr-x 1 root root ... cfssl-certinfo
-rwxr-xr-x 1 root root ... cfssljson
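As a quick sanity check (a minimal sketch; it only assumes the install path used above), confirm the binary runs:

# should print the cfssl release (R1.2 here)
/opt/local/cfssl/cfssl version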
           

Create the CA certificate config

mkdir /opt/ssl

cd /opt/ssl

/opt/local/cfssl/cfssl print-defaults config > config.json

/opt/local/cfssl/cfssl print-defaults csr > csr.json

[root@k8s-node- ssl]# cat config.json csr.json 
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ],
        "expiry": "87600h"
      }
    }
  }
}
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShenZhen",
      "L": "ShenZhen",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
           

Generate the CA certificate and private key

cd /opt/ssl/

/opt/local/cfssl/cfssl gencert -initca csr.json | /opt/local/cfssl/cfssljson -bare ca

[root@k8s-node-1 ssl]# ls -l
total 20
-rw-r--r-- 1 root root 1005 4月   7 15:29 ca.csr
-rw------- 1 root root 1675 4月   7 15:29 ca-key.pem
-rw-r--r-- 1 root root 1363 4月   7 15:29 ca.pem
-rw-r--r-- 1 root root  292 4月   7 15:27 config.json
-rw-r--r-- 1 root root  210 4月   7 15:27 csr.json
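The freshly generated CA can be inspected before it is distributed (an optional check, assuming the paths above):

/opt/local/cfssl/cfssl-certinfo -cert /opt/ssl/ca.pem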
           

Distribute the certificates

# Create the certificate directory
mkdir -p /etc/kubernetes/ssl

# Copy all the files into it
cp * /etc/kubernetes/ssl

# These files must be copied to every k8s machine, and the directory must be created there as well

scp * root@192.168.1.110:/etc/kubernetes/ssl/

scp * root@192.168.1.111:/etc/kubernetes/ssl/
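The distribution can also be scripted (a sketch; it assumes the other two machines are 192.168.1.110 and 192.168.1.111 per the layout above, and that root SSH access works):

# create the target directory and copy the CA material to each remote host
for host in 192.168.1.110 192.168.1.111; do
  ssh root@${host} "mkdir -p /etc/kubernetes/ssl"
  scp /opt/ssl/*.pem /opt/ssl/config.json root@${host}:/etc/kubernetes/ssl/
done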
           

etcd cluster

yum install etcd3 -y
           

Create the etcd certificates

cd /opt/ssl/

[root@kubernetes-master- ssl]# vi etcd-csr.json

{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "192.168.1.109",
    "192.168.1.110",
    "192.168.1.111"
  ],
  "key": {
    "algo": "rsa",
    "size": 
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShenZhen",
      "L": "ShenZhen",
      "O": "k8s",
      "OU": "System"
    }
  ]
}

           
# Generate the etcd keys

/opt/local/cfssl/cfssl gencert -ca=/opt/ssl/ca.pem \
  -ca-key=/opt/ssl/ca-key.pem \
  -config=/opt/ssl/config.json \
  -profile=kubernetes etcd-csr.json | /opt/local/cfssl/cfssljson -bare etcd
           
# Check the generated files

[root@k8s-node- ssl]# ls -l etcd*
-rw-r--r-- 1 root root ... etcd.csr
-rw-r--r-- 1 root root ... etcd-csr.json
-rw------- 1 root root ... etcd-key.pem
-rw-r--r-- 1 root root ... etcd.pem



# Copy to the etcd servers

# etcd-1 
cp etcd*.pem /etc/kubernetes/ssl/

# etcd-2
scp etcd* root@192.168.1.110:/etc/kubernetes/ssl/

# etcd-3
scp etcd* root@192.168.1.111:/etc/kubernetes/ssl/



# If etcd runs as a non-root user, it cannot read the key without adjusting permissions

chmod 644 /etc/kubernetes/ssl/etcd-key.pem
           

Modify the etcd configuration

# etcd-1


vi /usr/lib/systemd/system/etcd.service

[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
User=etcd
# set GOMAXPROCS to number of processors
ExecStart=/usr/bin/etcd \
  --name=etcd1 \
  --cert-file=/etc/kubernetes/ssl/etcd.pem \
  --key-file=/etc/kubernetes/ssl/etcd-key.pem \
  --peer-cert-file=/etc/kubernetes/ssl/etcd.pem \
  --peer-key-file=/etc/kubernetes/ssl/etcd-key.pem \
  --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --initial-advertise-peer-urls=https://192.168.1.109:2380 \
  --listen-peer-urls=https://192.168.1.109:2380 \
  --listen-client-urls=https://192.168.1.109:2379,http://127.0.0.1:2379 \
  --advertise-client-urls=https://192.168.1.109:2379 \
  --initial-cluster-token=k8s-etcd-cluster \
  --initial-cluster=etcd1=https://192.168.1.109:2380,etcd2=https://192.168.1.110:2380,etcd3=https://192.168.1.111:2380 \
  --initial-cluster-state=new \
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
           
# etcd-2


vi /usr/lib/systemd/system/etcd.service


[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
User=etcd
# set GOMAXPROCS to number of processors
ExecStart=/usr/bin/etcd \
  --name=etcd2 \
  --cert-file=/etc/kubernetes/ssl/etcd.pem \
  --key-file=/etc/kubernetes/ssl/etcd-key.pem \
  --peer-cert-file=/etc/kubernetes/ssl/etcd.pem \
  --peer-key-file=/etc/kubernetes/ssl/etcd-key.pem \
  --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --initial-advertise-peer-urls=https://192.168.1.110:2380 \
  --listen-peer-urls=https://192.168.1.110:2380 \
  --listen-client-urls=https://192.168.1.110:2379,http://127.0.0.1:2379 \
  --advertise-client-urls=https://192.168.1.110:2379 \
  --initial-cluster-token=k8s-etcd-cluster \
  --initial-cluster=etcd1=https://192.168.1.109:2380,etcd2=https://192.168.1.110:2380,etcd3=https://192.168.1.111:2380 \
  --initial-cluster-state=new \
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
           
# etcd-3

vi /usr/lib/systemd/system/etcd.service

[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
User=etcd
# set GOMAXPROCS to number of processors
ExecStart=/usr/bin/etcd \
  --name=etcd3 \
  --cert-file=/etc/kubernetes/ssl/etcd.pem \
  --key-file=/etc/kubernetes/ssl/etcd-key.pem \
  --peer-cert-file=/etc/kubernetes/ssl/etcd.pem \
  --peer-key-file=/etc/kubernetes/ssl/etcd-key.pem \
  --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --initial-advertise-peer-urls=https://192.168.1.111:2380 \
  --listen-peer-urls=https://192.168.1.111:2380 \
  --listen-client-urls=https://192.168.1.111:2379,http://127.0.0.1:2379 \
  --advertise-client-urls=https://192.168.1.111:2379 \
  --initial-cluster-token=k8s-etcd-cluster \
  --initial-cluster=etcd1=https://192.168.1.109:2380,etcd2=https://192.168.1.110:2380,etcd3=https://192.168.1.111:2380 \
  --initial-cluster-state=new \
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
           

Start etcd

Run on every node:

systemctl enable etcd

systemctl start etcd

systemctl status etcd

           
# If it fails, use
journalctl -f -t etcd  and  journalctl -u etcd  to locate the problem
           

Verify the etcd cluster status

Check the etcd cluster health:

etcdctl --endpoints=https://192.168.1.110:2379 \
        --cert-file=/etc/kubernetes/ssl/etcd.pem \
        --ca-file=/etc/kubernetes/ssl/ca.pem \
        --key-file=/etc/kubernetes/ssl/etcd-key.pem \
        cluster-health

member fb6a35f1ce3d83 is healthy: got healthy result from https://192.168.1.109:2379
member c07b7eb732b is healthy: got healthy result from https://192.168.1.111:2379
member fdb0d4304dcee33c is healthy: got healthy result from https://192.168.1.110:2379
cluster is healthy
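To query every member rather than a single endpoint, a small loop works too (a sketch; the endpoints are the three listed in --initial-cluster above):

for ep in https://192.168.1.109:2379 https://192.168.1.110:2379 https://192.168.1.111:2379; do
  etcdctl --endpoints=${ep} \
          --cert-file=/etc/kubernetes/ssl/etcd.pem \
          --ca-file=/etc/kubernetes/ssl/ca.pem \
          --key-file=/etc/kubernetes/ssl/etcd-key.pem \
          cluster-health
done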
           

List the etcd cluster members

etcdctl --endpoints=https://192.168.1.110:2379 \
        --cert-file=/etc/kubernetes/ssl/etcd.pem \
        --ca-file=/etc/kubernetes/ssl/ca.pem \
        --key-file=/etc/kubernetes/ssl/etcd-key.pem \
        member list

fb6a35f1ce3d83: name=etcd1 peerURLs=https://192.168.1.109:2380 clientURLs=https://192.168.1.109:2379 isLeader=true
c07b7eb732b: name=etcd3 peerURLs=https://192.168.1.111:2380 clientURLs=https://192.168.1.111:2379 isLeader=false
fdb0d4304dcee33c: name=etcd2 peerURLs=https://192.168.1.110:2380 clientURLs=https://192.168.1.110:2379 isLeader=false
           

Install docker

# Add the docker yum repo

# Install yum-config-manager

yum -y install yum-utils

# Add the repo
yum-config-manager \
    --add-repo \
    https://download.docker.com/linux/centos/docker-ce.repo


# Refresh the repo metadata
yum makecache

# Install

yum install docker-ce
           

Add a Docker Hub mirror (registry accelerator)

[root@kubernetes-master- ~]# cat /etc/docker/daemon.json 
{"registry-mirrors": ["http://579fe187.m.daocloud.io","https://pee6w651.mirror.aliyuncs.com"]}
           

Start docker

systemctl daemon-reload
systemctl start docker
systemctl enable docker
           

Install the kubectl tool

Master side

# First install kubectl

wget https://dl.k8s.io/v1.7.2/kubernetes-client-linux-amd64.tar.gz

tar -xzvf kubernetes-client-linux-amd64.tar.gz

cp kubernetes/client/bin/* /usr/local/bin/

chmod a+x /usr/local/bin/kube*


# Verify the installation

kubectl version
[root@kubernetes-master- ~]# kubectl version
Client Version: version.Info{Major:"1", Minor:"7", GitVersion:"v1.7.2", GitCommit:"922a86cfcd65915a9b2f69f3f193b8907d741d9c", GitTreeState:"clean", BuildDate:"2017-07-21T08:23:22Z", GoVersion:"go1.8.3", Compiler:"gc", Platform:"linux/amd64"}
           

Create the admin certificate

cd /opt/ssl/

vi admin-csr.json
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShenZhen",
      "L": "ShenZhen",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
           
# Generate the admin certificate and private key
cd /opt/ssl/

/opt/local/cfssl/cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem \
  -ca-key=/etc/kubernetes/ssl/ca-key.pem \
  -config=/etc/kubernetes/ssl/config.json \
  -profile=kubernetes admin-csr.json | /opt/local/cfssl/cfssljson -bare admin


# Check the generated files

[root@k8s-master- ssl]# ls admin*
admin.csr  admin-csr.json  admin-key.pem  admin.pem

cp admin*.pem /etc/kubernetes/ssl/

scp admin*.pem root@192.168.1.110:/etc/kubernetes/ssl/
scp admin*.pem root@192.168.1.111:/etc/kubernetes/ssl/
           

Configure the kubectl kubeconfig file

# Configure the kubernetes cluster

kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=https://192.168.1.109:6443


# Configure client authentication

kubectl config set-credentials admin \
  --client-certificate=/etc/kubernetes/ssl/admin.pem \
  --embed-certs=true \
  --client-key=/etc/kubernetes/ssl/admin-key.pem



kubectl config set-context kubernetes \
  --cluster=kubernetes \
  --user=admin


kubectl config use-context kubernetes
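The resulting kubeconfig (written to /root/.kube/config) can be checked locally even before the apiserver is running (a quick sketch):

kubectl config view
kubectl config current-context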
           

Distribute the kubectl config file

# Distribute the kubeconfig file configured above to the other machines

# Create the directory on the other servers

mkdir /root/.kube   # this directory must also be created on the other nodes

scp /root/.kube/config root@192.168.1.110:/root/.kube/

scp /root/.kube/config root@192.168.1.111:/root/.kube/
           

Deploy the kubernetes Master node

The Master needs three components: kube-apiserver, kube-scheduler, and kube-controller-manager.

Install the components

# Download the release from github

cd /tmp

wget https://dl.k8s.io/v1.7.2/kubernetes-server-linux-amd64.tar.gz

tar -xzvf kubernetes-server-linux-amd64.tar.gz

cd kubernetes

cp -r server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler,kubectl,kube-proxy,kubelet} /usr/local/bin/
           

Create the kubernetes certificate

cd /opt/ssl

vi kubernetes-csr.json

{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "192.168.1.109",
    "192.168.1.110",
    "192.254.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShenZhen",
      "L": "ShenZhen",
      "O": "k8s",
      "OU": "System"
    }
  ]
}


## In the hosts field above: 127.0.0.1 is localhost, 192.168.1.109 and 192.168.1.110 are the Master IPs, and 192.254.0.1 is the kubernetes SVC IP, normally the first IP of the service network. Once the cluster is up you can confirm it with kubectl get svc (see below).
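For example, after the apiserver is running the reserved service IP shows up as the kubernetes service (a quick check, not needed yet at this point):

kubectl get svc kubernetes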
           

Generate the kubernetes certificate and private key

/opt/local/cfssl/cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem \
  -ca-key=/etc/kubernetes/ssl/ca-key.pem \
  -config=/etc/kubernetes/ssl/config.json \
  -profile=kubernetes kubernetes-csr.json | /opt/local/cfssl/cfssljson -bare kubernetes

# Check the generated files

[root@k8s-node- ssl]# ls -lt kubernetes*
-rw-r--r-- 1 root root ... kubernetes.csr
-rw------- 1 root root ... kubernetes-key.pem
-rw-r--r-- 1 root root ... kubernetes.pem
-rw-r--r-- 1 root root ... kubernetes-csr.json


# Copy into place
cp -r kubernetes* /etc/kubernetes/ssl/
scp kubernetes* root@192.168.1.110:/etc/kubernetes/ssl/
scp kubernetes* root@192.168.1.111:/etc/kubernetes/ssl/
           

Configure kube-apiserver

When kubelet starts for the first time it sends a TLS Bootstrapping request to kube-apiserver. kube-apiserver checks whether the token in the request matches its configured token; if it does, it automatically issues a certificate and key for that kubelet.

# Generate a token

[root@k8s-node- ssl]# head -c 16 /dev/urandom | od -An -t x | tr -d ' '
cdc7201eb10d76842ff5b6e35bfbb516


# Create the token.csv file

cd /opt/ssl

vi token.csv

cdc7201eb10d76842ff5b6e35bfbb516,kubelet-bootstrap,,"system:kubelet-bootstrap"


# Copy it

cp token.csv /etc/kubernetes/
scp token.csv root@192.168.1.110:/etc/kubernetes/
scp token.csv root@192.168.1.111:/etc/kubernetes/
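The two steps can also be scripted in one go (a sketch; the token value changes on every run, and the uid field of 10001 is only the conventional value used in guides like this, not something the cluster mandates):

BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
cat > /opt/ssl/token.csv << EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF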
           

Create the kube-apiserver.service file

# Custom systemd service files usually live under /etc/systemd/system/

vi /etc/systemd/system/kube-apiserver.service

[Unit]
Description=kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
User=root
ExecStart=/usr/local/bin/kube-apiserver \
  --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \
  --advertise-address=192.168.1.109 \
  --allow-privileged=true \
  --apiserver-count=2 \
  --audit-log-maxage= \
  --audit-log-maxbackup= \
  --audit-log-maxsize= \
  --audit-log-path=/var/lib/audit.log \
  --authorization-mode=RBAC \
  --bind-address=192.168.1.109 \
  --client-ca-file=/etc/kubernetes/ssl/ca.pem \
  --enable-swagger-ui=true \
  --etcd-cafile=/etc/kubernetes/ssl/ca.pem \
  --etcd-certfile=/etc/kubernetes/ssl/etcd.pem \
  --etcd-keyfile=/etc/kubernetes/ssl/etcd-key.pem \
  --etcd-servers=https://192.168.1.109:2379,https://192.168.1.110:2379,https://192.168.1.111:2379 \
  --event-ttl=1h \
  --kubelet-https=true \
  --insecure-bind-address=192.168.1.109 \
  --runtime-config=rbac.authorization.k8s.io/v1alpha1 \
  --service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --service-cluster-ip-range=192.254.0.0/16 \
  --service-node-port-range=30000-32000 \
  --tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  --experimental-bootstrap-token-auth \
  --token-auth-file=/etc/kubernetes/token.csv \
  --v=
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
           
# Note --service-node-port-range=30000-32000 here.
# This is the port range used when exposing services externally: randomly assigned NodePorts come from this range, and any explicitly specified NodePort must also fall within it.
           

Start kube-apiserver

systemctl daemon-reload
systemctl enable kube-apiserver
systemctl start kube-apiserver
systemctl status kube-apiserver
           

Configure kube-controller-manager

# Create the kube-controller-manager.service file

vi /etc/systemd/system/kube-controller-manager.service


[Unit]
Description=kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/usr/local/bin/kube-controller-manager \
  --address=127.0.0.1 \
  --master=http://192.168.1.109:8080 \
  --allocate-node-cidrs=true \
  --service-cluster-ip-range=192.254.0.0/16 \
  --cluster-cidr=192.168.0.0/16 \
  --cluster-name=kubernetes \
  --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \
  --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --root-ca-file=/etc/kubernetes/ssl/ca.pem \
  --leader-elect=true \
  --v=
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
           

Start kube-controller-manager

systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl start kube-controller-manager
systemctl status kube-controller-manager
           

Configure kube-scheduler

# Create the kube-scheduler.service file

vi /etc/systemd/system/kube-scheduler.service


[Unit]
Description=kubernetes Scheduler
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/usr/local/bin/kube-scheduler \
  --address=127.0.0.1 \
  --master=http://192.168.1.109:8080 \
  --leader-elect=true \
  --v=
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
           

Start kube-scheduler

systemctl daemon-reload
systemctl enable kube-scheduler
systemctl start kube-scheduler
systemctl status kube-scheduler
           

Verify the Master node

[root@kubernetes-master- kubernetes]# kubectl get componentstatuses
NAME                 STATUS    MESSAGE              ERROR
scheduler            Healthy   ok                   
controller-manager   Healthy   ok                   
etcd-0               Healthy   {"health": "true"}   
etcd-1               Healthy   {"health": "true"}   
etcd-2               Healthy   {"health": "true"}   
           

Deploy the kubernetes Nodes

Each Node needs these components: docker, calico, kubectl, kubelet, and kube-proxy.

Configure kubelet

When kubelet starts it sends a TLS bootstrapping request to kube-apiserver. The kubelet-bootstrap user from the bootstrap token file must first be bound to the system:node-bootstrapper role; only then is kubelet allowed to create certificate signing requests (certificatesigningrequests).

# First create the role binding that allows the bootstrap requests
# The user is the one configured in token.csv on the master
# This only needs to be done once, from any one machine

kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
           

Create the kubelet kubeconfig file

# Configure the cluster

kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=https://192.168.1.109:6443 \
  --kubeconfig=bootstrap.kubeconfig

# Configure client authentication

kubectl config set-credentials kubelet-bootstrap \
  --token=cdc7201eb10d76842ff5b6e35bfbb516 \
  --kubeconfig=bootstrap.kubeconfig


# Configure the context

kubectl config set-context default \
  --cluster=kubernetes \
  --user=kubelet-bootstrap \
  --kubeconfig=bootstrap.kubeconfig


# Set the default context
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig

# Copy the generated bootstrap.kubeconfig file

mv bootstrap.kubeconfig /etc/kubernetes/

scp /etc/kubernetes/bootstrap.kubeconfig root@192.168.1.110:/etc/kubernetes/

scp /etc/kubernetes/bootstrap.kubeconfig root@192.168.1.111:/etc/kubernetes/
           

Create the kubelet.service file

# Create the kubelet working directory

mkdir /var/lib/kubelet

vi /etc/systemd/system/kubelet.service


[Unit]
Description=kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service

[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/usr/local/bin/kubelet \
  --address=. \
  --hostname-override=. \
  --pod-infra-container-image=gcr.io/google_containers/pause-amd64:3.0 \
  --experimental-bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \
  --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
  --require-kubeconfig \
  --cert-dir=/etc/kubernetes/ssl \
  --cluster_dns=192.254.0.2 \
  --cluster_domain=cluster.local. \
  --hairpin-mode promiscuous-bridge \
  --allow-privileged=true \
  --serialize-image-pulls=false \
  --logtostderr=true \
  --cgroup-driver=systemd \
  --v=
ExecStopPost=/sbin/iptables -A INPUT -s 10.0.0.0/8 -p tcp --dport 4194 -j ACCEPT
ExecStopPost=/sbin/iptables -A INPUT -s 172.16.0.0/12 -p tcp --dport 4194 -j ACCEPT
ExecStopPost=/sbin/iptables -A INPUT -s 192.168.0.0/16 -p tcp --dport 4194 -j ACCEPT
ExecStopPost=/sbin/iptables -A INPUT -p tcp --dport 4194 -j DROP
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
           
# Notes on the configuration above:
# --address / --hostname-override      this node's own IP
# --cluster_dns                        the DNS address we reserved (192.254.0.2)
# cluster.local.                       the kubernetes cluster domain
# gcr.io/google_containers/pause-amd64 the pod infrastructure image
           

Start kubelet

systemctl daemon-reload
systemctl enable kubelet
systemctl start kubelet
systemctl status kubelet

           

If startup fails with an error

Refer to kubernetes.io

Add this parameter to the kubelet unit file:

--cgroup-driver=systemd \

so the kubelet cgroup driver matches the one docker uses, which resolves the problem.

Configure TLS authentication

Note: this approval step must be repeated for every node you add.

# Check the CSR name

[root@kubernetes-master- kubernetes]# kubectl get csr
NAME                                                   AGE       REQUESTOR           CONDITION
node-csr-CB1uNi3e6099Jk-uYgJJHP1DdLIcOf9jxrJ2eH-PDZg   m        kubelet-bootstrap   Pending


# Approve it

[root@kubernetes-master- kubernetes]# kubectl certificate approve node-csr-CB1uNi3e6099Jk-uYgJJHP1DdLIcOf9jxrJ2eH-PDZg
certificatesigningrequest "node-csr-CB1uNi3e6099Jk-uYgJJHP1DdLIcOf9jxrJ2eH-PDZg" approved
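When several nodes are pending at once, all of them can be approved in one pass (a convenience sketch; the per-CSR command above does the same thing one at a time):

# approve every CSR whose CONDITION column is Pending
kubectl get csr --no-headers | awk '$NF == "Pending" {print $1}' \
  | xargs -r kubectl certificate approve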
           

Verify the nodes

[root@kubernetes-master- ssl]# kubectl get nodes 
NAME            STATUS    AGE       VERSION
   Ready     s       v1

# Once approved, the config file and keys are generated automatically

# Config file

ls /etc/kubernetes/kubelet.kubeconfig   
/etc/kubernetes/kubelet.kubeconfig


# Key files

ls /etc/kubernetes/ssl/kubelet*
/etc/kubernetes/ssl/kubelet-client.crt  /etc/kubernetes/ssl/kubelet.crt
/etc/kubernetes/ssl/kubelet-client.key  /etc/kubernetes/ssl/kubelet.key
           
[root@kubernetes-master- kubernetes]# kubectl get csr
NAME                                                   AGE       REQUESTOR           CONDITION
node-csr-5sjslkPdNqpYZL8jRYp10seyVW4F91au7ftGQHG8YwM   m        kubelet-bootstrap   Pending
node-csr-CB1uNi3e6099Jk-uYgJJHP1DdLIcOf9jxrJ2eH-PDZg   m       kubelet-bootstrap   Approved,Issued
[root@kubernetes-master- kubernetes]# kubectl certificate approve node-csr-5sjslkPdNqpYZL8jRYp10seyVW4F91au7ftGQHG8YwM
certificatesigningrequest "node-csr-5sjslkPdNqpYZL8jRYp10seyVW4F91au7ftGQHG8YwM" approved
[root@kubernetes-master- kubernetes]# kubectl get nodes 
NAME            STATUS    AGE       VERSION
.   Ready     s        v1.
.   Ready     m       v1.
           

Configure kube-proxy

Create the kube-proxy certificate

# cfssl is not installed on the node,
# so go back to the master, generate the certificate there, and copy it over

[root@k8s-master- ~]# cd /opt/ssl


vi kube-proxy-csr.json

{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShenZhen",
      "L": "ShenZhen",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
           

Generate the kube-proxy certificate and private key

/opt/local/cfssl/cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem \
  -ca-key=/etc/kubernetes/ssl/ca-key.pem \
  -config=/etc/kubernetes/ssl/config.json \
  -profile=kubernetes  kube-proxy-csr.json | /opt/local/cfssl/cfssljson -bare kube-proxy

# Check the generated files
ls kube-proxy*
kube-proxy.csr  kube-proxy-csr.json  kube-proxy-key.pem  kube-proxy.pem

# Copy into place
cp kube-proxy*.pem /etc/kubernetes/ssl/

scp kube-proxy*.pem root@192.168.1.110:/etc/kubernetes/ssl/
scp kube-proxy*.pem root@192.168.1.111:/etc/kubernetes/ssl/
           

Create the kube-proxy kubeconfig file

# Configure the cluster

kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=https://192.168.1.109:6443 \
  --kubeconfig=kube-proxy.kubeconfig


# Configure client authentication

kubectl config set-credentials kube-proxy \
  --client-certificate=/etc/kubernetes/ssl/kube-proxy.pem \
  --client-key=/etc/kubernetes/ssl/kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig


# Configure the context

kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-proxy \
  --kubeconfig=kube-proxy.kubeconfig



# Set the default context
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

# Copy into place
mv kube-proxy.kubeconfig /etc/kubernetes/
scp /etc/kubernetes/kube-proxy.kubeconfig root@192.168.1.110:/etc/kubernetes/
scp /etc/kubernetes/kube-proxy.kubeconfig root@192.168.1.111:/etc/kubernetes/
           

Create the kube-proxy.service file

# Create the kube-proxy working directory

mkdir -p /var/lib/kube-proxy


vi /etc/systemd/system/kube-proxy.service

[Unit]
Description=kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/usr/local/bin/kube-proxy \
  --bind-address=. \
  --hostname-override=. \
  --cluster-cidr=../ \
  --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig \
  --logtostderr=true \
  --v=
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
           

Start kube-proxy

systemctl daemon-reload
systemctl enable kube-proxy
systemctl start kube-proxy
systemctl status kube-proxy
           

Deploy the other Nodes

Repeat the kubelet and kube-proxy steps above on the other nodes; the result looks like this:

[root@kubernetes-master- kubernetes]# kubectl get nodes
NAME            STATUS    AGE       VERSION
.   Ready     m        v1.
.   Ready     m        v1.
.   Ready     m        v1.
           

Calico networking

Modify kubelet.service on the Nodes

vi /etc/systemd/system/kubelet.service

# Add the following flag

  --network-plugin=cni \


# Reload and restart
systemctl daemon-reload
systemctl restart kubelet.service
systemctl status kubelet.service
           

Restart kube-proxy.service on the Nodes

# Reload and restart
systemctl daemon-reload
systemctl restart kube-proxy.service
systemctl status kube-proxy.service
           

Install Calico

Official site

wget http://docs.projectcalico.org/v2/getting-started/kubernetes/installation/hosted/calico.yaml

wget http://docs.projectcalico.org/v2/getting-started/kubernetes/installation/rbac.yaml


# Images
[root@kubernetes-master- ~]# cat calico.yaml | grep image
          image: quay.io/calico/node:v1
          image: quay.io/calico/cni:v1
          image: quay.io/calico/kube-policy-controller:v0
           

Configure calico

vi calico.yaml

# Note that the following options need to be modified:


  etcd_endpoints: "https://192.168.1.109:2379,https://192.168.1.110:2379,https://192.168.1.111:2379"

    etcd_ca: "/calico-secrets/etcd-ca"  
    etcd_cert: "/calico-secrets/etcd-cert"
    etcd_key: "/calico-secrets/etcd-key"  


# These fields must contain base64-encoded data.
# Run the command shown in parentheses for each field and paste the output into etcd-key, etcd-cert and etcd-ca (without the parentheses); a scripted version follows below.


data:
  etcd-key: (cat /etc/kubernetes/ssl/etcd-key.pem | base64 | tr -d '\n')
  etcd-cert: (cat /etc/kubernetes/ssl/etcd.pem | base64 | tr -d '\n')
  etcd-ca: (cat /etc/kubernetes/ssl/ca.pem | base64 | tr -d '\n')


    - name: CALICO_IPV4POOL_CIDR
      value: "192.168.0.0/16"
           

Apply the yaml files

[root@kubernetes-master- ~]# kubectl apply -f calico.yaml 
configmap "calico-config" created
secret "calico-etcd-secrets" created
daemonset "calico-node" created
deployment "calico-policy-controller" created
serviceaccount "calico-policy-controller" created
serviceaccount "calico-node" created

[root@kubernetes-master- ~]# kubectl apply -f rbac.yaml 
clusterrole "calico-policy-controller" created
clusterrolebinding "calico-policy-controller" created
clusterrole "calico-node" created
clusterrolebinding "calico-node" created
           

Verify Calico

[root@kubernetes-master- ssl]# kubectl get pods -n kube-system
NAME                                        READY     STATUS    RESTARTS   AGE
calico-node-jdz4                           /       Running             m
calico-node-d8986                           /       Running             m
calico-node-fz92l                           /       Running             m
calico-policy-controller--rnn7b   /       Running             m
[root@kubernetes-master- ssl]# kubectl get ds -n kube-system
NAME          DESIRED   CURRENT   READY     UP-TO-DATE   AVAILABLE   NODE-SELECTOR   AGE
calico-node                                                     <none>          m
           

Install calicoctl

[root@k8s-node- kubernetes]# cd /usr/local/bin/

[root@k8s-node- kubernetes]# wget -c  https://github.com/projectcalico/calicoctl/releases/download/v1.3.0/calicoctl

[root@k8s-node- kubernetes]# chmod +x calicoctl


## Create the calicoctl config file

# The config file, on a machine where the calico network is installed

[root@k8s-node- kubernetes]# calicoctl version
Version:      v1.3.0
Build date:   
Git commit:   d2babb6
[root@k8s-node- kubernetes]# mkdir /etc/calico
[root@k8s-node- kubernetes]# vi /etc/calico/calicoctl.cfg
apiVersion: v1
kind: calicoApiConfig
metadata:
spec:
  datastoreType: "etcdv2"
  etcdEndpoints: "https://192.168.1.109:2379,https://192.168.1.110:2379,https://192.168.1.111:2379"
  etcdKeyFile: "/etc/kubernetes/ssl/etcd-key.pem"
  etcdCertFile: "/etc/kubernetes/ssl/etcd.pem"
  etcdCACertFile: "/etc/kubernetes/ssl/ca.pem"

[root@k8s-node- kubernetes]# calicoctl node status
Calico process is running.

IPv4 BGP status
+---------------+-------------------+-------+----------+-------------+
| PEER ADDRESS  |     PEER TYPE     | STATE |  SINCE   |    INFO     |
+---------------+-------------------+-------+----------+-------------+
| 192.168.1.109 | node-to-node mesh | up    | 08:32:07 | Established |
| 192.168.1.110 | node-to-node mesh | up    | 08:32:08 | Established |
+---------------+-------------------+-------+----------+-------------+

IPv6 BGP status
No IPv6 peers found.
           

Test the cluster

# Create an nginx deployment

apiVersion: extensions/v1beta1 
kind: Deployment 
metadata: 
  name: nginx-dm
spec: 
  replicas: 2
  template: 
    metadata: 
      labels: 
        name: nginx 
    spec: 
      containers: 
        - name: nginx 
          image: nginx:alpine 
          imagePullPolicy: IfNotPresent
          ports: 
            - containerPort: 80

---

apiVersion: v1 
kind: Service
metadata: 
  name: nginx-svc 
spec: 
  ports: 
    - port: 80
      targetPort: 80
      protocol: TCP 
  selector: 
    name: nginx

[root@kubernetes-master- tmp]# vi deplyment.yaml
[root@kubernetes-master- tmp]# kubectl create -f deplyment.yaml 
deployment "nginx-dm" created
service "nginx-svc" created
           
[root@kubernetes-master- tmp]# kubectl get pods 
NAME                        READY     STATUS    RESTARTS   AGE
nginx-dm--cshnp   /       Running             m
nginx-dm--w56bx   /       Running             m
[root@kubernetes-master- tmp]# kubectl get deployment
NAME       DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
nginx-dm                                            m
[root@kubernetes-master- tmp]# kubectl get svc
NAME         CLUSTER-IP       EXTERNAL-IP   PORT(S)   AGE
kubernetes   ..      <none>        /TCP   h
nginx-svc    .   <none>        /TCP    m
           
# curl from inside a node

[root@k8s-node- kubernetes]# curl 192.254.236.70
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
    body {
        width: 35em;
        margin: 0 auto;
        font-family: Tahoma, Verdana, Arial, sans-serif;
    }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>
           

Configure KubeDNS

The official yaml files on github:

https://github.com/kubernetes/kubernetes/tree/v1.7.2/cluster/addons/dns

Download the images
docker pull gcr.io/google_containers/k8s-dns-sidecar-amd64:
docker pull gcr.io/google_containers/k8s-dns-kube-dns-amd64:
docker pull gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:
           

Download the yaml files

mkdir /tmp/dns 
cd /tmp/dns

https://github.com/kubernetes/kubernetes/tree/v1.7.2/cluster/addons/dns/kubedns-cm.yaml


https://github.com/kubernetes/kubernetes/tree/v1.7.2/cluster/addons/dns/kubedns-sa.yaml


https://github.com/kubernetes/kubernetes/tree/v1.7.2/cluster/addons/dns/kubedns-controller.yaml.base


https://github.com/kubernetes/kubernetes/tree/v1.7.2/cluster/addons/dns/kubedns-svc.yaml.base


# Rename (drop the .base suffix)

mv kubedns-controller.yaml.base kubedns-controller.yaml

mv kubedns-svc.yaml.base kubedns-svc.yaml
           

The predefined system RoleBinding

The predefined RoleBinding system:kube-dns binds the kube-dns ServiceAccount in the kube-system namespace to the system:kube-dns Role, which grants access to the DNS-related kube-apiserver APIs;

[root@kubernetes-master- dns]# kubectl get clusterrolebindings system:kube-dns -o yaml
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  creationTimestamp: --T07::Z
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kube-dns
  resourceVersion: "107"
  selfLink: /apis/rbac.authorization.k8s.io/v1beta1/clusterrolebindings/system%Akube-dns
  uid: f5cc1bd-fb8--d7a4ad
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-dns
subjects:
- kind: ServiceAccount
  name: kube-dns
  namespace: kube-system
           

Modify kubedns-svc.yaml

# In kubedns-svc.yaml, change clusterIP: __PILLAR__DNS__SERVER__ to the DNS IP we reserved earlier, 192.254.0.2

[root@kubernetes-master- dns]# cat kubedns-svc.yaml 
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "KubeDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 192.254.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP

           

Modify kubedns-controller.yaml

1. # Change --domain=__PILLAR__DNS__DOMAIN__. to our chosen domain: --domain=cluster.local.

2. # Change --server=/__PILLAR__DNS__DOMAIN__/127.0.0.1#10053 to --server=/cluster.local./127.0.0.1#10053

3. # Change --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.__PILLAR__DNS__DOMAIN__, to --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local.,

4. # Change --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.__PILLAR__DNS__DOMAIN__, to --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local., (a scripted version of all four edits follows below)
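The four edits above can also be done with sed (a sketch; it assumes the pillar placeholders are still present in the freshly renamed files and relies on the templates' own trailing dots, so double-check the result against the list above):

sed -i 's/__PILLAR__DNS__SERVER__/192.254.0.2/g' kubedns-svc.yaml
sed -i 's/__PILLAR__DNS__DOMAIN__/cluster.local/g' kubedns-controller.yaml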
           

Apply the yaml files

[root@k8s-master- kubedns]# kubectl create -f .
configmap "kube-dns" created
deployment "kube-dns" created
serviceaccount "kube-dns" created
service "kube-dns" created
           

Check the kubedns service

[root@kubernetes-master- dns]# kubectl get all --namespace=kube-system
NAME                                           READY     STATUS    RESTARTS   AGE
po/calico-node-jdz4                           /       Running             m
po/calico-node-d8986                           /       Running             m
po/calico-node-fz92l                           /       Running             m
po/calico-policy-controller--rnn7b   /       Running             m
po/kube-dns--mr8c4                   /       Running             m

NAME           CLUSTER-IP    EXTERNAL-IP   PORT(S)         AGE
svc/kube-dns      <none>        /UDP,/TCP   m

NAME                              DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
deploy/calico-policy-controller                                            m
deploy/kube-dns                                                            m

NAME                                     DESIRED   CURRENT   READY     AGE
rs/calico-policy-controller-                              m
rs/kube-dns-                                              m
           

Verify the DNS service

Before verifying DNS, note that any pods and deployments created before DNS was deployed must be deleted and recreated, otherwise they cannot resolve names.

# Re-apply the earlier nginx-dm yaml file
[root@kubernetes-master- tmp]# kubectl delete -f deplyment.yaml 
deployment "nginx-dm" deleted
service "nginx-svc" deleted

[root@kubernetes-master- tmp]# kubectl create -f deplyment.yaml 
deployment "nginx-dm" created
service "nginx-svc" created

[root@kubernetes-master- tmp]# kubectl get svc nginx-svc
NAME         CLUSTER-IP        EXTERNAL-IP   PORT(S)   AGE
nginx-svc    .   <none>        /TCP    s

# Create a pod to test the nameserver

apiVersion: v1
kind: Pod
metadata:
  name: alpine
spec:
  containers:
  - name: alpine
    image: alpine
    command:
    - sh
    - -c
    - while true; do sleep 3600; done



# Check the pods
[root@kubernetes-master- tmp]# kubectl get pods 
NAME                        READY     STATUS    RESTARTS   AGE
alpine                      /       Running             m
nginx-dm--fr75   /       Running             m
nginx-dm--jtqg   /       Running             m



# Test

[root@kubernetes-master- tmp]# kubectl exec -it alpine ping nginx-svc
PING nginx-svc (.):  data bytes
 bytes from .: seq= ttl= time= ms
 bytes from .: seq= ttl= time= ms


[root@kubernetes-master- tmp]# kubectl exec -it alpine nslookup nginx-svc
nslookup: can't resolve '(null)': Name does not resolve

Name:      nginx-svc
Address 1: 192.254.230.234 nginx-svc.default.svc.cluster.local
           

Deploy Ingress and the Dashboard

Deploy the dashboard

The official dashboard github: https://github.com/kubernetes/dashboard

Download the dashboard image

gcr.io/google_containers/kubernetes-dashboard-amd64:v1
           

Download the yaml files

mkdir /tmp/dashboard
cd /tmp/dashboard

https://sourcegraph.com/github.com/kubernetes/kubernetes@v1.7.2/-/blob/cluster/addons/dashboard/dashboard-controller.yaml

https://sourcegraph.com/github.com/kubernetes/kubernetes@v1.7.2/-/blob/cluster/addons/dashboard/dashboard-service.yaml

# Because RBAC is enabled, an RBAC binding needs to be created here

vi dashboard-rbac.yaml


apiVersion: v1
kind: ServiceAccount
metadata:
  name: dashboard
  namespace: kube-system

---

kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1alpha1
metadata:
  name: dashboard
subjects:
  - kind: ServiceAccount
    name: dashboard
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
           

Apply the yaml

# Add the rbac service account to dashboard-controller.yaml


# Under the second spec, add:

    spec:
      serviceAccountName: dashboard


[root@kubernetes-master- dashboard]# kubectl create -f .
deployment "kubernetes-dashboard" created
serviceaccount "dashboard" created
clusterrolebinding "dashboard" created
service "kubernetes-dashboard" created

[root@kubernetes-master- dashboard]# kubectl get svc -n kube-system kubernetes-dashboard
NAME                   CLUSTER-IP        EXTERNAL-IP   PORT(S)   AGE
kubernetes-dashboard   .   <none>        /TCP    s
           

Access the dashboard

Official docs

kubectl proxy mode

[root@kubernetes-master- dashboard]# kubectl cluster-info
Kubernetes master is running at https://192.168.1.109:6443
KubeDNS is running at https://192.168.1.109:6443/api/v1/namespaces/kube-system/services/kube-dns/proxy
kubernetes-dashboard is running at https://192.168.1.109:6443/api/v1/namespaces/kube-system/services/kubernetes-dashboard/proxy

To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
           
[root@kubernetes-master- dashboard]# kubectl proxy --address='192.168.1.109'
Starting to serve on 192.168.1.109:8001
           

NodePort mode

Replace `type: ClusterIP` with `type: NodePort`, either by editing the service as below or with the patch sketch that follows

[root@kubernetes-master- dashboard]# kubectl -n kube-system edit service kubernetes-dashboard
service "kubernetes-dashboard" edited
[root@kubernetes-master- dashboard]# kubectl -n kube-system get service kubernetes-dashboard
NAME                   CLUSTER-IP        EXTERNAL-IP   PORT(S)        AGE
kubernetes-dashboard   .   <nodes>       :/TCP   m
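A non-interactive alternative to kubectl edit is to patch the Service type directly (a small sketch):

kubectl -n kube-system patch service kubernetes-dashboard -p '{"spec": {"type": "NodePort"}}'
kubectl -n kube-system get service kubernetes-dashboard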
           

The dashboard is then reachable at https://master-ip:30897 (use the NodePort assigned in your cluster).

Deploy Nginx Ingress

kubernetes currently exposes services in only three ways: LoadBalancer Service, NodePort Service, and Ingress;

What is Ingress? Ingress exposes kubernetes services through load balancers such as Nginx or HAProxy.

The official Nginx Ingress github

https://github.com/kubernetes/ingress-nginx/tree/nginx-0.9.0-beta.11

# Official images
gcr.io/google_containers/defaultbackend:
gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.11
           
# Deploy the Nginx default backend, which serves a fixed page for any request whose host has no matching rule.


https://github.com/kubernetes/ingress-nginx/blob/nginx-0.9.0-beta.11/examples/deployment/nginx/default-backend.yaml

Apply it directly; no changes are needed.
[root@kubernetes-master- nignx]# kubectl apply -f default-backend.yaml 
deployment "default-http-backend" created
service "default-http-backend" created
[root@kubernetes-master- nignx]# kubectl get pods -n kube-system
NAME                                        READY     STATUS    RESTARTS   AGE
calico-node-jdz4                           /       Running             h
calico-node-d8986                           /       Running             h
calico-node-fz92l                           /       Running             h
calico-policy-controller--rnn7b   /       Running             h
default-http-backend-726995137-4c6d5        1/1       Running   0          18s
kube-dns--mr8c4                   /       Running             m
kubernetes-dashboard--lpngv       /       Running             m
           
# Deploy the Ingress RBAC resources

https://github.com/kubernetes/ingress-nginx/blob/nginx-0.9.0-beta.11/examples/rbac/nginx/nginx-ingress-controller-rbac.yml

# Change the namespace
[root@kubernetes-master- nignx]# sed -i 's/namespace: nginx-ingress/namespace: kube-system/g' nginx-ingress-controller-rbac.yml



[root@kubernetes-master- nignx]# kubectl apply -f nginx-ingress-controller-rbac.yml
namespace "nginx-ingress" created
serviceaccount "nginx-ingress-serviceaccount" created
clusterrole "nginx-ingress-clusterrole" created
role "nginx-ingress-role" created
rolebinding "nginx-ingress-role-nisa-binding" created
clusterrolebinding "nginx-ingress-clusterrole-nisa-binding" created
           
# Deploy the Ingress Controller component
https://github.com/kubernetes/ingress-nginx/blob/nginx-0.9.0-beta.11/examples/daemonset/nginx/nginx-ingress-daemonset.yaml

# Edit the yaml to add the rbac service account and hostNetwork; under the second spec add:

    spec:
      hostNetwork: true
      serviceAccountName: nginx-ingress-serviceaccount


[root@kubernetes-master- nignx]# kubectl apply -f nginx-ingress-daemonset.yaml 
daemonset "nginx-ingress-lb" created


[root@kubernetes-master- nignx]# kubectl get daemonset -n kube-system
NAME               DESIRED   CURRENT   READY     UP-TO-DATE   AVAILABLE   NODE-SELECTOR   AGE
calico-node                                                          <none>          h
nginx-ingress-lb                                                     <none>          s
           
# Create an ingress

# Check our existing svc
[root@kubernetes-master- nignx]# kubectl get svc nginx-svc
NAME        CLUSTER-IP        EXTERNAL-IP   PORT(S)   AGE
nginx-svc      <none>        /TCP    m

[root@kubernetes-master- nignx]# cat nginx-ingress.yaml 
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: nginx-ingress
spec:
  rules:
  - host: nginx.qinzhao.me
    http:
      paths:
      - backend:
          serviceName: nginx-svc
          servicePort: 80
[root@kubernetes-master- nignx]# kubectl apply -f nginx-ingress.yaml
ingress "nginx-ingress" created
[root@kubernetes-master- nignx]# kubectl get ingress
NAME            HOSTS              ADDRESS            PORTS     AGE
nginx-ingress   nginx.qinzhao.me   ...           m

curl -I nginx.qinzhao.me
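If the test host does not resolve publicly yet, the rule can still be exercised by pointing curl at any node running nginx-ingress-lb and passing the Host header (the node IP below is only an illustration):

# hostNetwork ingress controller listens on port 80 of the node itself
curl -I -H "Host: nginx.qinzhao.me" http://192.168.1.111/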
           
# Configure a Dashboard Ingress


[root@kubernetes-master- nignx]# kubectl get svc -n kube-system kubernetes-dashboard
NAME                   CLUSTER-IP        EXTERNAL-IP   PORT(S)        AGE
kubernetes-dashboard   .   <nodes>       :/TCP   m
[root@kubernetes-master- nignx]# vi  dashboard-ingress.yaml
[root@kubernetes-master- nignx]# cat dashboard-ingress.yaml 
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: dashboard-ingress
  namespace: kube-system
spec:
  rules:
  - host: dashboard.jicki.me
    http:
      paths:
      - backend:
          serviceName: kubernetes-dashboard
          servicePort: 80

[root@kubernetes-master- nignx]# kubectl get ingress -n kube-system
No resources found.
[root@kubernetes-master- nignx]# kubectl create -f  dashboard-ingress.yaml
ingress "dashboard-ingress" created
[root@kubernetes-master- nignx]# kubectl get ingress -n kube-system
NAME                HOSTS                ADDRESS   PORTS     AGE
dashboard-ingress   dashboard.jicki.me                     s
[root@kubernetes-master- nignx]# curl -I dashboard.jicki.me
           

Master HA

Master HA based on an Nginx proxy in front of the API
# Except for the api server, the master components coordinate through etcd leader election, so nothing extra is needed for them. For the api server, run an nginx on every node that reverse-proxies all api servers; kubelet and kube-proxy on the node connect to the local nginx proxy port, and when nginx detects a dead backend it drops that api server, which gives the api server HA.
           
# Download the binaries

wget https://dl.k8s.io/v1.7.2/kubernetes-server-linux-amd64.tar.gz

tar -xzvf kubernetes-server-linux-amd64.tar.gz

cd kubernetes

cp -r server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler,kubectl,kube-proxy,kubelet} /usr/local/bin/
           
# Copy the keys from Master-1 to Master-2

# To keep things simple I just copy all of them over

[root@k8s-master- ~]# cd /etc/kubernetes/ssl

[root@k8s-master- ssl]# scp -r * root@192.168.1.110:/etc/kubernetes/ssl/


# Copy the token.csv file

[root@k8s-master- ~]# cd /etc/kubernetes

[root@k8s-master- ssl]# scp -r token.csv root@192.168.1.110:/etc/kubernetes/
           
# Configure kube-apiserver on Master-2
vi /etc/systemd/system/kube-apiserver.service

[Unit]
Description=kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
User=root
ExecStart=/usr/local/bin/kube-apiserver \
  --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \
  --advertise-address=192.168.1.110 \
  --allow-privileged=true \
  --apiserver-count=2 \
  --audit-log-maxage= \
  --audit-log-maxbackup= \
  --audit-log-maxsize= \
  --audit-log-path=/var/lib/audit.log \
  --authorization-mode=RBAC \
  --bind-address=192.168.1.110 \
  --client-ca-file=/etc/kubernetes/ssl/ca.pem \
  --enable-swagger-ui=true \
  --etcd-cafile=/etc/kubernetes/ssl/ca.pem \
  --etcd-certfile=/etc/kubernetes/ssl/etcd.pem \
  --etcd-keyfile=/etc/kubernetes/ssl/etcd-key.pem \
  --etcd-servers=https://192.168.1.109:2379,https://192.168.1.110:2379,https://192.168.1.111:2379 \
  --event-ttl=1h \
  --kubelet-https=true \
  --insecure-bind-address=192.168.1.110 \
  --runtime-config=rbac.authorization.k8s.io/v1alpha1 \
  --service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --service-cluster-ip-range=192.254.0.0/16 \
  --service-node-port-range=30000-32000 \
  --tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  --experimental-bootstrap-token-auth \
  --token-auth-file=/etc/kubernetes/token.csv \
  --v=
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
           
# Start kube-apiserver
systemctl daemon-reload
systemctl enable kube-apiserver
systemctl start kube-apiserver
systemctl status kube-apiserver
           
# Deploy kube-controller-manager


vi /etc/systemd/system/kube-controller-manager.service

[Unit]
Description=kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/usr/local/bin/kube-controller-manager \
  --address=127.0.0.1 \
  --master=http://192.168.1.110:8080 \
  --allocate-node-cidrs=true \
  --service-cluster-ip-range=192.254.0.0/16 \
  --cluster-cidr=192.168.0.0/16 \
  --cluster-name=kubernetes \
  --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \
  --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --root-ca-file=/etc/kubernetes/ssl/ca.pem \
  --leader-elect=true \
  --v=
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
           
# Start kube-controller-manager

systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl start kube-controller-manager
systemctl status kube-controller-manager
           
# Deploy kube-scheduler

vi /etc/systemd/system/kube-scheduler.service

[Unit]
Description=kubernetes Scheduler
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/usr/local/bin/kube-scheduler \
  --address=127.0.0.1 \
  --master=http://192.168.1.110:8080 \
  --leader-elect=true \
  --v=
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
           
# Start kube-scheduler

systemctl daemon-reload
systemctl enable kube-scheduler
systemctl start kube-scheduler
systemctl status kube-scheduler
           
# Verify from Master-2

[root@kubernetes-master- kubernetes]# kubectl get componentstatuses
NAME                 STATUS    MESSAGE              ERROR
controller-manager   Healthy   ok                   
scheduler            Healthy   ok                   
etcd-0               Healthy   {"health": "true"}   
etcd-1               Healthy   {"health": "true"}   
etcd-2               Healthy   {"health": "true"}  
           

Modify the node configuration

# kubelet

# First, re-create the kubelet kubeconfig file

# Configure the cluster (server is 127.0.0.1 here; on a machine that is both a Master and a Node, use the Node IP)

kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=https://127.0.0.1:6443 \
  --kubeconfig=bootstrap.kubeconfig


# Configure client authentication

kubectl config set-credentials kubelet-bootstrap \
  --token=cdc7201eb10d76842ff5b6e35bfbb516 \
  --kubeconfig=bootstrap.kubeconfig


# Configure the context

kubectl config set-context default \
  --cluster=kubernetes \
  --user=kubelet-bootstrap \
  --kubeconfig=bootstrap.kubeconfig


# Set the default context
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig

# Copy the generated bootstrap.kubeconfig file

mv bootstrap.kubeconfig /etc/kubernetes/
           
# Re-create the kube-proxy kubeconfig file

# Configure the cluster (server is 127.0.0.1 here; on a machine that is both a Master and a Node, use the Node IP)

kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=https://127.0.0.1:6443 \
  --kubeconfig=kube-proxy.kubeconfig


# Configure client authentication

kubectl config set-credentials kube-proxy \
  --client-certificate=/etc/kubernetes/ssl/kube-proxy.pem \
  --client-key=/etc/kubernetes/ssl/kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig


# Configure the context

kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-proxy \
  --kubeconfig=kube-proxy.kubeconfig



# Set the default context
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

# Copy into place
mv kube-proxy.kubeconfig /etc/kubernetes/
           

Create the Nginx proxy

An Nginx proxy must be created on every node. Note in particular: when a Master also serves as a Node, the nginx-proxy is not needed on that machine.
# Create the config directory
mkdir -p /etc/nginx

# Write the proxy config

cat << EOF >> /etc/nginx/nginx.conf
error_log stderr notice;

worker_processes auto;
events {
  multi_accept on;
  use epoll;
  worker_connections 1024;
}

stream {
    upstream kube_apiserver {
        least_conn;
        server 192.168.1.109:6443;
        server 192.168.1.110:6443;
    }

    server {
        listen        127.0.0.1:6443;
        proxy_pass    kube_apiserver;
        proxy_timeout 10m;
        proxy_connect_timeout 1s;
    }
}
EOF
           
# Run Nginx in a docker container, managed by systemd

cat << EOF >> /etc/systemd/system/nginx-proxy.service

[Unit]
Description=kubernetes apiserver docker wrapper
Wants=docker.socket
After=docker.service

[Service]
User=root
PermissionsStartOnly=true
ExecStart=/usr/bin/docker run -p 6443:6443 \\
                              -v /etc/nginx:/etc/nginx \\
                              --name nginx-proxy \\
                              --net=host \\
                              --restart=on-failure:5 \\
                              --memory=512M \\
                              nginx:1.13.3-alpine
ExecStartPre=-/usr/bin/docker rm -f nginx-proxy
ExecStop=/usr/bin/docker stop nginx-proxy
Restart=always
RestartSec=15s
TimeoutStartSec=30s

[Install]
WantedBy=multi-user.target
EOF
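The post does not show it explicitly, but the unit created above still has to be enabled and started on each node (a minimal sketch):

systemctl daemon-reload
systemctl enable nginx-proxy
systemctl start nginx-proxy
systemctl status nginx-proxy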
           
[root@kubernetes-master- kubernetes]# kubectl get nodes -owide
NAME            STATUS    AGE       VERSION   EXTERNAL-IP   OS-IMAGE                KERNEL-VERSION
   Ready     h        v1    <none>        CentOS Linux  (Core)   -el7.x86_64
   Ready     h        v1    <none>        CentOS Linux  (Core)   -el7.x86_64
   Ready     h        v1    <none>        CentOS Linux  (Core)   -el7.x86_64
           

END

References

https://jicki.me/2017/07/25/kubernetes-1.7.2/#%E9%AA%8C%E8%AF%81-dns-%E6%9C%8D%E5%8A%A1
