天天看点

2. 2019-11-19 Ubuntu环境准备及k8s安装环境准备

服务器主机名称设置

master   192.168.200.198    1.5g
master2  192.168.200.197   1.5g
node1    192.168.200.206    4g
node2    192.168.200.207    4g
harbor   192.168.200.200     1.5g
harbor2  192.168.200.199    1.5g
haproxy1 192.168.200.201  1.5g
etcd1    192.168.200.203      2g
etcd2    192.168.200.204      2g
etcd3    192.168.200.205      2g
      

haproxy2 192.168.200.202 1.5g

2. 2019-11-19 Ubuntu环境准备及k8s安装环境准备
2. 2019-11-19 Ubuntu环境准备及k8s安装环境准备

转存失败重新上传取消

2. 2019-11-19 Ubuntu环境准备及k8s安装环境准备

选择版本,克隆

在master节点上克隆项目

root@master:~# git clone -b 0.6.1 https://github.com/easzlab/kubeasz.git
root@master:~# apt install ansible -y
root@master:~# ssh-keygen 
开启免密钥登录
root@master:~# apt install sshpass -y
root@master:~# cat scp.sh 
#!/bin/bash
# Push the local SSH public key to every target host so that the later
# ansible / scp steps can log in without a password.
# NOTE(review): the password is hard-coded below; prefer reading it from
# an environment variable or prompting, so it is not committed to disk.

# Target host list (one IP per line; word-splitting on ${IP} is intentional).
IP="
192.168.200.198
192.168.200.199
192.168.200.200
192.168.200.201
192.168.200.202
192.168.200.203
192.168.200.204
192.168.200.205
192.168.200.206
"
for node in ${IP};do
  # Options must come BEFORE the hostname: some ssh-copy-id versions
  # silently ignore trailing options, so the original
  # `ssh-copy-id ${node} -o StrictHostKeyChecking=no` could still stop
  # on the host-key prompt. Test the command directly instead of $?.
  if sshpass -p 123456 ssh-copy-id -o StrictHostKeyChecking=no "${node}"; then
    echo "${node} 秘钥copy完成"
  else
    # Report failures on stderr so they stand out in logs.
    echo "${node} 秘钥copy失败" >&2
  fi
done
root@master:~# bash scp.sh 
允许master2上可以上传镜像
root@master:~# scp -r /root/.docker/ 192.168.200.197:/root/.docker/
把登录网址的信息拷贝过去
root@master:~# scp -r /etc/docker/certs.d/ 192.168.200.197:/etc/docker/
把公钥私钥拷贝过去
      

第二个master节点

root@master2:~# docker pull alpine
下载镜像
root@master2:~# vim /etc/hosts
192.168.200.200 harbor.wyh.net
root@master2:~# docker images
REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
alpine              latest              965ea09ff2eb        4 weeks ago         5.55MB
root@master2:~# vim /etc/hosts
root@master2:~# docker tag 965ea09ff2eb harbor.wyh.net/linux37/alpine:v2
修改名称
root@master2:~# docker push harbor.wyh.net/linux37/alpine:v2
提交到服务器
      
root@master:~# cp limits.conf /etc/security/limits.conf 
替换默认的配置文件
root@master:~# cp sysctl.conf /etc/sysctl.conf 
替换默认的配置文件
      

在master节点优化,其他节点的脚本

root@master:~# cat scp.sh | grep ^[^#]
# Distribute the docker install script, harbor certs, hosts file and the
# tuned sysctl / limits configs to every node, then reboot each node so
# the kernel/limits settings take effect.
IP="
192.168.200.197
192.168.200.203
192.168.200.204
192.168.200.205
192.168.200.206
192.168.200.207
"
for node in ${IP};do   # unquoted on purpose: split the list into one IP per word
      scp docker-install.sh "${node}":/opt/
      scp -r /etc/docker/certs.d "${node}":/etc/docker/
      scp /etc/hosts "${node}":/etc/
      scp /etc/sysctl.conf "${node}":/etc/
      scp /etc/security/limits.conf "${node}":/etc/security/
      # `reboot` drops the SSH connection, so ssh usually exits non-zero
      # even on success — ignore its status explicitly rather than letting
      # it look like a failure.
      ssh "${node}" "reboot" || true
      echo "${node}","重启成功"
done
      

安装haproxy和keepalived

root@haproxy1:~# apt install keepalived haproxy -y
root@etcd3:~# apt install keepalived haproxy -y
root@haproxy1:~# cp /usr/share/doc/keepalived/samples/keepalived.conf.vrrp /etc/keepalived/keepalived.conf

修改keepalived的配置文件
root@haproxy1:~# vim /etc/keepalived/keepalived.conf 

! Configuration File for keepalived — MASTER (haproxy1) node.
! The bare annotation lines in the original were not valid keepalived
! syntax and would break the config if copied verbatim; they are now comments.

global_defs {
   notification_email {
     acassen
   }
   notification_email_from [email protected]
   smtp_server 192.168.200.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
}

vrrp_instance VI_1 {
    state MASTER
    # Change to this machine's actual NIC name.
    interface ens32
    garp_master_delay 10
    smtp_alert
    # Must be identical (88) on every node of this VRRP instance.
    virtual_router_id 88
    # Higher priority than the BACKUP node (which uses 80).
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        # Floating VIP used as the k8s apiserver entry point.
        192.168.200.248  dev ens32 label ens32:0
    }
}

# systemctl enable keepalived

拷贝至另一个服务器上

root@haproxy1:~# scp /etc/keepalived/keepalived.conf 192.168.200.205:/etc/keepalived/keepalived.conf

root@etcd3:~# vim /etc/keepalived/keepalived.conf 
vrrp_instance VI_1 {
    state BACKUP
# 修改状态为 BACKUP(此行为说明,须以注释形式存在,否则配置文件无法解析)
    interface ens32
    garp_master_delay 10
    smtp_alert
    virtual_router_id 88
    priority 80
# 优先级设为 80,低于 MASTER 节点的 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
#systemctl enable keepalived      

修改haproxy的配置文件

root@haproxy1:~# vim /etc/haproxy/haproxy.cfg 
listen k8s-api-6443
  # Front end: the keepalived VIP; all kube-apiserver traffic enters here.
  # NOTE(review): binding to the VIP on the BACKUP node fails unless
  # net.ipv4.ip_nonlocal_bind=1 is set — confirm sysctl on both LB hosts.
  bind 192.168.200.248:6443
  mode tcp
  # Back ends: the two master apiservers. Health check every 3s; a server
  # is marked down after 3 failed checks (fall 3) and back up after 3
  # successful ones (rise 3).
  server 192.168.200.198 192.168.200.198:6443 check fall 3 rise 3 inter 3s
  server 192.168.200.197 192.168.200.197:6443 check fall 3 rise 3 inter 3s
root@haproxy1:~# systemctl restart haproxy
root@haproxy1:~# systemctl enable haproxy
root@haproxy1:~# scp /etc/haproxy/haproxy.cfg 192.168.200.205:/etc/haproxy/
拷贝配置文件过去
root@etcd3:~# systemctl restart haproxy
root@etcd3:~# systemctl enable haproxy
root@master:~# mv /etc/ansible/* /tmp/
root@master:~# git clone -b 0.6.1 https://github.com/easzlab/kubeasz.git
root@master:~# cp -rf kubeasz/* /etc/ansible/
      

查看安装指南

​​​https://github.com/easzlab/kubeasz/tree/0.6.1​​

2. 2019-11-19 Ubuntu环境准备及k8s安装环境准备

image.png

python2.7在master和node节点还有3个etcd都要安装

root@master:/etc/ansible# cp example/hosts.m-masters.example ./hosts
# 安装python2
apt-get install python2.7
 Ubuntu 18.04 可能需要配置以下软链接
ln -s /usr/bin/python2.7 /usr/bin/python

      

解压

下载二进制文件 请从分享的​​​百度云链接​​​,下载解压到/etc/ansible/bin目录,如果你有合适网络环境也可以按照/down/download.sh自行从官网下载各种tar包

​​​https://github.com/kubernetes/kubernetes/releases?after=v1.13.12​​ 下载包的地址

root@master:/etc/ansible/bin# tar xf k8s.1-13-5.tar.gz 
root@master:/etc/ansible/bin# mv bin/* .
      
2. 2019-11-19 Ubuntu环境准备及k8s安装环境准备

image.png

2. 2019-11-19 Ubuntu环境准备及k8s安装环境准备

下载这个官网二进制包

验证是否能够执行命令

root@master:/etc/ansible/bin# ./kube-apiserver --version
Kubernetes v1.13.5
root@master:/etc/ansible/bin# ./docker --version
Docker version 18.09.2, build 6247962
      

修改hosts文件

root@master:/etc/ansible# cat hosts 
# Deploy node: usually the node that runs the ansible playbooks.
# NTP_ENABLED (=yes/no): whether to install chrony time sync across the cluster.
[deploy]
192.168.200.198 NTP_ENABLED=no
# etcd cluster: provide NODE_NAME per member; member count must be odd (1,3,5,7...).
[etcd]
192.168.200.203 NODE_NAME=etcd1
192.168.200.204 NODE_NAME=etcd2
192.168.200.205 NODE_NAME=etcd3
[new-etcd] # reserved group, used later when adding etcd members
#192.168.1.x NODE_NAME=etcdx
[kube-master]
192.168.200.198
#192.168.200.197
[new-master] # reserved group, used later when adding master nodes
#192.168.1.5
#192.168.200.197
[kube-node]
192.168.200.206
[new-node] # reserved group, used later when adding worker nodes
192.168.200.207
# NEW_INSTALL: yes = install a new harbor; no = reuse an existing harbor server.
# Set HARBOR_DOMAIN="" if no domain name is used.
[harbor]
#192.168.1.8 HARBOR_DOMAIN="harbor.yourdomain.com" NEW_INSTALL=no
# Load balancer (more than 2 nodes supported; 2 is usually enough): haproxy+keepalived.
# NOTE(review): this setup already runs an external haproxy/keepalived pair,
# and the lb role is commented out in 01.prepare.yml — the example IPs below
# are unused placeholders; confirm before enabling the lb role.
[lb]
192.168.1.1 LB_ROLE=backup
192.168.1.2 LB_ROLE=master
# [Optional] external LB, e.g. for forwarding services exposed via NodePort.
[ex-lb]
#192.168.1.6 LB_ROLE=backup EX_VIP=192.168.1.250
#192.168.1.7 LB_ROLE=master EX_VIP=192.168.1.250
[all:vars]
# --------- main cluster parameters ---------------
# Deploy mode: allinone, single-master, multi-master
DEPLOY_MODE=multi-master
# Major k8s version; currently supported: v1.8, v1.9, v1.10, v1.11, v1.12, v1.13
K8S_VER="v1.13"
# Cluster MASTER IP, i.e. the LB node VIP. The external haproxy in this setup
# listens on 6443, so KUBE_APISERVER below uses 6443 (kubeasz's own default
# VIP service port would be 8443 to avoid clashing with a local apiserver).
# On public clouds use the cloud LB's internal address and listen port instead.
MASTER_IP="192.168.200.248"
KUBE_APISERVER="https://{{ MASTER_IP }}:6443"
# Cluster network plugin: calico, flannel, kube-router or cilium
CLUSTER_NETWORK="calico"
# Service CIDR — must not overlap any existing internal network
SERVICE_CIDR="10.20.0.0/16"
# Pod CIDR (Cluster CIDR) — must not overlap any existing internal network
CLUSTER_CIDR="172.31.0.0/16"
# Service port range (NodePort Range)
NODE_PORT_RANGE="30000-65000"
# kubernetes service IP (pre-allocated; usually the first IP of SERVICE_CIDR)
CLUSTER_KUBERNETES_SVC_IP="10.20.0.1"
# Cluster DNS service IP (pre-allocated from SERVICE_CIDR)
CLUSTER_DNS_SVC_IP="10.20.254.254"
# Cluster DNS domain
CLUSTER_DNS_DOMAIN="linux37.local."
# Username and password for the cluster's basic auth
BASIC_AUTH_USER="admin"
BASIC_AUTH_PASS="123456"
# --------- additional parameters --------------------
# Default binary directory
bin_dir="/usr/bin"
# Certificate directory
ca_dir="/etc/kubernetes/ssl"
# Deploy directory, i.e. the ansible working directory; do not change
base_dir="/etc/ansible"

把资源限制改大点

root@master:/etc/ansible# vim roles/prepare/templates/30-k8s-ulimits.conf.j2 

* soft nofile 1000000
* hard nofile 1000000
* soft nproc 1000000
* hard nproc 1000000
root@master:/etc/ansible# vim 01.prepare.yml 
    #  - lb
注释掉lb,因为已经有负载均衡了
      

地址需要修改你所在的城市

root@master:/etc/ansible# grep HangZhou roles/ -R
roles/harbor/templates/harbor-csr.json.j2:      "ST": "HangZhou",      

分步安装,这个时候就需要所有服务器必须安装python2.7

root@master:/etc/ansible# ansible-playbook 01.prepare.yml