天天看點

OpenShift 3.11 OKD安裝部署

#openshift 3.11 OKD安裝部署

openshift安裝部署

### 1 環境準備(所有節點)

openshift 版本 v3.11

1.1 機器環境

ip              cpu  mem   hostname  OSsystem

172.16.1.91    4    8    node01  CentOS7.6

172.16.1.92    4    8    node02  CentOS7.6

172.16.1.93    4    8    node03  CentOS7.6

172.16.1.94    4    8    node04  CentOS7.6

172.16.1.95    4    8    node05  CentOS7.6

1.2 免密碼ssh登陸

ssh-keygen 

ssh-copy-id 172.16.1.91

ssh-copy-id 172.16.1.92

ssh-copy-id 172.16.1.93

ssh-copy-id 172.16.1.94

ssh-copy-id 172.16.1.95

1.3 hosts解析

vim /etc/hosts

172.16.1.91 node01

172.16.1.92 node02

172.16.1.93 node03

172.16.1.94 node04

172.16.1.95 node05

---------------------

scp -rp /etc/hosts 172.16.1.92:/etc/hosts

scp -rp /etc/hosts 172.16.1.93:/etc/hosts

scp -rp /etc/hosts 172.16.1.94:/etc/hosts

scp -rp /etc/hosts 172.16.1.95:/etc/hosts

1.4 selinux和關閉防火牆

#sed -i 's/SELINUX=.*/SELINUX=enforcing/' /etc/selinux/config

#sed -i 's/SELINUXTYPE=.*/SELINUXTYPE=targeted/' /etc/selinux/config

開放8443端口給openshift,api使用

/sbin/iptables -I INPUT -p tcp --dport 8443 -j ACCEPT && service iptables save

1.5 安裝需要的軟體包

yum install -y wget git ntp net-tools bind-utils iptables-services bridge-utils bash-completion kexec-tools sos psacct nfs-utils yum-utils docker NetworkManager

1.6 其他

sysctl net.ipv4.ip_forward=1

yum install pyOpenSSL httpd-tools -y 

systemctl start NetworkManager 

systemctl enable NetworkManager

配置鏡像加速器

echo '{

   "insecure-registries": ["172.30.0.0/16"],

   "registry-mirrors": ["https://3aexnae3.mirror.aliyuncs.com"]

}' >/etc/docker/daemon.json

systemctl daemon-reload && \

systemctl enable docker && \

systemctl restart docker

1.7 鏡像下載

#master鏡像清單(主節點)

echo 'docker.io/cockpit/kubernetes
docker.io/openshift/origin-haproxy-router
docker.io/openshift/origin-service-catalog
docker.io/openshift/origin-node
docker.io/openshift/origin-deployer
docker.io/openshift/origin-control-plane
docker.io/openshift/origin-template-service-broker
docker.io/openshift/origin-pod
docker.io/openshift/origin-web-console
quay.io/coreos/etcd' >image.txt && \
while read -r line; do docker pull "$line"; done <image.txt

#node鏡像清單(兩個node節點)

echo 'docker.io/openshift/origin-haproxy-router

docker.io/openshift/origin-node

docker.io/openshift/origin-deployer

docker.io/openshift/origin-pod

docker.io/ansibleplaybookbundle/origin-ansible-service-broker

docker.io/openshift/origin-docker-registry' >image.txt && \

while read -r line; do docker pull "$line"; done <image.txt

### 2 配置ansible(主節點)

2.1 下載下傳openshift-ansible代碼

需要下載下傳2.6.5版本的ansible

git clone -b release-3.11 https://github.com/openshift/openshift-ansible.git

//wget https://buildlogs.centos.org/centos/7/paas/x86_64/openshift-origin311/ansible-2.6.5-1.el7.noarch.rpm &&\

//yum localinstall ansible-2.6.5-1.el7.noarch.rpm -y && \

//yum install -y etcd &&\

//systemctl enable etcd &&\

//systemctl start etcd

yum install -y ansible    #注意:openshift-ansible release-3.11 需搭配 ansible 2.6.x,安裝後請確認版本

2.2 配置檔案

https://docs.okd.io/3.11/install/example_inventories.html

[root@node01 ~]# cat /etc/ansible/hosts

[all]

# all下放所有機器節點的名稱

node01

node02

node03

node04

node05

[OSEv3:children]

#這裡放openshfit的角色,這裡有三個角色,master,node,etcd

masters

nodes

etcd

[OSEv3:vars]

#這裡是openshfit的安裝參數

#指定ansible使用ssh的使用者為root

ansible_ssh_user=root

#指定方式為origin

openshift_deployment_type=origin

#指定版本為3.11

openshift_release=3.11

openshift_enable_service_catalog=false

openshift_clock_enabled=true

openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}]

openshift_disable_check=disk_availability,docker_storage,memory_availability,docker_image_availability

[masters]

#master角色的機器名稱包含

node01

[etcd]

#etcd角色的機器名稱包含

node01

[nodes]

#node角色的機器名稱包含

#master openshift_node_group_name='node-config-all-in-one'

#node01 openshift_node_group_name='node-config-compute'

#node02 openshift_node_group_name='node-config-compute'

node01 openshift_node_group_name='node-config-master'

node02 openshift_node_group_name='node-config-compute'

node03 openshift_node_group_name='node-config-compute'

node04 openshift_node_group_name='node-config-infra'

node05 openshift_node_group_name='node-config-infra'

#gluster[1:6].example.com openshift_node_group_name='node-config-compute-storage'

#openshift_enable_service_catalog=false

#openshift_hosted_registry_storage_kind=nfs

#openshift_hosted_registry_storage_access_modes=['ReadWriteMany']

#openshift_hosted_registry_storage_nfs_directory=/data/docker

#openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)'

#openshift_hosted_registry_storage_volume_name=registry

#openshift_hosted_registry_storage_volume_size=20Gi

# openshiftclock_enabled=true

# ansible_service_broker_install=false

3 使用ansible來進行安裝

#安裝前檢查

ansible-playbook ~/openshift-ansible/playbooks/prerequisites.yml

#安裝

ansible-playbook ~/openshift-ansible/playbooks/deploy_cluster.yml

#安裝openshift-web-console

ansible-playbook ~/openshift-ansible/playbooks/openshift-web-console/config.yml

#如需重新安裝,先解除安裝

ansible-playbook ~/openshift-ansible/playbooks/adhoc/uninstall.yml

### 4 安裝後配置(主節點)

4.1 配置nfs持久卷

yum install nfs-utils rpcbind -y 

mkdir -p /data/v0{01..20} /data/{docker,volume,registry}

chmod -R 777 /data 

vim /etc/exports

/data 172.16.1.0/24(rw,sync,no_all_squash,no_root_squash)

/data/v001 172.16.1.0/24(rw,sync,no_all_squash,no_root_squash)

/data/v002 172.16.1.0/24(rw,sync,no_all_squash,no_root_squash)

/data/v003 172.16.1.0/24(rw,sync,no_all_squash,no_root_squash)

/data/v004 172.16.1.0/24(rw,sync,no_all_squash,no_root_squash)

/data/v005 172.16.1.0/24(rw,sync,no_all_squash,no_root_squash)

/data/v006 172.16.1.0/24(rw,sync,no_all_squash,no_root_squash)

/data/v007 172.16.1.0/24(rw,sync,no_all_squash,no_root_squash)

/data/v008 172.16.1.0/24(rw,sync,no_all_squash,no_root_squash)

/data/v009 172.16.1.0/24(rw,sync,no_all_squash,no_root_squash)

/data/v010 172.16.1.0/24(rw,sync,no_all_squash,no_root_squash)

/data/docker *(rw,sync,no_all_squash,no_root_squash)

systemctl restart rpcbind &&\

systemctl restart nfs && \

systemctl enable rpcbind &&\

systemctl enable nfs

exportfs -rv    #重新讀取配置檔案

exportfs        #檢查 NFS 伺服器是否共享了我們想共享的目錄 /data

rpcinfo -p      #确認NFS伺服器啟動成功

exportfs -v     #檢視配置

kubectl apply -f pv-01-10.yml

配置檔案參考章節最後 pv-01-10.yml

4.2 建立openshift使用者

oc login -u system:admin                                ##使用系統管理者使用者登入

htpasswd -b /etc/origin/master/htpasswd admin 123456    ##建立使用者

htpasswd -b /etc/origin/master/htpasswd dev dev         ##建立使用者

oc login -u admin                                       ##使用使用者登入

oc logout                                               ##退出目前使用者

4.3 賦予建立的使用者叢集管理者權限

oc login -u system:admin &&\                            

oc adm policy add-cluster-role-to-user cluster-admin admin

4.4 通路測試

需要添加hosts解析到本地電腦

172.16.1.91 node01

172.16.1.92 node02

172.16.1.93 node03

172.16.1.94 node04

172.16.1.95 node05

賬号密碼是上面建立使用者的賬号密碼

http://node01:8443 admin/123456

### 5 其他配置

5.1 部署叢集節點管理cockpit

yum install -y cockpit cockpit-docker cockpit-kubernetes &&\

systemctl start cockpit &&\

systemctl enable cockpit.socket &&\

iptables -A INPUT -p tcp -m state --state NEW -m tcp --dport 9090 -j ACCEPT

https://172.16.1.91:9090   賬号密碼是機器的ssh賬号密碼

5.2 指令補全

#kubectl 指令補全  

mkdir -p /usr/share/bash-completion/kubernetes

kubectl completion bash >/usr/share/bash-completion/kubernetes/bash_completion

echo 'source /usr/share/bash-completion/kubernetes/bash_completion' >>~/.bash_profile

#oc 自動補全

mkdir -p /usr/share/bash-completion/openshift

oc completion bash >/usr/share/bash-completion/openshift/bash_completion

echo "source /usr/share/bash-completion/openshift/bash_completion" >> ~/.bash_profile

source ~/.bash_profile

5.3 openshift登入

#dev使用者登陸openshift:使用者名dev 密碼:dev

oc login -n openshift

oc get svc -n default|grep docker-registry|awk '{print $3}'

#檢視admin使用者的token

oc whoami -t

#登入docker私庫

docker login -u admin -p `oc whoami -t` docker-registry.default.svc:5000

通過觀察service的docker-registry的IP

将svc添加每台主機的hosts做對應的解析

5.4 常用指令行操作

#master-restart api

#master-restart controllers

oc whoami -t                                            ###檢視目前使用者token

oc login https://node01:8443 --token=`oc whoami -t`     ###使用使用者token登入

oc get nodes                                            ###檢視目前node節點狀态

### 6 其他

6.1 pv-01-10.yml檔案

apiVersion: v1

kind: PersistentVolume

metadata:

  name: nfs-pv001

  labels:

    name: nfs-pv001

    type: nfs

spec:

  nfs:

    path: /data/v001

    server: 172.16.1.91

  capacity:

    storage: 50Gi

  accessModes:

    - ReadWriteMany

    - ReadWriteOnce

    - ReadOnlyMany

  persistentVolumeReclaimPolicy: Retain

---

apiVersion: v1

kind: PersistentVolume

metadata:

  name: nfs-pv002

  labels:

    name: nfs-pv002

    type: nfs

spec:

  nfs:

    path: /data/v002

    server: 172.16.1.91

  capacity:

    storage: 50Gi

  accessModes:

    - ReadWriteMany

    - ReadWriteOnce

    - ReadOnlyMany

  persistentVolumeReclaimPolicy: Retain

---

apiVersion: v1

kind: PersistentVolume

metadata:

  name: nfs-pv003

  labels:

    name: nfs-pv003

    type: nfs

spec:

  nfs:

    path: /data/v003

    server: 172.16.1.91

  capacity:

    storage: 30Gi

  accessModes:

    - ReadWriteMany

    - ReadWriteOnce

    - ReadOnlyMany

  persistentVolumeReclaimPolicy: Retain

---

apiVersion: v1

kind: PersistentVolume

metadata:

  name: nfs-pv004

  labels:

    name: nfs-pv004

    type: nfs

spec:

  nfs:

    path: /data/v004

    server: 172.16.1.91

  capacity:

    storage: 30Gi

  accessModes:

    - ReadWriteMany

    - ReadWriteOnce

    - ReadOnlyMany

  persistentVolumeReclaimPolicy: Retain

---

apiVersion: v1

kind: PersistentVolume

metadata:

  name: nfs-pv005

  labels:

    name: nfs-pv005

    type: nfs

spec:

  nfs:

    path: /data/v005

    server: 172.16.1.91

  capacity:

    storage: 10Gi

  accessModes:

    - ReadWriteMany

    - ReadWriteOnce

    - ReadOnlyMany

  persistentVolumeReclaimPolicy: Retain

---

apiVersion: v1

kind: PersistentVolume

metadata:

  name: nfs-pv006

  labels:

    name: nfs-pv006

    type: nfs

spec:

  nfs:

    path: /data/v006

    server: 172.16.1.91

  capacity:

    storage: 10Gi

  accessModes:

    - ReadWriteMany

    - ReadWriteOnce

    - ReadOnlyMany

  persistentVolumeReclaimPolicy: Retain

---

apiVersion: v1

kind: PersistentVolume

metadata:

  name: nfs-pv007

  labels:

    name: nfs-pv007

    type: nfs

spec:

  nfs:

    path: /data/v007

    server: 172.16.1.91

  capacity:

    storage: 5Gi

  accessModes:

    - ReadWriteMany

    - ReadWriteOnce

    - ReadOnlyMany

  persistentVolumeReclaimPolicy: Retain

---

apiVersion: v1

kind: PersistentVolume

metadata:

  name: nfs-pv008

  labels:

    name: nfs-pv008

    type: nfs

spec:

  nfs:

    path: /data/v008

    server: 172.16.1.91

  capacity:

    storage: 5Gi

  accessModes:

    - ReadWriteMany

    - ReadWriteOnce

    - ReadOnlyMany

  persistentVolumeReclaimPolicy: Retain

---

apiVersion: v1

kind: PersistentVolume

metadata:

  name: nfs-pv009

  labels:

    name: nfs-pv009

    type: nfs

spec:

  nfs:

    path: /data/v009

    server: 172.16.1.91

  capacity:

    storage: 2Gi

  accessModes:

    - ReadWriteMany

    - ReadWriteOnce

    - ReadOnlyMany

  persistentVolumeReclaimPolicy: Retain

---

apiVersion: v1

kind: PersistentVolume

metadata:

  name: nfs-pv010

  labels:

    name: nfs-pv010

    type: nfs

spec:

  nfs:

    path: /data/v010

    server: 172.16.1.91

  capacity:

    storage: 2Gi

  accessModes:

    - ReadWriteMany

    - ReadWriteOnce

    - ReadOnlyMany

  persistentVolumeReclaimPolicy: Retain

===================================================================================================

#Centos7.6安裝Docker_1.13.1

#### 移除舊的版本:

yum remove docker docker-client \

                  docker-client-latest \

                  docker-common \

                  docker-latest \

                  docker-latest-logrotate \

                  docker-logrotate \

                  docker-selinux \

                  docker-engine-selinux \

                  docker-engine

#### 安裝一些必要的系統工具:

yum install -y yum-utils device-mapper-persistent-data lvm2

#### 添加軟體源資訊 (可選)

yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

#### 更新 yum 緩存(可選)

yum makecache fast

#### 安裝 Docker 1.13.1

yum -y install docker

#### CentOS7的Docker預設存儲路徑修改

mkdir -p /opt/docker/data

vim /usr/lib/systemd/system/docker.service

#### 找到以下内容

ExecStart=/usr/bin/dockerd-current \

# -g 或 --graph 或 --data-root,插入或修改後儲存

  -g /opt/docker/data

#### 修改後需要重載設定

systemctl daemon-reload

#### 啟動Docker背景服務及開機啟動

systemctl enable docker && systemctl start docker

#### 測試運作 hello-world

docker run hello-world

#### 鏡像加速,修改/etc/docker/daemon.json内容:

{

  "registry-mirrors": ["http://hub-mirror.c.163.com"]

}

#### 删除 Docker CE

yum remove docker-ce

rm -rf /var/lib/docker

https://www.runoob.com/docker/centos-docker-install.html

Docker指定容器的IP範圍和DNS

# 注意:default-address-pools 指定的是預設網絡位址段範圍,如果設為C級位址"5.5.5.0/24"則執行"docker network create mynet1"會出錯,原因是不能再分派C級位址。

如果設為B級位址"5.5.0.0/16",則預設網絡的第一個配置設定的IP位址為5.5.0.1,執行"docker network create mynet1 && docker network inspect mynet1",

會看到mynet1網絡範圍為5.5.1.0/24,通過“--net mynet1”分派的第一個IP位址為5.5.1.1

echo  -e '{

  "authorization-plugins": [],

  "dns":["172.16.250.15","168.63.129.16","8.8.8.8"],

  "default-address-pools":[{"base":"5.5.0.0/16","size":24}]

}

' > /etc/docker/daemon.json

systemctl restart docker

# 建立自定義網絡。注意:容器使用自定義網絡,會導緻/etc/resolv.conf變為原始值,無法使用主控端的/etc/docker/daemon.json所設定的dns,需要手動修改,是以建議盡管不要用自定義網絡。

docker network create --subnet=5.5.5.0/24 mynetwork1 --gateway=5.5.5.1

https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file

解決容器無法通路主控端端口的問題

firewall-cmd --permanent --zone=public --add-rich-rule='rule family=ipv4 source address=5.5.0.0/16 accept' && firewall-cmd --reload

The following table lists the playbooks in the order that they must run:

Table 1. Individual Component Playbook Run Order

Playbook Name    File Location

Health Check

~/openshift-ansible/playbooks/openshift-checks/pre-install.yml

Node Bootstrap

~/openshift-ansible/playbooks/openshift-node/bootstrap.yml

etcd Install

~/openshift-ansible/playbooks/openshift-etcd/config.yml

NFS Install

~/openshift-ansible/playbooks/openshift-nfs/config.yml

Load Balancer Install

~/openshift-ansible/playbooks/openshift-loadbalancer/config.yml

Master Install

~/openshift-ansible/playbooks/openshift-master/config.yml

Master Additional Install

~/openshift-ansible/playbooks/openshift-master/additional_config.yml

Node Join

~/openshift-ansible/playbooks/openshift-node/join.yml

GlusterFS Install

~/openshift-ansible/playbooks/openshift-glusterfs/config.yml

Hosted Install

~/openshift-ansible/playbooks/openshift-hosted/config.yml

Monitoring Install

~/openshift-ansible/playbooks/openshift-monitoring/config.yml

Web Console Install

~/openshift-ansible/playbooks/openshift-web-console/config.yml

Admin Console Install

~/openshift-ansible/playbooks/openshift-console/config.yml

Metrics Install

~/openshift-ansible/playbooks/openshift-metrics/config.yml

metrics-server

~/openshift-ansible/playbooks/metrics-server/config.yml

Logging Install

~/openshift-ansible/playbooks/openshift-logging/config.yml

Availability Monitoring Install

~/openshift-ansible/playbooks/openshift-monitor-availability/config.yml

Service Catalog Install

~/openshift-ansible/playbooks/openshift-service-catalog/config.yml

Management Install

~/openshift-ansible/playbooks/openshift-management/config.yml

Descheduler Install

~/openshift-ansible/playbooks/openshift-descheduler/config.yml

Node Problem Detector Install

~/openshift-ansible/playbooks/openshift-node-problem-detector/config.yml

Autoheal Install

~/openshift-ansible/playbooks/openshift-autoheal/config.yml

Operator Lifecycle Manager (OLM) Install (Technology Preview)

~/openshift-ansible/playbooks/olm/config.yml