glusterfs 怎麼叢集,網上一搜鋪天蓋地的
可利用這個特點做單節點高可用:因為在 K8S 中,哪怕某個節點當機了,master 也會在任意一台可用節點上把挂掉的 Pod 重新拉起
當然我是在自己的環境下跑,經過網絡的 glusterfs 資料傳輸,有比較大的性能損耗,對網絡要求也特别高
小檔案存儲性能也不高等問題.最下面有mariadb 使用glusterfs 插入10W行資料,與本地硬碟的性能對比
這裡記錄一下rabbitmq 單機高可用情景,mysql,mongodb, redis 等,萬變不離其宗
事先建立好了 volume,卷名為 env-dev
随便找個客戶機挂載
mount -t glusterfs 192.168.91.135:/env-dev /mnt/env/dev
預先建立需要的檔案夾
mkdir -p /mnt/env/dev/rabbitmq/mnesia
編寫 glusterfs endpoint
[root@k8s-master-0 dev]# cat pv-ep.yaml
apiVersion: v1
kind: Endpoints
metadata:
name: glusterfs
namespace: env-dev
subsets:
- addresses:
- ip: 192.168.91.135
- ip: 192.168.91.136
ports:
- port: 49152
protocol: TCP
---
apiVersion: v1
kind: Service
metadata:
name: glusterfs
namespace: env-dev
spec:
ports:
- port: 49152
protocol: TCP
targetPort: 49152
sessionAffinity: None
type: ClusterIP
編寫 pv,注意這裡path 是 volume名稱 + 具體路徑
[root@k8s-master-0 dev]# cat rabbitmq-pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: rabbitmq-pv
labels:
type: glusterfs
spec:
storageClassName: rabbitmq-dir
capacity:
storage: 3Gi
accessModes:
- ReadWriteMany
glusterfs:
endpoints: glusterfs
path: "env-dev/rabbitmq/mnesia"
readOnly: false
編寫pvc
[root@k8s-master-0 dev]# cat rabbitmq-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: rabbitmq-pvc
namespace: env-dev
spec:
storageClassName: rabbitmq-dir
accessModes:
- ReadWriteMany
resources:
requests:
storage: 3Gi
建立 endpoint、pv、pvc
kubectl apply -f pv-ep.yaml
kubectl apply -f rabbitmq-pv.yaml
kubectl apply -f rabbitmq-pvc.yaml
使用方式如下,重點是 volumeMounts 與 volumes 中引用 PVC 的部分
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: ha-rabbitmq
namespace: env-dev
spec:
replicas: 1
selector:
matchLabels:
app: ha-rabbitmq
template:
metadata:
labels:
app: ha-rabbitmq
spec:
#hostNetwork: true
hostname: ha-rabbitmq
terminationGracePeriodSeconds: 60
containers:
- name: ha-rabbitmq
image: 192.168.91.137:5000/rabbitmq:3.7.7-management-alpine
securityContext:
privileged: true
env:
- name: "RABBITMQ_DEFAULT_USER"
value: "rabbit"
- name: "RABBITMQ_DEFAULT_PASS"
value: "rabbit"
ports:
- name: tcp
containerPort: 5672
hostPort: 5672
- name: http
containerPort: 15672
hostPort: 15672
livenessProbe:
failureThreshold: 3
httpGet:
path: /
port: 15672
scheme: HTTP
initialDelaySeconds: 20
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
failureThreshold: 3
httpGet:
path: /
port: 15672
scheme: HTTP
initialDelaySeconds: 20
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
volumeMounts:
- name: date
mountPath: /etc/localtime
- name: workdir
mountPath: "/var/lib/rabbitmq/mnesia"
volumes:
- name: date
hostPath:
path: /usr/share/zoneinfo/Asia/Shanghai
- name: workdir
persistentVolumeClaim:
claimName: rabbitmq-pvc
---
apiVersion: v1
kind: Service
metadata:
name: ha-rabbitmq
namespace: env-dev
labels:
app: ha-rabbitmq
spec:
ports:
- name: tcp
port: 5672
targetPort: 5672
- name: http
port: 15672
targetPort: 15672
建立rabbitmq pod以及service.
kubectl create -f ha-rabbitmq.yaml
配置設定到了第一個節點,看看資料檔案
在管理頁面建立一個 virtual host
環境東西太多,這裡就不暴力關機了,直接删除再建立
這次配置設定到節點0
看看剛才建立的virtual host
還健在
haproxy 代理
[root@localhost conf]# cat haproxy.cfg
global
chroot /usr/local
daemon
nbproc 1
group nobody
user nobody
pidfile /haproxy.pid
#ulimit-n 65536
#spread-checks 5m
#stats timeout 5m
#stats maxconn 100
########預設配置############
defaults
mode tcp
retries 3 #兩次連接配接失敗就認為是伺服器不可用,也可以通過後面設定
option redispatch #當serverId對應的伺服器挂掉後,強制定向到其他健康的伺服器
option abortonclose #當伺服器負載很高的時候,自動結束掉目前隊列處理比較久的連結
maxconn 32000 #預設的最大連接配接數
timeout connect 10s #連接配接逾時
timeout client 8h #用戶端逾時
timeout server 8h #伺服器逾時
timeout check 10s #心跳檢測逾時
log 127.0.0.1 local0 err #[err warning info debug]
########MariaDB配置#################
listen mariadb
bind 0.0.0.0:3306
mode tcp
balance leastconn
server mariadb1 192.168.91.141:3306 check port 3306 inter 2s rise 1 fall 2 maxconn 1000
server mariadb2 192.168.91.142:3306 check port 3306 inter 2s rise 1 fall 2 maxconn 1000
server mariadb3 192.168.91.143:3306 check port 3306 inter 2s rise 1 fall 2 maxconn 1000
#######RabbitMq配置#################
listen rabbitmq
bind 0.0.0.0:5672
mode tcp
balance leastconn
server rabbitmq1 192.168.91.141:5672 check port 5672 inter 2s rise 1 fall 2 maxconn 1000
server rabbitmq2 192.168.91.142:5672 check port 5672 inter 2s rise 1 fall 2 maxconn 1000
server rabbitmq3 192.168.91.143:5672 check port 5672 inter 2s rise 1 fall 2 maxconn 1000
#######Redis配置#################
listen redis
bind 0.0.0.0:6379
mode tcp
balance leastconn
server redis1 192.168.91.141:6379 check port 6379 inter 2s rise 1 fall 2 maxconn 1000
server redis2 192.168.91.142:6379 check port 6379 inter 2s rise 1 fall 2 maxconn 1000
server redis3 192.168.91.143:6379 check port 6379 inter 2s rise 1 fall 2 maxconn 1000
nginx 代理管理頁面
Mariadb 使用 glusterfs 與 本地硬碟 性能測試
#使用glusterfs 兩個副本的 mariadb 插入10W行資料
2 queries executed, 2 success, 0 errors, 0 warnings
查詢:CREATE PROCEDURE test_insert() BEGIN DECLARE Y bigint DEFAULT 1; WHILE Y<100000 DO INSERT INTO t_insert VALUES(NULL,'11111111111...
共 0 行受到影響
執行耗時 : 0.018 sec
傳送時間 : 1.366 sec
總耗時 : 1.385 sec
-----------------------------------------------------------
查詢:CALL test_insert();
共 99999 行受到影響
執行耗時 : 6 min 1 sec
傳送時間 : 0 sec
總耗時 : 6 min 1 sec
#使用本地硬碟 mariadb 插入10W行資料
2 queries executed, 2 success, 0 errors, 0 warnings
查詢:CREATE PROCEDURE test_insert() BEGIN DECLARE Y BIGINT DEFAULT 1; WHILE Y<100000 DO INSERT INTO t_insert VALUES(NULL,'11111111111...
共 0 行受到影響
執行耗時 : 0.004 sec
傳送時間 : 1.372 sec
總耗時 : 1.376 sec
-----------------------------------------------------------
查詢:CALL test_insert();
共 99999 行受到影響
執行耗時 : 1 min 13 sec
傳送時間 : 0.006 sec
總耗時 : 1 min 13 sec