Lab topology:
![](https://img.laitimes.com/img/__Qf2AjLwojIjJCLyojI0JCLicmbw5yYlRzNyATZ2EmNjJTOlBDO5cDO0kTZ3cTZzQ2Y1MDMw8CX5d2bs92Yl1iclB3bsVmdlR2LcNWaw9CXt92Yu4GZjlGbh5yYjV3Lc9CX6MHc0RHaiojIsJye.png)
I. Installing and Configuring the Ceph Service
1. Configure server time synchronization
Pick one node to act as the primary time server and configure it as shown below.
Point the remaining servers at the primary server as their upstream time source.
Start the service on every node and verify with chronyc sources.
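A minimal chrony sketch, assuming ceph-node1 acts as the primary time server and the nodes sit on 192.168.1.0/24; the upstream NTP source below is a placeholder, not taken from the original:
#/etc/chrony.conf on ceph-node1 (primary)
#placeholder upstream source
server ntp.aliyun.com iburst
#allow the other cluster nodes to sync from this host
allow 192.168.1.0/24
#keep serving time even if the upstream source is unreachable
local stratum 10
#/etc/chrony.conf on the remaining nodes: point at the primary server
server ceph-node1 iburst
#on every node: enable and start the service, then check sync status
systemctl enable chronyd
systemctl restart chronyd
chronyc sources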
2. Install Ceph and create the cluster
Install ceph-deploy on the node1 node:
[root@ceph-node1 yum.repos.d]# yum -y install ceph-deploy
If the /etc/ceph configuration directory does not exist, create it; all ceph-deploy commands must be run from inside this directory.
#Enter /etc/ceph and create the ceph cluster
[root@ceph-node1 ~]# mkdir /etc/ceph
[root@ceph-node1 ~]# cd /etc/ceph/
[root@ceph-node1 ceph]# ceph-deploy new ceph-node1
After the cluster is created, the cluster configuration file and keyring file are generated in the working directory.
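To confirm, list the working directory; you should at least see the generated ceph.conf and ceph.mon.keyring (exact file names may vary slightly between ceph-deploy versions):
[root@ceph-node1 ceph]# ls /etc/ceph/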
#Running the install command on node1 installs the ceph packages on all nodes at once
[root@ceph-node1 ceph]# ceph-deploy install ceph-node1 ceph-node2 ceph-node3
#After installation, run ceph -v to check the version and confirm the install succeeded
[root@ceph-node1 ~]# ceph -v
ceph version 0.94.5 (9764da52395923e0b32908d83a9f7304401fee43)
[root@ceph-node2 ~]# ceph -v
ceph version 0.94.5 (9764da52395923e0b32908d83a9f7304401fee43)
[root@ceph-node3 ~]# ceph -v
ceph version 0.94.5 (9764da52395923e0b32908d83a9f7304401fee43)
#Create the first monitor on the node1 node
[root@ceph-node1 ceph]# ceph-deploy --overwrite-conf mon create-initial
#Check the cluster status; at this point it is unhealthy
[root@ceph-node1 ceph]# ceph -s
cluster 37b6fbec-7562-404a-8a43-2e443f53937e
health HEALTH_ERR
64 pgs stuck inactive
64 pgs stuck unclean
no osds
monmap e1: 1 mons at {ceph-node1=192.168.1.31:6789/0}
election epoch 2, quorum 0 ceph-node1
osdmap e1: 0 osds: 0 up, 0 in
pgmap v2: 64 pgs, 1 pools, 0 bytes data, 0 objects
0 kB used, 0 kB / 0 kB avail
64 creating
3. Configure disks and create OSDs
#disk list shows all available disks on the node; here sdb is the 50 GB disk newly added to the server
[root@ceph-node1 ceph]# ceph-deploy disk list ceph-node1
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
......
[ceph-node1][DEBUG ] /dev/sdb other, unknown
#Partition the disk with parted: mklabel sets the disk label, mkpart creates the partition
[root@ceph-node1 ceph]# parted /dev/sdb
GNU Parted 3.1
Using /dev/sdb
Welcome to GNU Parted! Type 'help' to view a list of commands.
(parted) mklabel
New disk label type? gpt
Yes/No? yes
(parted) mkpart
Partition name? []?
File system type? [ext2]?
Start? 0%
End? 100%
(parted) p #print the current partition information
Model: VMware, VMware Virtual S (scsi)
Disk /dev/sdb: 53.7GB
Sector size (logical/physical): 512B/512B
Partition Table: gpt
Disk Flags:
Number Start End Size File system Name Flags
1 1049kB 53.7GB 53.7GB
(parted) q
Information: You may need to update /etc/fstab.
[root@ceph-node1 ceph]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sdb 8:16 0 50G 0 disk
└─sdb1 8:17 0 50G 0 part
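For reference, a non-interactive sketch of the same partitioning; verify the mkpart argument handling against your parted version before scripting it:
[root@ceph-node1 ceph]# parted -s /dev/sdb mklabel gpt mkpart primary 0% 100%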
#Format the partition, create a mount directory and mount it, then set the directory permissions to 777
[root@ceph-node1 ceph]# mkfs.xfs /dev/sdb1
[root@ceph-node1 ceph]# mkdir /opt/osd1
[root@ceph-node1 ceph]# mount /dev/sdb1 /opt/osd1/
[root@ceph-node1 ceph]# chmod 777 /opt/osd1/
Perform the same disk operations on all three nodes; a sketch for node2 and node3 follows.
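A sketch of those steps on the other two nodes, assuming each has partitioned its own /dev/sdb the same way:
[root@ceph-node2 ~]# mkfs.xfs /dev/sdb1
[root@ceph-node2 ~]# mkdir /opt/osd2
[root@ceph-node2 ~]# mount /dev/sdb1 /opt/osd2/
[root@ceph-node2 ~]# chmod 777 /opt/osd2/
[root@ceph-node3 ~]# mkfs.xfs /dev/sdb1
[root@ceph-node3 ~]# mkdir /opt/osd3
[root@ceph-node3 ~]# mount /dev/sdb1 /opt/osd3/
[root@ceph-node3 ~]# chmod 777 /opt/osd3/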
#Prepare the OSDs from the node1 node
[root@ceph-node1 ceph]# ceph-deploy osd prepare ceph-node1:/opt/osd1 ceph-node2:/opt/osd2 ceph-node3:/opt/osd3
After preparation completes, change the permissions of every file under the osd directories on all three nodes to 777.
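A minimal sketch of that permission change, run on each node against its own mount point:
[root@ceph-node1 ceph]# chmod -R 777 /opt/osd1/
[root@ceph-node2 ~]# chmod -R 777 /opt/osd2/
[root@ceph-node3 ~]# chmod -R 777 /opt/osd3/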
#After confirming the permissions are 777, activate the OSDs
[root@ceph-node1 ceph]# ceph-deploy osd activate ceph-node1:/opt/osd1/ ceph-node2:/opt/osd2 ceph-node3:/opt/osd3
#Check the cluster status again; it now reports healthy
[root@ceph-node1 ceph]# ceph -s
cluster 37b6fbec-7562-404a-8a43-2e443f53937e
health HEALTH_OK
monmap e1: 1 mons at {ceph-node1=192.168.1.31:6789/0}
election epoch 2, quorum 0 ceph-node1
osdmap e13: 3 osds: 3 up, 3 in
pgmap v19: 64 pgs, 1 pools, 0 bytes data, 0 objects
15460 MB used, 134 GB / 149 GB avail
64 active+clean
#Finally, push the admin configuration and keyring to the other nodes
[root@ceph-node1 ceph]# ceph-deploy admin ceph-node{1,2,3}
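With the admin keyring pushed out, the other nodes can query the cluster directly; a quick sanity check (assuming the keyring under /etc/ceph is readable) is to run the status command from one of them:
[root@ceph-node2 ~]# ceph -s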
II. Testing and Using Ceph
1. Install the Ceph client
Set up a Ceph-Client test machine: configure the network and yum repositories, then set the hostname and the hosts file.
[root@ceph-node1 ceph]# echo '192.168.1.34 ceph-client' >> /etc/hosts
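The client also needs to resolve ceph-node1 for the later rbd map -m ceph-node1 step; a minimal sketch reusing the monitor address shown above:
[root@ceph-client ~]# echo '192.168.1.31 ceph-node1' >> /etc/hosts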
#From the node1 node, install the ceph software required on the client
[root@ceph-node1 ceph]# ceph-deploy install ceph-client
#From the node1 node, push the admin configuration and keyring to the client
[root@ceph-node1 ceph]# ceph-deploy admin ceph-client
2. Using the Ceph client
#rbd create creates a block device image; rbd map maps the image to a block device
[root@ceph-client ~]# rbd create foo --size 4096 -m ceph-node1 -k /etc/ceph/ceph.client.admin.keyring
[root@ceph-client ~]# rbd map foo --name client.admin -m ceph-node1 -k /etc/ceph/ceph.client.admin.keyring
/dev/rbd0
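Optionally, the mapping can be confirmed with rbd showmapped, which lists the image-to-device mappings on this client:
[root@ceph-client ~]# rbd showmapped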
#Format and mount the rbd0 block device; it can then be used normally
[root@ceph-client ~]# mkfs.xfs /dev/rbd0
meta-data=/dev/rbd0 isize=256 agcount=9, agsize=130048 blks
= sectsz=512 attr=2, projid32bit=1
= crc=0 finobt=0
data = bsize=4096 blocks=1048576, imaxpct=25
= sunit=1024 swidth=1024 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=0
log =internal log bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=8 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
[root@ceph-client ~]# mkdir /mnt/test
[root@ceph-client ~]# mount /dev/rbd0 /mnt/test
[root@ceph-client ~]# df -h|grep rbd0
/dev/rbd0 4.0G 33M 4.0G 1% /mnt/test
3. Resize the block device
#rbd info foo shows the current block device information; the size is 4 GB at this point
[root@ceph-client ~]# rbd info foo
rbd image 'foo':
size 4096 MB in 1024 objects
order 22 (4096 kB objects)
block_name_prefix: rb.0.1026.238e1f29
format: 1
#Use the resize command to grow the image to 10 GB
[root@ceph-client ~]# rbd resize --size 10240 foo
Resizing image: 100% complete...done.
[root@ceph-client ~]# rbd info foo
rbd image 'foo':
size 10240 MB in 2560 objects
order 22 (4096 kB objects)
block_name_prefix: rb.0.1026.238e1f29
format: 1
Note in particular which command resizes the filesystem:
the resize2fs command is for ext2, ext3, and ext4 filesystems
the xfs_growfs command is for xfs filesystems
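For comparison, if the image had been formatted with ext4 instead of xfs, the corresponding (hypothetical) step would be:
[root@ceph-client ~]# resize2fs /dev/rbd0
This lab uses xfs, so xfs_growfs is the command used below.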
#After growing the filesystem, df shows it has been expanded to 10 GB
[root@ceph-client ~]# xfs_growfs /dev/rbd0
[root@ceph-client ~]# df -h|grep rbd0
/dev/rbd0 10G 33M 10G 1% /mnt/test
4. Delete the block device
#1. Unmount the block device -> 2. Unmap the image from the block device -> 3. Delete the image
[root@ceph-client ~]# df -h|grep rbd0
/dev/rbd0 10G 33M 10G 1% /mnt/test
[root@ceph-client ~]# umount /dev/rbd0
[root@ceph-client ~]# rbd unmap /dev/rbd/rbd/foo
[root@ceph-client ~]# rbd rm foo
Removing image: 100% complete...done.
III. Ceph Commands
1. Check the Ceph status
[root@ceph-node1 ceph]# ceph status
cluster 37b6fbec-7562-404a-8a43-2e443f53937e
health HEALTH_OK
monmap e1: 1 mons at {ceph-node1=192.168.1.31:6789/0}
election epoch 2, quorum 0 ceph-node1
osdmap e13: 3 osds: 3 up, 3 in
pgmap v20: 64 pgs, 1 pools, 0 bytes data, 0 objects
15459 MB used, 134 GB / 149 GB avail
64 active+clean
2. Check the monitor quorum status
[root@ceph-node1 ceph]# ceph quorum_status --format json-pretty
{
"election_epoch": 2,
"quorum": [
0
],
"quorum_names": [
"ceph-node1"
],
"quorum_leader_name": "ceph-node1",
"monmap": {
"epoch": 1,
"fsid": "37b6fbec-7562-404a-8a43-2e443f53937e",
"modified": "0.000000",
"created": "0.000000",
"mons": [
{
"rank": 0,
"name": "ceph-node1",
"addr": "192.168.1.31:6789\/0"
}
]
}
}
3. Dump the monitor information
[root@ceph-node1 ceph]# ceph mon dump
dumped monmap epoch 1
epoch 1
fsid 37b6fbec-7562-404a-8a43-2e443f53937e
last_changed 0.000000
created 0.000000
0: 192.168.1.31:6789/0 mon.ceph-node1
4. View cluster usage
[root@ceph-node1 ceph]# ceph df
GLOBAL:
SIZE AVAIL RAW USED %RAW USED
149G 134G 15459M 10.07
POOLS:
NAME ID USED %USED MAX AVAIL OBJECTS
rbd 0 0 0 46019M 0
5. Check the status of the monitors, OSDs, and PGs (placement groups)
[root@ceph-node1 ceph]# ceph mon stat
e1: 1 mons at {ceph-node1=192.168.1.31:6789/0}, election epoch 2, quorum 0 ceph-node1
[root@ceph-node1 ceph]# ceph osd stat
osdmap e13: 3 osds: 3 up, 3 in
[root@ceph-node1 ceph]# ceph pg stat
v20: 64 pgs: 64 active+clean; 0 bytes data, 15459 MB used, 134 GB / 149 GB avail
6. List the PGs
[root@ceph-node1 ceph]# ceph pg dump
7. List the Ceph storage pools
[root@ceph-node1 ceph]# ceph osd lspools
0 rbd,
8. Check the OSD CRUSH tree
[root@ceph-node1 ceph]# ceph osd tree
ID WEIGHT TYPE NAME UP/DOWN REWEIGHT PRIMARY-AFFINITY
-1 0.14996 root default
-2 0.04999 host ceph-node1
0 0.04999 osd.0 up 1.00000 1.00000
-3 0.04999 host ceph-node2
1 0.04999 osd.1 up 1.00000 1.00000
-4 0.04999 host ceph-node3
2 0.04999 osd.2 up 1.00000 1.00000
9. List the cluster's authentication keys
[root@ceph-node1 ceph]# ceph auth list
installed auth entries:
osd.0
key: AQBFsx1f/+qKFRAAyLOYuF7ep/FXLMI5yca/zg==
caps: [mon] allow profile osd
caps: [osd] allow *
osd.1
key: AQBRsx1fXCGlHBAAViSH6eCjRI7NbrLFWGajrQ==
caps: [mon] allow profile osd
caps: [osd] allow *
osd.2
key: AQBksx1fgjK9NRAAe827GWfDqfMtwxRqJ3D0sA==
caps: [mon] allow profile osd
caps: [osd] allow *
client.admin
key: AQAsrh1fAngFJhAAKPsed40MDoLgbvk7L4kuJg==
caps: [mds] allow
caps: [mon] allow *
caps: [osd] allow *
client.bootstrap-mds
key: AQAtrh1fcaafBxAA3EOMSIIlggVmUwPs6lIFzw==
caps: [mon] allow profile bootstrap-mds
client.bootstrap-osd
key: AQAsrh1fYXJsMRAAQR1fAeuSAfJ+ze4YHei7Hw==
caps: [mon] allow profile bootstrap-osd
client.bootstrap-rgw
key: AQAsrh1f6O+BORAAiWjtCwFnVgkjaVfFQXtTKQ==
caps: [mon] allow profile bootstrap-rgw