1.環境準備
伺服器配置
四台伺服器使用 CentOS 7.6 作業系統,admin 為管理節點
192.168.30.15 admin
192.168.30.16 storage1
192.168.30.17 storage2
192.168.30.18 storage3
在 admin 節點配置 yum 倉庫
vim /etc/yum.repos.d/ceph.repo
[Ceph]
name=Ceph packages for $basearch
baseurl=https://mirrors.aliyun.com/ceph/rpm-mimic/el7/x86_64/
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
priority=1
[Ceph-noarch]
name=Ceph noarch packages
baseurl=https://mirrors.aliyun.com/ceph/rpm-mimic/el7/noarch/
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
priority=1
[ceph-source]
name=Ceph source packages
baseurl=https://mirrors.aliyun.com/ceph/rpm-mimic/el7/SRPMS/
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
priority=1
在 /etc/hosts 檔案中添加主機名解析
vim /etc/hosts
192.168.30.15 admin
192.168.30.16 storage1
192.168.30.17 storage2
192.168.30.18 storage3
配置 SSH 免密碼登入
# Generate an SSH key pair on the admin node, then push the public key
# to every storage node so ceph-deploy can log in without a password.
ssh-keygen
for node in storage1 storage2 storage3; do
  ssh-copy-id "$node"
done
2.部署ceph叢集
# Refresh the yum metadata and install the ceph-deploy admin tool
# (python-setuptools is a runtime dependency of ceph-deploy).
yum clean all && yum list
yum install python-setuptools ceph-deploy -y
# Create the cluster configuration directory and work from it.
# -p makes this idempotent: the directory may already exist.
mkdir -p /etc/ceph && cd /etc/ceph
# Initialize the monitor node and generate the new cluster definition
# (writes ceph.conf, the monitor keyring and a deploy log into the cwd).
ceph-deploy new admin
# Add the line below to ceph.conf (under [global]) — it is a config
# entry, NOT a shell command. Default replica count is 3; lower it to 2.
osd_pool_default_size = 2
# Install the Ceph packages on every node.
ceph-deploy install admin storage1 storage2 storage3
# Create the initial monitor(s) and gather the cluster keys.
ceph-deploy mon create-initial
# Push ceph.conf and the client.admin keyring to every node.
ceph-deploy admin storage1 storage2 storage3
# Deploy mgr daemons for cluster management.
# FIX: 'ceph-deploy mgr' requires the 'create' subcommand; without it
# the command exits with a usage error (compare 'mds create' below).
ceph-deploy mgr create storage1 storage2 storage3
#使用 ceph -s 指令進行驗證,叢集搭建完成(下方輸出為完成 OSD 與 RGW 部署後的狀態)
[root@admin ceph]# ceph -s
cluster:
id: eae1fd09-7410-446a-bb50-08717bc335ee
health: HEALTH_OK
services:
mon: 1 daemons, quorum admin
mgr: storage1(active), standbys: storage2, storage3
osd: 3 osds: 3 up, 3 in
rgw: 3 daemons active
data:
pools: 6 pools, 288 pgs
objects: 221 objects, 2.2 KiB
usage: 3.0 GiB used, 57 GiB / 60 GiB avail
pgs: 288 active+clean
# Deploy RGW daemons to provide object storage.
# FIX: 'ceph-deploy rgw' requires the 'create' subcommand, same as
# the 'mds create' / 'osd create' invocations below.
ceph-deploy rgw create storage1 storage2 storage3
# Deploy MDS daemons for CephFS. (The original notes these are "not
# yet used", but the ceph-fuse mount in step 3 needs an active MDS —
# and a filesystem created with 'ceph fs new' — TODO confirm.)
ceph-deploy mds create storage1 storage2 storage3
# Add one OSD per storage node, using /dev/sdb as the data device.
ceph-deploy osd create storage1 --data /dev/sdb
ceph-deploy osd create storage2 --data /dev/sdb
ceph-deploy osd create storage3 --data /dev/sdb
# Check OSD status with 'ceph -s'.
3.掛載 CephFS
掛載:
# Mount CephFS on /opt via ceph-fuse.
# FIX: the keyring is passed with -k/--keyring; -c expects a ceph.conf
# file, not a keyring, so the original '-c ceph.client.admin.keyring'
# would fail to authenticate. (Relative keyring path assumes this is
# run from /etc/ceph — TODO confirm.)
ceph-fuse -m 192.168.30.15:6789 /opt/ -k ceph.client.admin.keyring
解除掛載:
# Unmount the CephFS mount point.
umount /opt/