Add the mon configuration to the ceph.conf file (the osd sections can be ignored for now)
[global]
fsid = caefadfb-ad33-453f-be23-778fdbd0a892
cluster = ceph
public network = 10.3.0.128/27
# Enable cephx authentication (which uses shared keys for almost everything)
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
# Replication
osd pool default size = 2
osd pool default min size = 1
osd pool default pg num = 333
osd pool default pgp num = 333
osd crush chooseleaf type = 1
[osd]
osd journal size = 1024
[mon]
# Global settings for monitors
mon host = zw-vm-138, zw-vm-139, zw-vm-140
mon addr = 10.3.0.138:6789, 10.3.0.139:6789, 10.3.0.140:6789
mon initial members = 0, 1, 2
[mon.0]
host = zw-vm-138
mon addr = 10.3.0.138:6789
[mon.1]
host = zw-vm-139
mon addr = 10.3.0.139:6789
[mon.2]
host = zw-vm-140
mon addr = 10.3.0.140:6789
[osd.0]
host = zw-vm-138
[osd.1]
host = zw-vm-139
[osd.2]
host = zw-vm-140
[mds.0]
host = zw-vm-138
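As a quick sanity check that the file parses and the per-monitor sections resolve, a ceph-conf lookup along these lines should work (this assumes the default path /etc/ceph/ceph.conf and that your build supports section lookups):
ceph-conf -c /etc/ceph/ceph.conf -s mon.0 "mon addr"    # expect 10.3.0.138:6789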
Create the monitors
Create the mon keyring
ceph-authtool --create-keyring /etc/ceph/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'
Create the admin keyring
ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --set-uid=0 --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow'
Import the admin keyring into the mon keyring
ceph-authtool /etc/ceph/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring
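To confirm both keys made it into the combined keyring, ceph-authtool can list its contents:
ceph-authtool -l /etc/ceph/ceph.mon.keyring    # should show both the mon. and client.admin entries with their caps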
Create the initial monmap file
monmaptool --create --add 0 10.3.0.138:6789 --add 1 10.3.0.139:6789 --add 2 10.3.0.140:6789 --fsid caefadfb-ad33-453f-be23-778fdbd0a892 initial-monmap
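To verify the monmap before distributing it, monmaptool can print it back:
monmaptool --print initial-monmap    # should list the fsid and the three mons with their addresses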
Push /etc/ceph/* to the other servers
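Any copy mechanism works; a plain scp sketch is below. It assumes initial-monmap was written into /etc/ceph (the mkfs steps below cd there first) and that root SSH access to the other nodes is available; adjust as needed.
# copy the config, keyrings and initial monmap to the other two nodes (hostnames as in ceph.conf)
scp /etc/ceph/* root@zw-vm-139:/etc/ceph/
scp /etc/ceph/* root@zw-vm-140:/etc/ceph/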
Create mon.0 on server1 (zw-vm-138)
Create the monitor data directory
mkdir -p /var/lib/ceph/mon/ceph-0
chown ceph:ceph -R /var/lib/ceph
cd /var/log/ceph
rm -rf *
cd /etc/ceph
Format the monitor data store
ceph-mon --mkfs -i 0 --monmap initial-monmap --keyring ceph.mon.keyring
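Optionally verify the data store was populated; note that if the daemon is meant to run as the ceph user, the files just created by mkfs (run as root) may need their ownership fixed again. A sketch under that assumption:
ls -l /var/lib/ceph/mon/ceph-0                   # expect keyring, store.db, etc.
chown -R ceph:ceph /var/lib/ceph/mon/ceph-0      # only needed if ceph-mon runs as the ceph user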
Set up the init script
cd /etc/init.d
ln -s ceph ceph.mon-0
rc-update add ceph.mon-0 default
Create mon.1 on server2 (zw-vm-139)
mkdir -p /var/lib/ceph/mon/ceph-1
chown ceph:ceph -R /var/lib/ceph
cd /var/log/ceph
rm -rf *
cd /etc/ceph
ceph-mon --mkfs -i 1 --monmap initial-monmap --keyring ceph.mon.keyring
cd /etc/init.d
ln -s ceph ceph.mon-1
rc-update add ceph.mon-1 default
Create mon.2 on server3 (zw-vm-140)
mkdir -p /var/lib/ceph/mon/ceph-2
chown ceph:ceph -R /var/lib/ceph
cd /var/log/ceph
rm -rf *
cd /etc/ceph
ceph-mon --mkfs -i 2 --monmap initial-monmap --keyring ceph.mon.keyring
cd /etc/init.d
ln -s ceph ceph.mon-2
rc-update add ceph.mon-2 default
Start all mon daemons (run each command on its own server)
/etc/init.d/ceph.mon-0 start
/etc/init.d/ceph.mon-1 start
/etc/init.d/ceph.mon-2 start
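To confirm the three monitors actually formed quorum before moving on, the standard status commands can be run on any mon host that has the admin keyring in /etc/ceph:
ceph mon stat                              # one-line monmap summary including the quorum list
ceph quorum_status --format json-pretty    # detailed election and quorum information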
After starting, the cluster status looks like this:
# ceph -s
    cluster caefadfb-ad33-453f-be23-778fdbd0a892
     health HEALTH_ERR
            64 pgs stuck inactive
            64 pgs stuck unclean
            no osds
     monmap e1: 3 mons at {0=10.3.0.138:6789/0,1=10.3.0.139:6789/0,2=10.3.0.140:6789/0}
            election epoch 6, quorum 0,1,2 0,1,2
     osdmap e1: 0 osds: 0 up, 0 in
            flags sortbitwise
      pgmap v2: 64 pgs, 1 pools, 0 bytes data, 0 objects
            0 kB used, 0 kB / 0 kB avail
                  64 creating
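The HEALTH_ERR state is expected at this point: the single default pool has 64 placement groups that cannot be created or placed because no OSDs exist yet, so the stuck/creating PG warnings should clear once the OSDs are added and come up.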