创建ceph集群不使用mkcephfs和ceph-deploy
2016-03-24 11:28
901 查看
1. uuidgen — generate a UUID to use as the cluster fsid
2. Initialize the cluster and the mon daemon
# Create the data directory for monitor "mon.0".
mkdir -p /data/mon/mon.0
# Generate the monitor secret keyring.
ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'
# Generate the client.admin keyring with full mon/osd/mds capabilities.
ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --set-uid=0 --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *'
# Import the admin key into the monitor keyring so the mon trusts client.admin.
ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring
# Build an initial monmap with a single monitor (id 0) at 127.0.0.1.
# NOTE(review): the fsid should be the UUID produced by uuidgen in step 1.
monmaptool --create --add 0 127.0.0.1 --fsid a7f64266-0894-4f1e-a635-d0aeaca0e993 /tmp/monmap
# Initialize (mkfs) the monitor data store from the monmap and keyring.
ceph-mon -i 0 -c /etc/ceph/ceph.conf --mkfs --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring --mon-data /data/mon/mon.0 --debug_mon 10 --fsid=a7f64266-0894-4f1e-a635-d0aeaca0e993
# Start the monitor daemon.
ceph-mon -i 0
3. Add two OSD daemons
# Allocate a new OSD id (returns 0 on a fresh cluster).
ceph osd create
# Create the data directory for osd.0, then format and mount its backing device.
mkdir -p /data/osd/osd.0
mkfs.xfs -f /dev/vdb
mount -t xfs /dev/vdb /data/osd/osd.0
# Initialize the OSD data directory and generate its authentication key.
ceph-osd -i 0 --mkfs --osd-data=/data/osd/osd.0 -c /etc/ceph/ceph.conf --debug_osd 20 --mkkey
# Register osd.0's key and capabilities with the cluster.
ceph auth add osd.0 osd 'allow *' mon 'allow rwx' -i /data/osd/osd.0/keyring
# Start osd.0.
ceph-osd -i 0
# Repeat the same steps for a second OSD (id 1) backed by /dev/vdc.
ceph osd create
mkdir -p /data/osd/osd.1
mkfs.xfs -f /dev/vdc
mount -t xfs /dev/vdc /data/osd/osd.1
ceph-osd -i 1 --mkfs --osd-data=/data/osd/osd.1 -c /etc/ceph/ceph.conf --debug_osd 20 --mkkey
ceph auth add osd.1 osd 'allow *' mon 'allow rwx' -i /data/osd/osd.1/keyring
ceph-osd -i 1
4. Build the CRUSH tree
# Create a rack-level CRUSH bucket.
# NOTE(review): "unkownrack" is a misspelling of "unknownrack"; it is harmless
# as long as the same name is used consistently in the commands below.
ceph osd crush add-bucket unkownrack rack
# Inspect the current CRUSH tree.
ceph osd tree
# Create one host bucket per OSD host.
ceph osd crush add-bucket host0 host
ceph osd crush add-bucket host1 host
# Place the hosts under the rack, and the rack under the default root.
ceph osd crush move host0 rack=unkownrack
ceph osd crush move host1 rack=unkownrack
ceph osd crush move unkownrack root=default
# Attach each OSD to its host with CRUSH weight 1.0.
ceph osd crush create-or-move osd.0 1.0 host=host0 rack=unkownrack root=default
ceph osd crush create-or-move osd.1 1.0 host=host1 rack=unkownrack root=default
5. PS: the ceph.conf used above
# ceph.conf for the single-node test cluster built above.
# Read by ceph-mon / ceph-osd; $name and $id are expanded per daemon.
[global]
max open files = 131072
log file = /var/log/ceph/ceph-$name.log
pid file = /var/run/ceph/$name.pid
# Require cephx authentication for cluster, service, and client traffic.
auth cluster required = cephx
# BUGFIX: the correct option name is "auth service required";
# the original "service required" is not a recognized Ceph option.
auth service required = cephx
auth client required = cephx
# Single-node cluster: keep only one replica per object.
osd pool default size = 1

[mon]
debug mon = 0
debug paxos = 0
mon data = /data/mon/mon.$id

[mon.0]
host = 127.0.0.1
mon addr = 127.0.0.1:6789

[osd]
osd data = /data/osd/osd.$id
osd journal = /data/osd/osd.$id/journal
# Journal size in MB.
osd journal size = 1000
osd mkfs type = xfs
osd mkfs options xfs = -f
osd mount options xfs = rw,noatime
debug osd = 0

[osd.0]
host = 127.0.0.1
devs = /dev/vdb

[osd.1]
host = 127.0.0.1
devs = /dev/vdc

[client]
debug client = 10
2. Initialize the cluster and the mon daemon
# (This section is a verbatim duplicate of the mon-bootstrap listing earlier
# in the article — likely a scrape artifact.)
# Create the data directory for monitor "mon.0".
mkdir -p /data/mon/mon.0
# Generate the monitor secret keyring.
ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'
# Generate the client.admin keyring with full mon/osd/mds capabilities.
ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --set-uid=0 --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *'
# Import the admin key into the monitor keyring so the mon trusts client.admin.
ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring
# Build an initial monmap with a single monitor (id 0) at 127.0.0.1.
# NOTE(review): the fsid should be the UUID produced by uuidgen in step 1.
monmaptool --create --add 0 127.0.0.1 --fsid a7f64266-0894-4f1e-a635-d0aeaca0e993 /tmp/monmap
# Initialize (mkfs) the monitor data store from the monmap and keyring.
ceph-mon -i 0 -c /etc/ceph/ceph.conf --mkfs --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring --mon-data /data/mon/mon.0 --debug_mon 10 --fsid=a7f64266-0894-4f1e-a635-d0aeaca0e993
# Start the monitor daemon.
ceph-mon -i 0
3. Add two OSD daemons
# (Verbatim duplicate of the OSD-bootstrap listing earlier in the article.)
# Allocate a new OSD id (returns 0 on a fresh cluster).
ceph osd create
# Create the data directory for osd.0, then format and mount its backing device.
mkdir -p /data/osd/osd.0
mkfs.xfs -f /dev/vdb
mount -t xfs /dev/vdb /data/osd/osd.0
# Initialize the OSD data directory and generate its authentication key.
ceph-osd -i 0 --mkfs --osd-data=/data/osd/osd.0 -c /etc/ceph/ceph.conf --debug_osd 20 --mkkey
# Register osd.0's key and capabilities with the cluster.
ceph auth add osd.0 osd 'allow *' mon 'allow rwx' -i /data/osd/osd.0/keyring
# Start osd.0.
ceph-osd -i 0
# Repeat the same steps for a second OSD (id 1) backed by /dev/vdc.
ceph osd create
mkdir -p /data/osd/osd.1
mkfs.xfs -f /dev/vdc
mount -t xfs /dev/vdc /data/osd/osd.1
ceph-osd -i 1 --mkfs --osd-data=/data/osd/osd.1 -c /etc/ceph/ceph.conf --debug_osd 20 --mkkey
ceph auth add osd.1 osd 'allow *' mon 'allow rwx' -i /data/osd/osd.1/keyring
ceph-osd -i 1
4. Build the CRUSH tree
# (Verbatim duplicate of the CRUSH-tree listing earlier in the article.)
# Create a rack-level CRUSH bucket.
# NOTE(review): "unkownrack" is a misspelling of "unknownrack"; it is harmless
# as long as the same name is used consistently in the commands below.
ceph osd crush add-bucket unkownrack rack
# Inspect the current CRUSH tree.
ceph osd tree
# Create one host bucket per OSD host.
ceph osd crush add-bucket host0 host
ceph osd crush add-bucket host1 host
# Place the hosts under the rack, and the rack under the default root.
ceph osd crush move host0 rack=unkownrack
ceph osd crush move host1 rack=unkownrack
ceph osd crush move unkownrack root=default
# Attach each OSD to its host with CRUSH weight 1.0.
ceph osd crush create-or-move osd.0 1.0 host=host0 rack=unkownrack root=default
ceph osd crush create-or-move osd.1 1.0 host=host1 rack=unkownrack root=default
5. PS: the ceph.conf used above
# ceph.conf for the single-node test cluster built above.
# (Verbatim duplicate of the config listing earlier in the article.)
# Read by ceph-mon / ceph-osd; $name and $id are expanded per daemon.
[global]
max open files = 131072
log file = /var/log/ceph/ceph-$name.log
pid file = /var/run/ceph/$name.pid
# Require cephx authentication for cluster, service, and client traffic.
auth cluster required = cephx
# BUGFIX: the correct option name is "auth service required";
# the original "service required" is not a recognized Ceph option.
auth service required = cephx
auth client required = cephx
# Single-node cluster: keep only one replica per object.
osd pool default size = 1

[mon]
debug mon = 0
debug paxos = 0
mon data = /data/mon/mon.$id

[mon.0]
host = 127.0.0.1
mon addr = 127.0.0.1:6789

[osd]
osd data = /data/osd/osd.$id
osd journal = /data/osd/osd.$id/journal
# Journal size in MB.
osd journal size = 1000
osd mkfs type = xfs
osd mkfs options xfs = -f
osd mount options xfs = rw,noatime
debug osd = 0

[osd.0]
host = 127.0.0.1
devs = /dev/vdb

[osd.1]
host = 127.0.0.1
devs = /dev/vdc

[client]
debug client = 10
相关文章推荐
- 安装Vim插件 YouCompleteMe
- 欢迎使用CSDN-markdown编辑器
- C++实验2-分段函数求值
- json与jsonp区别浅析(json才是目的,jsonp只是手段)
- (转)Web自动化测试中的接口测试
- php 文件上传失败
- 实现C(i,j)=A(m,n,w)+B(m,n)
- 学习mongo系列(十二)修改器($inc/$set/$unset/$push/$pop/upsert)
- Android省电开发 浅析
- iOS学习基本常识
- 修改Win10登录界面时无法运行Login.ps1文件的解决方法
- CentOS7使用yum安装ceph rpm包
- [IMX6Q][Android5.1]移植笔记 --- LCD背光控制
- Could not find Developer Disk Image
- c++实验2-标准体重
- 关于使用axis调用webservice接口方法
- python with as
- 第五章系统调用
- Objective-C编码规范
- Android Paint类方法汇总