Ceph configuration parameters

2015-06-03 20:20
Ceph needs different parameter settings for different hardware. The configurations below are ones we have tested and found to work reasonably well.
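Both examples below are complete ceph.conf files. A minimal sketch of rolling one out (assuming the standard /etc/ceph/ceph.conf path and the sysvinit service script shipped with Ceph releases of this era; the hostname is only an example):

# push the config to every node, then restart the daemons so it takes effect
scp ceph.conf root@controller1:/etc/ceph/ceph.conf
/etc/init.d/ceph restart    # on each node; "service ceph restart" also works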

For an HDD environment:

[global]
fsid = a7f64266-0894-4f1e-a635-d0aeaca0e993
public network = 10.10.2.0/24
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
filestore xattr use omap = true
osd pool default size = 2
osd pool default min size = 1
osd pool default pg num = 333
osd pool default pgp num = 333
osd crush chooseleaf type = 1
mon osd full ratio = .80
mon osd nearfull ratio = .70

debug lockdep = 0/0
debug context = 0/0
debug crush = 0/0
debug buffer = 0/0
debug timer = 0/0
debug journaler = 0/0
debug osd = 0/0
debug optracker = 0/0
debug objclass = 0/0
debug filestore = 0/0
debug journal = 0/0
debug ms = 0/0
debug monc = 0/0
debug tp = 0/0
debug auth = 0/0
debug finisher = 0/0
debug heartbeatmap = 0/0
debug perfcounter = 0/0
debug asok = 0/0
debug throttle = 0/0

[mon]
mon initial members = controller1, compute1, compute2
mon host = 10.10.2.101:6789, 10.10.2.102:6789, 10.10.2.103:6789

[mon.controller1]
host = controller1
mon addr = 10.10.2.101:6789
mon data = /var/lib/ceph/mon/ceph-controller1

[mon.compute1]
host = compute1
mon addr = 10.10.2.102:6789
mon data = /var/lib/ceph/mon/ceph-compute1

[mon.compute2]
host = compute2
mon addr = 10.10.2.103:6789
mon data = /var/lib/ceph/mon/ceph-compute2

[osd]
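# 1024 MB journal; the docs' sizing rule of thumb is 2 * (expected throughput * filestore max sync interval)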
osd journal size = 1024
osd data = /var/lib/ceph/osd/$cluster-$id
osd journal = /var/lib/ceph/osd/$cluster-$id/journal

[osd.0]
osd host = controller1
public addr = 10.10.2.101
cluster addr = 10.10.2.101

[osd.1]
osd host = controller1
public addr = 10.10.2.101
cluster addr = 10.10.2.101

[osd.2]
osd host = controller1
public addr = 10.10.2.101
cluster addr = 10.10.2.101

[osd.3]
osd host = controller1
public addr = 10.10.2.101
cluster addr = 10.10.2.101

[osd.4]
osd host = controller1
public addr = 10.10.2.101
cluster addr = 10.10.2.101

[osd.5]
osd host = compute1
public addr = 10.10.2.102
cluster addr = 10.10.2.102

...
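A note on the pg num value: 333 is the number used in the Ceph docs' example configuration, and the usual rule of thumb is (OSD count * 100) / replica count, rounded up to the nearest power of two. A quick check, assuming for illustration that the truncated OSD list above ends at 10 OSDs:

(10 OSDs * 100) / 2 replicas = 500  ->  round up to 512

So recompute pg num and pgp num for your actual OSD count rather than copying 333 verbatim.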


For an SSD environment:

[global]
fsid = a7f64266-0894-4f1e-a635-d0aeaca0e993
filestore xattr use omap = true
public network = 172.16.3.0/24
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd pool default size = 2
osd pool default min size = 1
osd pool default pg num = 4096
osd pool default pgp num = 4096
osd crush chooseleaf type = 0

debug lockdep = 0/0
debug context = 0/0
debug crush = 0/0
debug buffer = 0/0
debug timer = 0/0
debug journaler = 0/0
debug osd = 0/0
debug optracker = 0/0
debug objclass = 0/0
debug filestore = 0/0
debug journal = 0/0
debug ms = 0/0
debug monc = 0/0
debug tp = 0/0
debug auth = 0/0
debug finisher = 0/0
debug heartbeatmap = 0/0
debug perfcounter = 0/0
debug asok = 0/0
debug throttle = 0/0

[client.cephuser]
keyring = /etc/ceph/client.cephuser.keyring

[mon]
mon initial members = compute1, compute2, compute3, compute4, compute5
mon host = 172.16.1.51:6789, 172.16.1.52:6789, 172.16.1.53:6789, 172.16.1.54:6789, 172.16.1.55:6789
mon osd down out interval = 600
mon osd min down reporters = 13

[mon.compute1]
host = compute1
mon addr = 172.16.1.51:6789
mon data = /var/lib/ceph/mon/ceph-compute1

[mon.compute2]
host = compute2
mon addr = 172.16.1.52:6789
mon data = /var/lib/ceph/mon/ceph-compute2

[mon.compute3]
host = compute3
mon addr = 172.16.1.53:6789
mon data = /var/lib/ceph/mon/ceph-compute3

[mon.compute4]
host = compute4
mon addr = 172.16.1.54:6789
mon data = /var/lib/ceph/mon/ceph-compute4

[mon.compute5]
host = compute5
mon addr = 172.16.1.55:6789
mon data = /var/lib/ceph/mon/ceph-compute5

[client]
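# rbd cache is disabled here; the size/dirty settings below only take effect if rbd cache = true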
rbd cache = false
rbd cache writethrough until flush = false
rbd cache size = 335544320
rbd cache max dirty = 335544320
rbd cache target dirty = 235544320
rbd cache max dirty age = 60
rbd cache max dirty object = 0

[osd]
osd data = /var/lib/ceph/osd/$cluster-$id
osd journal = /var/lib/ceph/osd/$cluster-$id/journal
osd mkfs type = xfs
osd mkfs options xfs = -f -i size=2048
osd mount options xfs = rw,noatime,logbsize=256k,delaylog
osd journal size = 20480
osd mon heartbeat interval = 30

# performance tuning: filestore
filestore merge threshold = 40
filestore split multiple = 8
osd max backfills = 1
osd recovery op priority = 1
filestore fd cache size = 1024
filestore queue max bytes = 1048576000
filestore queue committing max bytes = 1048576000
journal max write bytes = 1048576000
journal queue max bytes = 1048576000
ms dispatch throttle bytes = 1048576000
objecter inflight op bytes = 1048576000
filestore queue max ops = 500000
filestore queue committing max ops = 500000
journal max write entries = 100000
journal queue max ops = 500000
objecter inflight ops = 819200
osd min pg log entries = 30000
osd max pg log entries = 100000
osd op log threshold = 50

[osd.0]
osd host = compute1
public addr = 172.16.3.51
cluster addr = 172.16.3.51
osd journal = /dev/sdb2

[osd.1]
osd host = compute1
public addr = 172.16.3.51
cluster addr = 172.16.3.51
osd journal = /dev/sdc2

[osd.2]
osd host = compute2
public addr = 172.16.3.52
cluster addr = 172.16.3.52
osd journal = /dev/sdb2

[osd.3]
osd host = compute2
public addr = 172.16.3.52
cluster addr = 172.16.3.52
osd journal = /dev/sdc2

[osd.4]
osd host = compute3
public addr = 172.16.3.53
cluster addr = 172.16.3.53
osd journal = /dev/sdb2

[osd.5]
osd host = compute3
public addr = 172.16.3.53
cluster addr = 172.16.3.53
osd journal = /dev/sdc2

...
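To confirm that running daemons actually picked up these values, you can query them over the admin socket; a minimal sketch, assuming it is run on a node hosting osd.0:

# dump the effective values of the filestore queue settings from a running OSD
ceph daemon osd.0 config show | grep filestore_queue
# adjust a value at runtime without a restart (lost on restart; persist it in ceph.conf)
ceph tell osd.* injectargs '--osd_max_backfills 1'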