
CentOS 6.4 Ceph Distributed Cluster Filesystem Deployment Notes

2013-12-14 15:52
0, Deployment environment
IP ADDR          HOSTNAME       ROLE
192.168.1.120    master-ceph    mds, mon
192.168.1.121    ceph-node1     osd
192.168.1.122    ceph-node2     osd
192.168.1.123    ceph-node3     osd

1, Set up passwordless SSH between the nodes
1, mkdir /root/.ssh
2, ssh-keygen -t rsa (run on the mon, mds, and all osd nodes)
cp -f id_rsa.pub authorized_keys
3, Collect all of the public keys into a single authorized_keys file and sync it to the osd nodes; the combined file looks like this:
[root@master-ceph .ssh]# cat /root/.ssh/authorized_keys
ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAu3a5Dih/Cp52JUcSO+tOP5153GaZc+kqZ10r4HpR0bkIArMQg+6iQUPCAWQENSJB3diIgdCMz08Wat2LySYYkG4eZYYQbcS3/4NEAa4o9BuEfIbnRCmijIIwdNTgbgC8hHLGJfor3y7o5ZiHx/JkG2OZ2BXJJaypLHUKacAx89tmuFt21oz0IFh8mNrhiDnMN2pxnJmgJRSD4eAdXfIjZ1wUHTUMVuZzVMbDAApBXDXddMEBN1Z7vbxI9jUCSrMIacOTLZM4/7le5t5KKGxQ2iU08com3RNuma8Se3cVytIEN8NjqyUnzxHjFN7zZOVRDg+R733eDJmFeynVS5AlXw== root@master-ceph
ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAr3Apl9nmY1fkT3mFf5j3czHE/YjUWtETMMHYkLzSyptk8MHdU1gBUe05d182paLW+kawhsFRRxOXIrGqdKjkkIqy7Zgm0UrR0HbMDVOzqyfCoDVFuOhwnVr4HWvG8U9r38DJsrStdt1Sj8se0N3CMabEGlY+9RldH8VMe7qJKGvRj0ItXLkbznZWh6ahMkxcdOPdE9PXKZBjpq5ezzA14EH/z24WLFeAR/hs0ejOwi47HCCeLWRMGrROQF6fzpMClQhpIUcKQlcTu3WeCg3IUSTWDCrPjdOBLn2Cdf7OddlVT1qk9eJ4MYyNYaQL1zbgpSEP/7Nj6/EFIV2V/R/QAw== root@ceph-node1
ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA1ufp4vf2VxvFinhPZURJaaVf/AexPXE2H00BDMI8OliSLWDOk9BdZMKQjh55kTcOu1vXDL8QegZnfCqchG/ybPZLUHdiaJqKNhJDkR4/PFuf49ZzGp4y61U8WvoU2ijllpdYtSsfwCnRLqDkeuWpONDt5AXM8n8QRRzQLkSt7Ad3UW3CSU6lggn34cT9JmYieOYQr1F/+F+mkgvu1gycVPMTqVdC1NeIjFQT4ijw44fI0vELJUyNOdPrPKS9SP3sah02EBeVroekwY4icLWieZqZkLYMrFBOF5X39geB7yZOaDakH7qFSuNP2WG5OpqeF1UkkG0jxS2J1v/zDufOQQ== root@ceph-node2
ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAwa08h0zK7L2Q6hOrDhTG1TrHHDPW0CyXP/dIO8Btqm5MaHRWUTFzRsRyFZVXSGba3Rf9WPNfYIZYWnF1LwpqsJISTI3jl4pR7xmc3KlxwYIxdPdfEihbCwzX9R1ckjY933qIiMfrJ3spSVlBqm1ljNET7udnCD2EUg9DpmKchvRe8XtPPOjF9py8N7eSpu8Z8TJoAUTNLM2Q7Twi6tJCi4Bx5uaXFvS2+S7vSA+WO1zZHw38n1ptRBoCurRPBTdf3FGOOP2RXcRXHc4d0TP0MFQSzvPEcbp4m2G66O7s+TTzONdbRauwNXo5gV6hcgf8Q0+kVYSfxcrJAe5JHUlU2Q== root@ceph-node3
[root@master-ceph .ssh]#
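
A minimal sketch of one way to do the collection and redistribution from master-ceph (an assumption on my part: password-based root SSH is still enabled at this point, and IPs are used because /etc/hosts is not populated until step 3):
# pull each node's public key into the master's authorized_keys
for i in 121 122 123; do
    ssh 192.168.1.$i cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys
done
# push the combined file back out to every node
for i in 121 122 123; do
    scp /root/.ssh/authorized_keys 192.168.1.$i:/root/.ssh/authorized_keys
done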
2, Change the hostnames
1, hostname <new-hostname> (run on the mon, mds, and all osd nodes)
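Note that on CentOS 6 the hostname command only changes the running system; to make the name survive a reboot, also update /etc/sysconfig/network. For example, on ceph-node1:
hostname ceph-node1
sed -i 's/^HOSTNAME=.*/HOSTNAME=ceph-node1/' /etc/sysconfig/network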
3, Add hosts resolution
1, vim /etc/hosts and add one line per node in the form:
IP-address    hostname
2, Check the resulting file:
[root@master-ceph .ssh]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.1.120 master-ceph
192.168.1.121 ceph-node1
192.168.1.122 ceph-node2
192.168.1.123 ceph-node3
[root@master-ceph .ssh]#
3, Sync /etc/hosts to all of the nodes (mon, mds, osd).
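Assuming the SSH trust from step 1, a one-liner such as the following does the copy (the Python script in step 10 automates the same thing):
for i in 121 122 123; do scp /etc/hosts 192.168.1.$i:/etc/hosts; done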
4, Update yum and install the required dependency packages (run on the mon, mds, and all osd nodes)
rpm --import 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc'
rpm -Uvh http://mirrors.yun-idc.com/epel/6/i386/epel-release-6-8.noarch.rpm
yum install snappy leveldb gdisk python-argparse gperftools-libs -y
rpm -Uvh http://ceph.com/rpm-dumpling/el6/noarch/ceph-release-1-0.el6.noarch.rpm
yum install ceph-deploy python-pushy -y
yum install ceph -y
yum install btrfs-progs (all OSD nodes)
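To confirm the packages installed cleanly everywhere, the version can be checked on each node in one pass (a sketch, assuming the SSH trust from step 1):
for i in 120 121 122 123; do ssh 192.168.1.$i ceph -v; done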
5, Version
ceph version 0.67.4 (ad85b8bfafea6232d64cb7ba76a8b6e8252fa0c7)
6, Configuration (/etc/ceph/ceph.conf)
;
; Sample ceph ceph.conf file.
;
; This file defines cluster membership, the various locations
; that Ceph stores data, and any other runtime options.
; If a 'host' is defined for a daemon, the init.d start/stop script will
; verify that it matches the hostname (or else ignore it). If it is
; not defined, it is assumed that the daemon is intended to start on
; the current host (e.g., in a setup with a startup.conf on each
; node).
; The variables $type, $id and $name are available to use in paths
; $type = The type of daemon, possible values: mon, mds and osd
; $id = The ID of the daemon, for mon.alpha, $id will be alpha
; $name = $type.$id
; For example:
; osd.0
; $type = osd
; $id = 0
; $name = osd.0
; mon.beta
; $type = mon
; $id = beta
; $name = mon.beta
; global
[global]
; enable secure authentication
auth supported = cephx
; allow ourselves to open a lot of files
max open files = 131072
; set log file
log file = /var/log/ceph/$name.log
; log_to_syslog = true ; uncomment this line to log to syslog
; set up pid files
pid file = /var/run/ceph/$name.pid
; If you want to run an IPv6 cluster, set this to true. Dual-stack isn't possible
;ms bind ipv6 = true
; monitors
; You need at least one. You need at least three if you want to
; tolerate any node failures. Always create an odd number.
[mon]
mon data = /storage/$name
; If you are using for example the RADOS Gateway and want to have your newly created
; pools a higher replication level, you can set a default
;osd pool default size = 3
; You can also specify a CRUSH rule for new pools
; Wiki: http://ceph.newdream.net/wiki/Custom_data_placement_with_CRUSH
;osd pool default crush rule = 0
; Timing is critical for monitors, but if you want to allow the clocks to drift a
; bit more, you can specify the max drift.
;mon clock drift allowed = 1
; Tell the monitor to backoff from this warning for 30 seconds
;mon clock drift warn backoff = 30
; logging, for debugging monitor crashes, in order of
; their likelihood of being helpful :)
;debug ms = 1
;debug mon = 20
;debug paxos = 20
;debug auth = 20
[mon.0]
host = master-ceph
mon addr = 192.168.1.120:6789

; mds
; You need at least one. Define two to get a standby.
[mds]
; where the mds keeps its secret encryption keys
keyring = /storage/keyring.$name
; mds logging to debug issues.
;debug ms = 1
;debug mds = 20
[mds.0]
host = master-ceph

; osd
; You need at least one. Two if you want data to be replicated.
; Define as many as you like.
[osd]
; This is where the osd expects its data
osd data = /storage/$name
; Ideally, make the journal a separate disk or partition.
; 1-10GB should be enough; more if you have fast or many
; disks. You can use a file under the osd data dir if need be
; (e.g. /data/$name/journal), but it will be slower than a
; separate disk or partition.
; This is an example of a file-based journal.
osd journal = /storage/$name/journal
osd journal size = 1000 ; journal size, in megabytes
; If you want to run the journal on a tmpfs (don't), disable DirectIO
;journal dio = false
; You can change the number of recovery operations to speed up recovery
; or slow it down if your machines can't handle it
; osd recovery max active = 3
; osd logging to debug osd issues, in order of likelihood of being
; helpful
;debug ms = 1
;debug osd = 20
;debug filestore = 20
;debug journal = 20

; ### The below options only apply if you're using mkcephfs
; ### and the devs options
; The filesystem used on the volumes
osd mkfs type = btrfs
; If you want to specify some other mount options, you can do so.
; for other filesystems use 'osd mount options $fstype'
osd mount options btrfs = rw,noatime
; The options used to format the filesystem via mkfs.$fstype
; for other filesystems use 'osd mkfs options $fstype'
; osd mkfs options btrfs =

[osd.0]
host = ceph-node1
; if 'devs' is not specified, you're responsible for
; setting up the 'osd data' dir.
devs = /dev/sdb1
[osd.1]
host = ceph-node2
devs = /dev/sdb1
[osd.2]
host = ceph-node3
devs = /dev/sdb1
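
Every node needs a copy of this file; one way to push it out is the sync script from step 10:
python /root/setup.py /etc/ceph/ceph.conf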

7, Initialize Ceph
mkcephfs -a -c /etc/ceph/ceph.conf --mkbtrfs
Note: the osd.* directories must exist under /storage/ on every OSD node before running mkcephfs, for example:
ceph-node1: /storage/osd.0
ceph-node2: /storage/osd.1
ceph-node3: /storage/osd.2
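A sketch of creating them from master-ceph over the SSH trust set up earlier (also creating /storage on the master itself, since the mon data and mds keyring paths in the config above live there):
mkdir -p /storage
ssh ceph-node1 'mkdir -p /storage/osd.0'
ssh ceph-node2 'mkdir -p /storage/osd.1'
ssh ceph-node3 'mkdir -p /storage/osd.2'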
8, Restart the services
/etc/init.d/ceph -a start
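The same init script can also drive a single daemon; for example, to restart only osd.0 from the master (-a makes the script operate on the hosts named in ceph.conf over SSH):
/etc/init.d/ceph -a restart osd.0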
9, Check the Ceph status
ceph -s or ceph status
[root@master-ceph ceph]# ceph -s
cluster 296dc02c-36d7-44ef-ba4e-c39839b07671
health HEALTH_OK
monmap e1: 1 mons at {0=192.168.1.120:6789/0}, election epoch 2, quorum 0 0
osdmap e103: 3 osds: 3 up, 3 in
pgmap v195: 768 pgs: 768 active+clean; 9518 bytes data, 3035 MB used, 278 GB / 299 GB avail
mdsmap e4: 1/1/1 up {0=0=up:active}
[root@master-ceph ceph]# ceph df
GLOBAL:
    SIZE    AVAIL    RAW USED    %RAW USED
    299G    278G     3037M       0.99
POOLS:
    NAME        ID    USED    %USED    OBJECTS
    data        0     0       0        0
    metadata    1     9518    0        21
    rbd         2     0       0        0
[root@master-ceph ceph]#
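
With the cluster at HEALTH_OK, a client can try mounting CephFS through the kernel driver. A sketch only, not verified on this setup: the stock CentOS 6 kernel may lack the ceph module, and because cephx auth is enabled the admin secret (from the keyring generated by mkcephfs) has to be supplied in place of <admin-key>:
mkdir -p /mnt/ceph
mount -t ceph 192.168.1.120:6789:/ /mnt/ceph -o name=admin,secret=<admin-key>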

10, Sync script
[root@master-ceph .ssh]# cat /root/setup.py
#!/usr/bin/env python
# Sync the authorized_keys, hosts, and ceph.conf files to every node.
# Usage:
#   python /root/setup.py /etc/ceph/ceph.conf
#   python /root/setup.py /etc/hosts
#   python /root/setup.py /root/.ssh/authorized_keys
import os, sys

path = sys.argv[1]
for i in range(120, 124):
    ip = '192.168.1.%d' % i
    cmd = 'scp %s %s:%s' % (path, ip, path)
    os.system(cmd)