
GlusterFS Installation and Configuration

2014-09-11 09:42
Source: http://www.btschina.com/home/index.php/glusterfs-an-zhuang-yu-pei-zhi.html

GlusterFS is an open-source distributed file system; the project behind it was acquired by Red Hat in 2011. It offers high scalability, high performance, high availability, and elastic scale-out, and because the design has no metadata server, GlusterFS has no single point of failure. For details, see the official site: www.gluster.org.
Deployment environment:

OS: CentOS release 6.5 (Final) x64

Servers:

c1:192.168.242.132

c2:192.168.242.133

c3:192.168.242.134

c4:192.168.242.135

hosts:

192.168.242.132 c1

192.168.242.133 c2

192.168.242.134 c3

192.168.242.135 c4
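
These entries need to be in /etc/hosts on every node (and later on the client as well). A minimal sketch for appending them, run as root on each machine:

# Append the cluster name/IP mappings to /etc/hosts (run on every node)
cat >> /etc/hosts <<'EOF'
192.168.242.132 c1
192.168.242.133 c2
192.168.242.134 c3
192.168.242.135 c4
EOF
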
Procedure:

Run on c1/c2/c3/c4:

[root@c1 ~]# wget -P /etc/yum.repos.d http://download.gluster.org/pub/gluster/glusterfs/LATEST/CentOS/glusterfs-epel.repo

[root@c1 yum.repos.d]# yum install -y glusterfs glusterfs-server glusterfs-fuse

[root@c1 yum.repos.d]# /etc/init.d/glusterd start

Starting glusterd: [ OK ]

[root@c1 yum.repos.d]# chkconfig glusterd on
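
The same three steps have to run on all four nodes. If passwordless SSH from one admin host is available (an assumption, not part of the original setup), a small loop avoids the repetition:

# Install, start, and enable glusterd on every node in one pass
for h in c1 c2 c3 c4; do
  ssh root@$h 'wget -P /etc/yum.repos.d http://download.gluster.org/pub/gluster/glusterfs/LATEST/CentOS/glusterfs-epel.repo &&
    yum install -y glusterfs glusterfs-server glusterfs-fuse &&
    /etc/init.d/glusterd start && chkconfig glusterd on'
done
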
Configure the cluster on c1

[root@c1 ~]# gluster peer probe c1

peer probe: success. Probe on localhost not needed

[root@c1 ~]# gluster peer probe c2
peer probe: success.

[root@c1 ~]# gluster peer probe c3

peer probe: success.

[root@c1 ~]# gluster peer probe c4

peer probe: success.
If c1 shows up in the peer list as an IP address rather than a hostname, it can cause communication problems later on. This can be fixed from another node by detaching the IP entry and re-probing by hostname:

[root@c3 ~]# gluster peer status

Number of Peers: 3
Hostname: 192.168.242.132

Uuid: 6e8d6880-ec36-4331-a806-2e8fb4fda7be

State: Peer in Cluster (Connected)
Hostname: c2

Uuid: 9a722f50-911e-4181-823d-572296640486

State: Peer in Cluster (Connected)
Hostname: c4

Uuid: 1ee3588a-8a16-47ff-ba59-c0285a2a95bd

State: Peer in Cluster (Connected)

[root@c3 ~]# gluster peer detach 192.168.242.132

peer detach: success

[root@c3 ~]# gluster peer probe c1

peer probe: success.

[root@c3 ~]# gluster peer status

Number of Peers: 3
Hostname: c2

Uuid: 9a722f50-911e-4181-823d-572296640486

State: Peer in Cluster (Connected)
Hostname: c4

Uuid: 1ee3588a-8a16-47ff-ba59-c0285a2a95bd

State: Peer in Cluster (Connected)
Hostname: c1

Uuid: 6e8d6880-ec36-4331-a806-2e8fb4fda7be

State: Peer in Cluster (Connected)
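
A quick sanity check that the cluster is healthy: from each node, every other node should show up as a connected peer, so the count below should be 3 on all four machines:

# Count the peers this node sees as connected (expect 3)
gluster peer status | grep -c 'State: Peer in Cluster (Connected)'
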
Create the cluster volumes on c1. With replica 2 and four bricks, Gluster pairs the bricks into two replica sets and distributes files across the pairs (hence the Distributed-Replicate type, 2 x 2 = 4). The bricks here sit on the root filesystem, which gluster normally refuses; force overrides that check.

[root@c1 ~]# gluster volume create datavolume1 replica 2 transport tcp c1:/usr/local/share/datavolume1 c2:/usr/local/share/datavolume1 c3:/usr/local/share/datavolume1 c4:/usr/local/share/datavolume1 force

volume create: datavolume1: success: please start the volume to access data

[root@c1 ~]# gluster volume create datavolume2 replica 2 transport tcp c1:/usr/local/share/datavolume2 c2:/usr/local/share/datavolume2 c3:/usr/local/share/datavolume2 c4:/usr/local/share/datavolume2 force

volume create: datavolume2: success: please start the volume to access data

[root@c1 ~]# gluster volume create datavolume3 replica 2 transport tcp c1:/usr/local/share/datavolume3 c2:/usr/local/share/datavolume3 c3:/usr/local/share/datavolume3 c4:/usr/local/share/datavolume3 force

volume create: datavolume3: success: please start the volume to access data

[root@c1 ~]# gluster volume start datavolume1

volume start: datavolume1: success

[root@c1 ~]# gluster volume start datavolume2

volume start: datavolume2: success

[root@c1 ~]# gluster volume start datavolume3

volume start: datavolume3: success
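
The three volumes differ only in name, so the create/start steps can also be scripted; a sketch using the same bricks as above:

# Create and start three identical replica-2 volumes
for v in datavolume1 datavolume2 datavolume3; do
  gluster volume create $v replica 2 transport tcp \
    c1:/usr/local/share/$v c2:/usr/local/share/$v \
    c3:/usr/local/share/$v c4:/usr/local/share/$v force
  gluster volume start $v
done
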
[root@c1 ~]# gluster volume info
Volume Name: datavolume1

Type: Distributed-Replicate

Volume ID: 819d3dc4-2a3a-4342-b49b-3b7961ef624f

Status: Started

Number of Bricks: 2 x 2 = 4

Transport-type: tcp

Bricks:

Brick1: c1:/usr/local/share/datavolume1

Brick2: c2:/usr/local/share/datavolume1

Brick3: c3:/usr/local/share/datavolume1

Brick4: c4:/usr/local/share/datavolume1
Volume Name: datavolume2

Type: Distributed-Replicate

Volume ID: d9ebaee7-ef91-4467-9e44-217a63635bfc

Status: Started

Number of Bricks: 2 x 2 = 4

Transport-type: tcp

Bricks:

Brick1: c1:/usr/local/share/datavolume2

Brick2: c2:/usr/local/share/datavolume2

Brick3: c3:/usr/local/share/datavolume2

Brick4: c4:/usr/local/share/datavolume2
Volume Name: datavolume3

Type: Distributed-Replicate

Volume ID: 1e8b21db-f377-468b-b76e-868edde93f15

Status: Started

Number of Bricks: 2 x 2 = 4

Transport-type: tcp

Bricks:

Brick1: c1:/usr/local/share/datavolume3

Brick2: c2:/usr/local/share/datavolume3

Brick3: c3:/usr/local/share/datavolume3

Brick4: c4:/usr/local/share/datavolume3
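
Before moving to the client, it is worth confirming that the brick processes are actually serving; gluster volume status lists each brick with its port, PID, and online state:

# Every brick should report Online: Y
gluster volume status datavolume1
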
Client deployment

CentOS 6.5 x64, with the same /etc/hosts entries added

[root@c5 ~]#wget -P /etc/yum.repos.d http://download.gluster.org/pub/gluster/glusterfs/LATEST/CentOS/glusterfs-epel.repo

[root@c5 ~]#yum install -y glusterfs glusterfs-fuse

[root@c5 ~]# mkdir -p /mnt/{datavolume1,datavolume2,datavolume3}

[root@c5 ~]# mount -t glusterfs -o ro c1:datavolume1 /mnt/datavolume1/

[root@c5 ~]# mount -t glusterfs -o ro c1:datavolume2 /mnt/datavolume2/

[root@c5 ~]# mount -t glusterfs -o ro c1:datavolume3 /mnt/datavolume3/

[root@c5 ~]# df -h

Filesystem Size Used Avail Use% Mounted on

/dev/mapper/VolGroup-lv_root

38G 840M 36G 3% /

tmpfs 242M 0 242M 0% /dev/shm

/dev/sda1 485M 32M 429M 7% /boot

c1:datavolume1 57G 2.4G 52G 5% /mnt/datavolume1

c1:datavolume2 57G 2.4G 52G 5% /mnt/datavolume2

c1:datavolume3 57G 2.4G 52G 5% /mnt/datavolume3
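
To make the mounts survive a reboot, matching entries can go into the client's /etc/fstab; a sketch, where _netdev defers mounting until the network is up:

# /etc/fstab entries for the three GlusterFS mounts
c1:/datavolume1  /mnt/datavolume1  glusterfs  defaults,_netdev  0 0
c1:/datavolume2  /mnt/datavolume2  glusterfs  defaults,_netdev  0 0
c1:/datavolume3  /mnt/datavolume3  glusterfs  defaults,_netdev  0 0
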
Client testing (remount datavolume1 read-write, then create a test file):

[root@c5 ~]# umount /mnt/datavolume1/

[root@c5 ~]# mount -t glusterfs c1:datavolume1 /mnt/datavolume1/

[root@c5 ~]# touch /mnt/datavolume1/test.txt

[root@c5 ~]# ls /mnt/datavolume1/test.txt

/mnt/datavolume1/test.txt
[root@c2 ~]# ls -al /usr/local/share/datavolume1/

total 16

drwxr-xr-x. 3 root root 4096 May 15 03:50 .

drwxr-xr-x. 8 root root 4096 May 15 02:28 ..

drw-------. 6 root root 4096 May 15 03:50 .glusterfs

-rw-r--r--. 2 root root 0 May 20 2014 test.txt

[root@c1 ~]# ls -al /usr/local/share/datavolume1/

total 16

drwxr-xr-x. 3 root root 4096 May 15 03:50 .

drwxr-xr-x. 8 root root 4096 May 15 02:28 ..

drw-------. 6 root root 4096 May 15 03:50 .glusterfs

-rw-r--r--. 2 root root 0 May 20 2014 test.txt
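
With two replica pairs (c1/c2 and c3/c4), each file is stored on exactly one pair; here test.txt landed on c1 and c2. Assuming passwordless SSH between the nodes (not part of the original setup), the placement can be checked in one pass:

# List the brick directory on every node; the file should appear on one replica pair
for h in c1 c2 c3 c4; do echo "== $h =="; ssh root@$h ls /usr/local/share/datavolume1/; done
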
Deleting a GlusterFS volume:

gluster volume stop datavolume1

gluster volume delete datavolume1
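
Note that deleting a volume does not erase the brick directories. To reuse the same path as a brick in a new volume, the Gluster extended attributes usually need to be cleared first; a sketch, run on each former brick host:

# Strip the old volume's extended attributes and metadata so the path can be reused
setfattr -x trusted.glusterfs.volume-id /usr/local/share/datavolume1
setfattr -x trusted.gfid /usr/local/share/datavolume1
rm -rf /usr/local/share/datavolume1/.glusterfs
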
Detaching a GlusterFS node:

gluster peer detach c4
Access control (restrict which client networks may mount the volume):

gluster volume set datavolume1 auth.allow 192.168.242.*,192.168.241.*
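
The restriction takes effect immediately and appears under "Options Reconfigured" in the volume info, which gives a quick way to confirm it:

# Verify the ACL was applied
gluster volume info datavolume1 | grep auth.allow
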
Adding GlusterFS nodes:

gluster peer probe c6

gluster peer probe c7

gluster volume add-brick datavolume1 c6:/usr/local/share/datavolume1 c7:/usr/local/share/datavolume1
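
Because datavolume1 was created with replica 2, bricks must be added in multiples of two; c6 and c7 here form a third replica pair. The new layout can be confirmed afterwards (Number of Bricks should now read 3 x 2 = 6):

gluster volume info datavolume1 | grep 'Number of Bricks'
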
Migrating GlusterFS brick data (start begins the migration, status tracks its progress, and commit finalizes it once status reports completed):

gluster volume remove-brick datavolume1 c1:/usr/local/share/datavolume1 c6:/usr/local/share/datavolume1 start

gluster volume remove-brick datavolume1 c1:/usr/local/share/datavolume1 c6:/usr/local/share/datavolume1 status

gluster volume remove-brick datavolume1 c1:/usr/local/share/datavolume1 c6:/usr/local/share/datavolume1 commit
Rebalancing data:

gluster volume rebalance datavolume1 start

gluster volume rebalance datavolume1 status

gluster volume rebalance datavolume1 stop
Repairing GlusterFS data (for example, when c1 has gone down):

gluster volume replace-brick datavolume1 c1:/usr/local/share/datavolume1 c6:/usr/local/share/datavolume1 commit force

gluster volume heal datavolume1 full
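
Healing runs in the background; progress can be followed with the heal-info subcommand (output format varies by GlusterFS version):

# List the entries still pending heal on each brick
gluster volume heal datavolume1 info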