hadoop hbase 集群的安装(未整理,先记录在这)
2016-01-03 16:03
471 查看
ssh 免登陆的配置
ssh-keygen -t rsa
cd ~/.ssh
# ll
总用量 8
-rw------- 1 root root 1679 1月 2 21:04 id_rsa
-rw-r--r-- 1 root root 393 1月 2 21:04 id_rsa.pub
slave1
scp ~/.ssh/id_rsa.pub root@master:~/.ssh/id_rsa.pub_slave1
slave2
scp ~/.ssh/id_rsa.pub root@master:~/.ssh/id_rsa.pub_slave2
# ll
总用量 20
-rw------- 1 root root 1679 1月 2 21:04 id_rsa
-rw-r--r-- 1 root root 393 1月 2 21:04 id_rsa.pub
-rw-r--r-- 1 root root 393 1月 2 21:24 id_rsa.pub_slave1
-rw-r--r-- 1 root root 393 1月 2 21:24 id_rsa.pub_slave2
-rw-r--r-- 1 root root 808 1月 2 21:11 known_hosts
cd ~/.ssh
touch authorized_keys
cat ~/.ssh/id_rsa.pub >> authorized_keys
cat ~/.ssh/id_rsa.pub_slave1 >> authorized_keys
cat ~/.ssh/id_rsa.pub_slave2 >> authorized_keys
cat authorized_keys
# cat authorized_keys
ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA29JCTGZjQBQUubsyMeL0rCRsRLlDWi7FqQTyp5/u+3WtS2Sg8psi/k3B+okQ6rVx6fYaUvF7TsBzjZv51ru+3Utlh56XuxL+gtrVr4KV1St/1DLuOpAvHImEbRIQxzAedaxY8PLSScVnQCoU5T9XYXdjb/z3AcvXC8Kr5GtTwGEgxndmbdmKKw0+VBboJNjNZ1chCfCpdJdZv5DK7a6uUhElXC/60+/OkM0C6lKi1/UVYdwN+A3Lch8OjfcX3iABWLjf8g2Z+tuHyDJ74XGSkkTRmgUBG1zFobnYgfnBk3LmRRtMK6yY6gsI9sYAEJs9LvP72PsXv08EcHG9qGmUNQ== root@master
ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAkV6o6cLTfUclAQpC1oJRJi+0xtX6Bx3rLlEi/nZtw6CkpGkh3JIq5MJ9A7lSfVIcLTjv6i1MovrPc9wEbVao89E5l1gUBf8ZXcoDIg6y1UknRpdkfft348/NmIg858QAENXT3XVCDN4HviIM2kpdli5r+PLbis0XFdv5Coetx/bSHkak8v0rkQSIjMrQI6ClVI3sjOfZSYwT9C3TfNK0DI7hCyqHZnAuRS52v7ntti39KOnDmCPoBf+Aat4uv5zm3KmWG43qY+fFqsBiwCnT+7omKrdQSdhhosbZsPrc7oPJ0f2snyJDWalnZx9TFYOMoXlktiZZadFo41JEGJufiQ== root@slave1
ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAxQswKavM4rPd5GLwKNpOzNhjKiw8JOd0x2EPJJ0SayV5C+/99htVQ7e6tWNaMoHhO4JiWQXnkMK7PHd8J0EyI+IzfPTcjJOMlp2oduivPknp8THMjjYdVq+NhkybNeaF9/ZCZc7/S6x/hU+Q5nPxev9glBNeZtEFiOrSV0wPeKwykBUDPJItNZNwSFJedfxtNz+LC7TPkK+EwuEnLxZz03EEWYGv8105LROdHzuPnwSXooyXHEfNGmrGkeAeUC8ghKxOiUBXvAngjXZMtQIJlmiho3b5YD1Nfawl4/RIvFz+3R+KRFbVm/AULN5z375ekLk/w6SGjgmDsY71QrE2RQ== root@slave2
rm -rf ~/.ssh/id_rsa.pub_slave1
rm -rf ~/.ssh/id_rsa.pub_slave2
scp ~/.ssh/authorized_keys root@slave1:~/.ssh/authorized_keys
scp ~/.ssh/authorized_keys root@slave2:~/.ssh/authorized_keys
zookeeper 的安装
cd /usr/local/software
tar -zxvf zookeeper-3.4.7.tar.gz -C /usr/local
cd /usr/local/zookeeper-3.4.7/conf
cp zoo_sample.cfg zoo.cfg
chmod 777 zoo.cfg
mkdir /usr/local/zookeeper-3.4.7/dataDir
cd /usr/local/zookeeper-3.4.7/dataDir
touch myid
echo 1 >> myid
# 修改 zoo.cfg 中的 dataDir 路径，并在文件末尾添加如下 server 配置
dataDir=/usr/local/zookeeper-3.4.7/dataDir
server.1=master:2888:3888
server.2=slave1:2888:3888
server.3=slave2:2888:3888
scp -r /usr/local/zookeeper-3.4.7 root@slave1:/usr/local/
scp -r /usr/local/zookeeper-3.4.7 root@slave2:/usr/local/
# 注意：整目录复制会把 master 的 myid（内容为 1）一并带到从节点，
# 复制后需在 slave1、slave2 上分别把 dataDir/myid 的内容改为 2 和 3
/usr/local/zookeeper-3.4.7/bin/zkServer.sh start
Usage: /usr/local/zookeeper-3.4.7/bin/zkServer.sh {start|start-foreground|stop|restart|status|upgrade|print-cmd}
/usr/local/zookeeper-3.4.7/bin/zkServer.sh status
配置文件如下
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
dataDir=/usr/local/zookeeper-3.4.7/dataDir
# the port at which the clients will connect
clientPort=2181
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
#autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
#autopurge.purgeInterval=1
# 注意：本机条目写成 0.0.0.0 是为规避本机地址绑定问题，与上文 server.1=master 的写法不同，二者取其一即可
server.1=0.0.0.0:2888:3888
server.2=slave1:2888:3888
server.3=slave2:2888:3888
hadoop 集群的安装
tar -zxvf /usr/local/software/hadoop-2.6.0.tar.gz -C /usr/local
vi /etc/profile
HADOOP_HOME=/usr/local/hadoop-2.6.0
export HADOOP_HOME
source /etc/profile
cp /usr/local/hadoop-2.6.0/etc/hadoop/mapred-site.xml.template /usr/local/hadoop-2.6.0/etc/hadoop/mapred-site.xml
/usr/local/hadoop-2.6.0/etc/hadoop/hadoop-env.sh 这个文件的第一行
export JAVA_HOME=/usr/local/jdk1.8.0_65
/usr/local/hadoop-2.6.0/etc/hadoop/yarn-env.sh 这个文件的第一行
export JAVA_HOME=/usr/local/jdk1.8.0_65
/usr/local/hadoop-2.6.0/bin/hdfs namenode -format
/usr/local/hadoop-2.6.0/bin/hdfs dfsadmin -report
scp -r /usr/local/hadoop-2.6.0 root@slave1:/usr/local
scp -r /usr/local/hadoop-2.6.0 root@slave2:/usr/local
cat /usr/local/hadoop-2.6.0/etc/hadoop/slaves
# cat /usr/local/hadoop-2.6.0/etc/hadoop/slaves
slave1
slave2
scp -r /usr/local/jdk1.8.0_65 root@slave1:/usr/local
scp -r /usr/local/jdk1.8.0_65 root@slave2:/usr/local
scp /etc/profile root@slave1:/etc/profile
scp /etc/profile root@slave2:/etc/profile
/usr/local/hadoop-2.6.0/sbin/start-all.sh
/usr/local/hadoop-2.6.0/sbin/stop-all.sh
http://192.168.140.128:50070
# 先创建目标目录，再上传文件（原记录顺序相反，目录不存在时 -copyFromLocal 会失败）
/usr/local/hadoop-2.6.0/bin/hadoop fs -mkdir hdfs://master:9000/test
/usr/local/hadoop-2.6.0/bin/hadoop fs -copyFromLocal /usr/local/jdk1.8.0_65/src.zip hdfs://master:9000/test/src.zip
/usr/local/hadoop-2.6.0/bin/hadoop fs -ls /
/usr/local/hadoop-2.6.0/bin/hadoop fs -ls /test
# 报错的解决
rm -rf /usr/local/hadoop-2.6.0/etc/hadoop/slaves
touch /usr/local/hadoop-2.6.0/etc/hadoop/slaves
chmod 777 /usr/local/hadoop-2.6.0/etc/hadoop/slaves
scp /usr/local/hadoop-2.6.0/etc/hadoop/slaves root@slave1:/usr/local/hadoop-2.6.0/etc/hadoop/slaves
scp /usr/local/hadoop-2.6.0/etc/hadoop/slaves root@slave2:/usr/local/hadoop-2.6.0/etc/hadoop/slaves
# cat /etc/hosts
192.168.140.128 master
192.168.140.129 slave1
192.168.140.130 slave2
关闭ipv6
vi /etc/sysconfig/network-scripts/ifcfg-eth0
vi /etc/sysconfig/network
vi /etc/modprobe.d/dist.conf
hbase 的安装
cd /usr/local/software
tar -zxvf hbase-1.1.2-bin.tar.gz -C /usr/local
cd /usr/local/hbase-1.1.2/lib
ls -l | grep hadoop
# ls -l | grep hadoop
-rw-r--r-- 1 root root 17041 8月 27 10:57 hadoop-annotations-2.5.1.jar
-rw-r--r-- 1 root root 52449 8月 27 10:57 hadoop-auth-2.5.1.jar
-rw-r--r-- 1 root root 2557 8月 27 10:59 hadoop-client-2.5.1.jar
-rw-r--r-- 1 root root 2962685 8月 27 10:57 hadoop-common-2.5.1.jar
-rw-r--r-- 1 root root 7095230 8月 27 10:59 hadoop-hdfs-2.5.1.jar
-rw-r--r-- 1 root root 491409 8月 27 10:59 hadoop-mapreduce-client-app-2.5.1.jar
-rw-r--r-- 1 root root 662892 8月 27 10:59 hadoop-mapreduce-client-common-2.5.1.jar
-rw-r--r-- 1 root root 1498368 8月 27 10:57 hadoop-mapreduce-client-core-2.5.1.jar
-rw-r--r-- 1 root root 35733 8月 27 10:59 hadoop-mapreduce-client-jobclient-2.5.1.jar
-rw-r--r-- 1 root root 43642 8月 27 10:59 hadoop-mapreduce-client-shuffle-2.5.1.jar
-rw-r--r-- 1 root root 1649852 8月 27 10:57 hadoop-yarn-api-2.5.1.jar
-rw-r--r-- 1 root root 117982 8月 27 10:59 hadoop-yarn-client-2.5.1.jar
-rw-r--r-- 1 root root 1416427 8月 27 10:57 hadoop-yarn-common-2.5.1.jar
-rw-r--r-- 1 root root 242381 8月 27 10:59 hadoop-yarn-server-common-2.5.1.jar
-rw-r--r-- 1 root root 87662 8月 27 11:11 hbase-hadoop2-compat-1.1.2.jar
-rw-r--r-- 1 root root 35944 8月 27 11:11 hbase-hadoop-compat-1.1.2.jar
hadoop-annotations-2.5.1.jar
find -name 'hadoop-yarn-server-common*.jar'
cp /usr/local/hadoop-2.6.0/share/hadoop/common/lib/hadoop-annotations-2.6.0.jar /usr/local/hbase-1.1.2/lib/
rm -rf /usr/local/hbase-1.1.2/lib/hadoop-annotations-2.5.1.jar
cp /usr/local/hadoop-2.6.0/share/hadoop/common/lib/hadoop-auth-2.6.0.jar /usr/local/hbase-1.1.2/lib/
rm -rf /usr/local/hbase-1.1.2/lib/hadoop-auth-2.5.1.jar
cp /usr/local/hadoop-2.6.0/share/hadoop/common/hadoop-common-2.6.0.jar /usr/local/hbase-1.1.2/lib/
rm -rf /usr/local/hbase-1.1.2/lib/hadoop-common-2.5.1.jar
cp /usr/local/hadoop-2.6.0/share/hadoop/hdfs/hadoop-hdfs-2.6.0.jar /usr/local/hbase-1.1.2/lib/
rm -rf /usr/local/hbase-1.1.2/lib/hadoop-hdfs-2.5.1.jar
cp /usr/local/hadoop-2.6.0/share/hadoop/mapreduce/hadoop-mapreduce-client-app-2.6.0.jar /usr/local/hbase-1.1.2/lib/
rm -rf /usr/local/hbase-1.1.2/lib/hadoop-mapreduce-client-app-2.5.1.jar
cp /usr/local/hadoop-2.6.0/share/hadoop/mapreduce/hadoop-mapreduce-client-common-2.6.0.jar /usr/local/hbase-1.1.2/lib/
rm -rf /usr/local/hbase-1.1.2/lib/hadoop-mapreduce-client-common-2.5.1.jar
cp /usr/local/hadoop-2.6.0/share/hadoop/mapreduce/hadoop-mapreduce-client-core-2.6.0.jar /usr/local/hbase-1.1.2/lib/
rm -rf /usr/local/hbase-1.1.2/lib/hadoop-mapreduce-client-core-2.5.1.jar
cp /usr/local/hadoop-2.6.0/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-2.6.0.jar /usr/local/hbase-1.1.2/lib/
rm -rf /usr/local/hbase-1.1.2/lib/hadoop-mapreduce-client-jobclient-2.5.1.jar
cp /usr/local/hadoop-2.6.0/share/hadoop/mapreduce/hadoop-mapreduce-client-shuffle-2.6.0.jar /usr/local/hbase-1.1.2/lib/
rm -rf /usr/local/hbase-1.1.2/lib/hadoop-mapreduce-client-shuffle-2.5.1.jar
cp /usr/local/hadoop-2.6.0/share/hadoop/yarn/hadoop-yarn-api-2.6.0.jar /usr/local/hbase-1.1.2/lib/
rm -rf /usr/local/hbase-1.1.2/lib/hadoop-yarn-api-2.5.1.jar
cp /usr/local/hadoop-2.6.0/share/hadoop/yarn/hadoop-yarn-client-2.6.0.jar /usr/local/hbase-1.1.2/lib/
rm -rf /usr/local/hbase-1.1.2/lib/hadoop-yarn-client-2.5.1.jar
cp /usr/local/hadoop-2.6.0/share/hadoop/yarn/hadoop-yarn-common-2.6.0.jar /usr/local/hbase-1.1.2/lib/
rm -rf /usr/local/hbase-1.1.2/lib/hadoop-yarn-common-2.5.1.jar
cp /usr/local/hadoop-2.6.0/share/hadoop/yarn/hadoop-yarn-server-common-2.6.0.jar /usr/local/hbase-1.1.2/lib/
rm -rf /usr/local/hbase-1.1.2/lib/hadoop-yarn-server-common-2.5.1.jar
hbase 安装
mkdir /usr/local/hbase-1.1.2/pids
/usr/local/hadoop-2.6.0/bin/hadoop fs -mkdir hdfs://master:9000/hbase
<configuration>
<property>
<name>hbase.rootdir</name>
<value>hdfs://master:9000/hbase</value>
</property>
<property>
<name>hbase.tmp.dir</name>
<value>/usr/local/hbase-1.1.2/tmp</value>
</property>
<property>
<name>hbase.cluster.distributed</name>
<value>true</value>
</property>
<property>
<name>hbase.zookeeper.quorum</name>
<value>master,slave1,slave2</value>
</property>
</configuration>
scp -r /usr/local/hbase-1.1.2 root@slave1:/usr/local
scp -r /usr/local/hbase-1.1.2 root@slave2:/usr/local
export HBASE_HOME=/usr/local/hbase-1.1.2
export PATH=$PATH:$HBASE_HOME/bin
scp /etc/profile root@slave1:/etc/profile
scp /etc/profile root@slave2:/etc/profile
scp /usr/local/hbase-1.1.2/conf/hbase-site.xml root@slave1:/usr/local/hbase-1.1.2/conf/hbase-site.xml
scp /usr/local/hbase-1.1.2/conf/hbase-site.xml root@slave2:/usr/local/hbase-1.1.2/conf/hbase-site.xml
/usr/local/hbase-1.1.2/bin/start-hbase.sh
/usr/local/hbase-1.1.2/bin/stop-hbase.sh
date -R
yum install -y ntpdate
ntpdate time.nist.gov
cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
ntpdate us.pool.ntp.org
cp /usr/local/hadoop-2.6.0/share/hadoop/common/lib/htrace-core-3.0.4.jar /usr/local/hbase-1.1.2/lib
/usr/local/hbase-1.1.2/bin/hbase shell
create 'emp','emp_no','emp_name'
list
describe 'emp'
put 'emp', 'row_7369', 'emp_no', '7369'
put 'emp', 'row_7369', 'emp_name','Jay'
get 'emp', 'row_7369'
启动
/usr/local/zookeeper-3.4.7/bin/zkServer.sh start
/usr/local/hadoop-2.6.0/sbin/start-all.sh
/usr/local/hbase-1.1.2/bin/start-hbase.sh
停止
/usr/local/hbase-1.1.2/bin/stop-hbase.sh
/usr/local/hadoop-2.6.0/sbin/stop-all.sh
/usr/local/zookeeper-3.4.7/bin/zkServer.sh stop
（注：以下内容为上文安装记录的重复粘贴，与前文一致）
ssh-keygen -t rsa
cd ~/.ssh
# ll
总用量 8
-rw------- 1 root root 1679 1月 2 21:04 id_rsa
-rw-r--r-- 1 root root 393 1月 2 21:04 id_rsa.pub
slave1
scp ~/.ssh/id_rsa.pub root@master:~/.ssh/id_rsa.pub_slave1
slave2
scp ~/.ssh/id_rsa.pub root@master:~/.ssh/id_rsa.pub_slave2
# ll
总用量 20
-rw------- 1 root root 1679 1月 2 21:04 id_rsa
-rw-r--r-- 1 root root 393 1月 2 21:04 id_rsa.pub
-rw-r--r-- 1 root root 393 1月 2 21:24 id_rsa.pub_slave1
-rw-r--r-- 1 root root 393 1月 2 21:24 id_rsa.pub_slave2
-rw-r--r-- 1 root root 808 1月 2 21:11 known_hosts
cd ~/.ssh
touch authorized_keys
cat ~/.ssh/id_rsa.pub >> authorized_keys
cat ~/.ssh/id_rsa.pub_slave1 >> authorized_keys
cat ~/.ssh/id_rsa.pub_slave2 >> authorized_keys
cat authorized_keys
# cat authorized_keys
ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA29JCTGZjQBQUubsyMeL0rCRsRLlDWi7FqQTyp5/u+3WtS2Sg8psi/k3B+okQ6rVx6fYaUvF7TsBzjZv51ru+3Utlh56XuxL+gtrVr4KV1St/1DLuOpAvHImEbRIQxzAedaxY8PLSScVnQCoU5T9XYXdjb/z3AcvXC8Kr5GtTwGEgxndmbdmKKw0+VBboJNjNZ1chCfCpdJdZv5DK7a6uUhElXC/60+/OkM0C6lKi1/UVYdwN+A3Lch8OjfcX3iABWLjf8g2Z+tuHyDJ74XGSkkTRmgUBG1zFobnYgfnBk3LmRRtMK6yY6gsI9sYAEJs9LvP72PsXv08EcHG9qGmUNQ== root@master
ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAkV6o6cLTfUclAQpC1oJRJi+0xtX6Bx3rLlEi/nZtw6CkpGkh3JIq5MJ9A7lSfVIcLTjv6i1MovrPc9wEbVao89E5l1gUBf8ZXcoDIg6y1UknRpdkfft348/NmIg858QAENXT3XVCDN4HviIM2kpdli5r+PLbis0XFdv5Coetx/bSHkak8v0rkQSIjMrQI6ClVI3sjOfZSYwT9C3TfNK0DI7hCyqHZnAuRS52v7ntti39KOnDmCPoBf+Aat4uv5zm3KmWG43qY+fFqsBiwCnT+7omKrdQSdhhosbZsPrc7oPJ0f2snyJDWalnZx9TFYOMoXlktiZZadFo41JEGJufiQ== root@slave1
ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAxQswKavM4rPd5GLwKNpOzNhjKiw8JOd0x2EPJJ0SayV5C+/99htVQ7e6tWNaMoHhO4JiWQXnkMK7PHd8J0EyI+IzfPTcjJOMlp2oduivPknp8THMjjYdVq+NhkybNeaF9/ZCZc7/S6x/hU+Q5nPxev9glBNeZtEFiOrSV0wPeKwykBUDPJItNZNwSFJedfxtNz+LC7TPkK+EwuEnLxZz03EEWYGv8105LROdHzuPnwSXooyXHEfNGmrGkeAeUC8ghKxOiUBXvAngjXZMtQIJlmiho3b5YD1Nfawl4/RIvFz+3R+KRFbVm/AULN5z375ekLk/w6SGjgmDsY71QrE2RQ== root@slave2
rm -rf ~/.ssh/id_rsa.pub_slave1
rm -rf ~/.ssh/id_rsa.pub_slave2
scp ~/.ssh/authorized_keys root@slave1:~/.ssh/authorized_keys
scp ~/.ssh/authorized_keys root@slave2:~/.ssh/authorized_keys
zookeeper 的安装
cd /usr/local/software
tar -zxvf zookeeper-3.4.7.tar.gz -C /usr/local
cd /usr/local/zookeeper-3.4.7/conf
cp zoo_sample.cfg zoo.cfg
chmod 777 zoo.cfg
mkdir /usr/local/zookeeper-3.4.7/dataDir
cd /usr/local/zookeeper-3.4.7/dataDir
touch myid
echo 1 >> myid
# 修改 zoo.cfg 中的 dataDir 路径，并在文件末尾添加如下 server 配置
dataDir=/usr/local/zookeeper-3.4.7/dataDir
server.1=master:2888:3888
server.2=slave1:2888:3888
server.3=slave2:2888:3888
scp -r /usr/local/zookeeper-3.4.7 root@slave1:/usr/local/
scp -r /usr/local/zookeeper-3.4.7 root@slave2:/usr/local/
# 注意：整目录复制会把 master 的 myid（内容为 1）一并带到从节点，
# 复制后需在 slave1、slave2 上分别把 dataDir/myid 的内容改为 2 和 3
/usr/local/zookeeper-3.4.7/bin/zkServer.sh start
Usage: /usr/local/zookeeper-3.4.7/bin/zkServer.sh {start|start-foreground|stop|restart|status|upgrade|print-cmd}
/usr/local/zookeeper-3.4.7/bin/zkServer.sh status
配置文件如下
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
dataDir=/usr/local/zookeeper-3.4.7/dataDir
# the port at which the clients will connect
clientPort=2181
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
#autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
#autopurge.purgeInterval=1
# 注意：本机条目写成 0.0.0.0 是为规避本机地址绑定问题，与上文 server.1=master 的写法不同，二者取其一即可
server.1=0.0.0.0:2888:3888
server.2=slave1:2888:3888
server.3=slave2:2888:3888
hadoop 集群的安装
tar -zxvf /usr/local/software/hadoop-2.6.0.tar.gz -C /usr/local
vi /etc/profile
HADOOP_HOME=/usr/local/hadoop-2.6.0
export HADOOP_HOME
source /etc/profile
cp /usr/local/hadoop-2.6.0/etc/hadoop/mapred-site.xml.template /usr/local/hadoop-2.6.0/etc/hadoop/mapred-site.xml
/usr/local/hadoop-2.6.0/etc/hadoop/hadoop-env.sh 这个文件的第一行
export JAVA_HOME=/usr/local/jdk1.8.0_65
/usr/local/hadoop-2.6.0/etc/hadoop/yarn-env.sh 这个文件的第一行
export JAVA_HOME=/usr/local/jdk1.8.0_65
/usr/local/hadoop-2.6.0/bin/hdfs namenode -format
/usr/local/hadoop-2.6.0/bin/hdfs dfsadmin -report
scp -r /usr/local/hadoop-2.6.0 root@slave1:/usr/local
scp -r /usr/local/hadoop-2.6.0 root@slave2:/usr/local
cat /usr/local/hadoop-2.6.0/etc/hadoop/slaves
# cat /usr/local/hadoop-2.6.0/etc/hadoop/slaves
slave1
slave2
scp -r /usr/local/jdk1.8.0_65 root@slave1:/usr/local
scp -r /usr/local/jdk1.8.0_65 root@slave2:/usr/local
scp /etc/profile root@slave1:/etc/profile
scp /etc/profile root@slave2:/etc/profile
/usr/local/hadoop-2.6.0/sbin/start-all.sh
/usr/local/hadoop-2.6.0/sbin/stop-all.sh
http://192.168.140.128:50070
# 先创建目标目录，再上传文件（原记录顺序相反，目录不存在时 -copyFromLocal 会失败）
/usr/local/hadoop-2.6.0/bin/hadoop fs -mkdir hdfs://master:9000/test
/usr/local/hadoop-2.6.0/bin/hadoop fs -copyFromLocal /usr/local/jdk1.8.0_65/src.zip hdfs://master:9000/test/src.zip
/usr/local/hadoop-2.6.0/bin/hadoop fs -ls /
/usr/local/hadoop-2.6.0/bin/hadoop fs -ls /test
# 报错的解决
rm -rf /usr/local/hadoop-2.6.0/etc/hadoop/slaves
touch /usr/local/hadoop-2.6.0/etc/hadoop/slaves
chmod 777 /usr/local/hadoop-2.6.0/etc/hadoop/slaves
scp /usr/local/hadoop-2.6.0/etc/hadoop/slaves root@slave1:/usr/local/hadoop-2.6.0/etc/hadoop/slaves
scp /usr/local/hadoop-2.6.0/etc/hadoop/slaves root@slave2:/usr/local/hadoop-2.6.0/etc/hadoop/slaves
# cat /etc/hosts
192.168.140.128 master
192.168.140.129 slave1
192.168.140.130 slave2
关闭ipv6
vi /etc/sysconfig/network-scripts/ifcfg-eth0
vi /etc/sysconfig/network
vi /etc/modprobe.d/dist.conf
hbase 的安装
cd /usr/local/software
tar -zxvf hbase-1.1.2-bin.tar.gz -C /usr/local
cd /usr/local/hbase-1.1.2/lib
ls -l | grep hadoop
# ls -l | grep hadoop
-rw-r--r-- 1 root root 17041 8月 27 10:57 hadoop-annotations-2.5.1.jar
-rw-r--r-- 1 root root 52449 8月 27 10:57 hadoop-auth-2.5.1.jar
-rw-r--r-- 1 root root 2557 8月 27 10:59 hadoop-client-2.5.1.jar
-rw-r--r-- 1 root root 2962685 8月 27 10:57 hadoop-common-2.5.1.jar
-rw-r--r-- 1 root root 7095230 8月 27 10:59 hadoop-hdfs-2.5.1.jar
-rw-r--r-- 1 root root 491409 8月 27 10:59 hadoop-mapreduce-client-app-2.5.1.jar
-rw-r--r-- 1 root root 662892 8月 27 10:59 hadoop-mapreduce-client-common-2.5.1.jar
-rw-r--r-- 1 root root 1498368 8月 27 10:57 hadoop-mapreduce-client-core-2.5.1.jar
-rw-r--r-- 1 root root 35733 8月 27 10:59 hadoop-mapreduce-client-jobclient-2.5.1.jar
-rw-r--r-- 1 root root 43642 8月 27 10:59 hadoop-mapreduce-client-shuffle-2.5.1.jar
-rw-r--r-- 1 root root 1649852 8月 27 10:57 hadoop-yarn-api-2.5.1.jar
-rw-r--r-- 1 root root 117982 8月 27 10:59 hadoop-yarn-client-2.5.1.jar
-rw-r--r-- 1 root root 1416427 8月 27 10:57 hadoop-yarn-common-2.5.1.jar
-rw-r--r-- 1 root root 242381 8月 27 10:59 hadoop-yarn-server-common-2.5.1.jar
-rw-r--r-- 1 root root 87662 8月 27 11:11 hbase-hadoop2-compat-1.1.2.jar
-rw-r--r-- 1 root root 35944 8月 27 11:11 hbase-hadoop-compat-1.1.2.jar
hadoop-annotations-2.5.1.jar
find -name 'hadoop-yarn-server-common*.jar'
cp /usr/local/hadoop-2.6.0/share/hadoop/common/lib/hadoop-annotations-2.6.0.jar /usr/local/hbase-1.1.2/lib/
rm -rf /usr/local/hbase-1.1.2/lib/hadoop-annotations-2.5.1.jar
cp /usr/local/hadoop-2.6.0/share/hadoop/common/lib/hadoop-auth-2.6.0.jar /usr/local/hbase-1.1.2/lib/
rm -rf /usr/local/hbase-1.1.2/lib/hadoop-auth-2.5.1.jar
cp /usr/local/hadoop-2.6.0/share/hadoop/common/hadoop-common-2.6.0.jar /usr/local/hbase-1.1.2/lib/
rm -rf /usr/local/hbase-1.1.2/lib/hadoop-common-2.5.1.jar
cp /usr/local/hadoop-2.6.0/share/hadoop/hdfs/hadoop-hdfs-2.6.0.jar /usr/local/hbase-1.1.2/lib/
rm -rf /usr/local/hbase-1.1.2/lib/hadoop-hdfs-2.5.1.jar
cp /usr/local/hadoop-2.6.0/share/hadoop/mapreduce/hadoop-mapreduce-client-app-2.6.0.jar /usr/local/hbase-1.1.2/lib/
rm -rf /usr/local/hbase-1.1.2/lib/hadoop-mapreduce-client-app-2.5.1.jar
cp /usr/local/hadoop-2.6.0/share/hadoop/mapreduce/hadoop-mapreduce-client-common-2.6.0.jar /usr/local/hbase-1.1.2/lib/
rm -rf /usr/local/hbase-1.1.2/lib/hadoop-mapreduce-client-common-2.5.1.jar
cp /usr/local/hadoop-2.6.0/share/hadoop/mapreduce/hadoop-mapreduce-client-core-2.6.0.jar /usr/local/hbase-1.1.2/lib/
rm -rf /usr/local/hbase-1.1.2/lib/hadoop-mapreduce-client-core-2.5.1.jar
cp /usr/local/hadoop-2.6.0/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-2.6.0.jar /usr/local/hbase-1.1.2/lib/
rm -rf /usr/local/hbase-1.1.2/lib/hadoop-mapreduce-client-jobclient-2.5.1.jar
cp /usr/local/hadoop-2.6.0/share/hadoop/mapreduce/hadoop-mapreduce-client-shuffle-2.6.0.jar /usr/local/hbase-1.1.2/lib/
rm -rf /usr/local/hbase-1.1.2/lib/hadoop-mapreduce-client-shuffle-2.5.1.jar
cp /usr/local/hadoop-2.6.0/share/hadoop/yarn/hadoop-yarn-api-2.6.0.jar /usr/local/hbase-1.1.2/lib/
rm -rf /usr/local/hbase-1.1.2/lib/hadoop-yarn-api-2.5.1.jar
cp /usr/local/hadoop-2.6.0/share/hadoop/yarn/hadoop-yarn-client-2.6.0.jar /usr/local/hbase-1.1.2/lib/
rm -rf /usr/local/hbase-1.1.2/lib/hadoop-yarn-client-2.5.1.jar
cp /usr/local/hadoop-2.6.0/share/hadoop/yarn/hadoop-yarn-common-2.6.0.jar /usr/local/hbase-1.1.2/lib/
rm -rf /usr/local/hbase-1.1.2/lib/hadoop-yarn-common-2.5.1.jar
cp /usr/local/hadoop-2.6.0/share/hadoop/yarn/hadoop-yarn-server-common-2.6.0.jar /usr/local/hbase-1.1.2/lib/
rm -rf /usr/local/hbase-1.1.2/lib/hadoop-yarn-server-common-2.5.1.jar
hbase 安装
mkdir /usr/local/hbase-1.1.2/pids
/usr/local/hadoop-2.6.0/bin/hadoop fs -mkdir hdfs://master:9000/hbase
<configuration>
<property>
<name>hbase.rootdir</name>
<value>hdfs://master:9000/hbase</value>
</property>
<property>
<name>hbase.tmp.dir</name>
<value>/usr/local/hbase-1.1.2/tmp</value>
</property>
<property>
<name>hbase.cluster.distributed</name>
<value>true</value>
</property>
<property>
<name>hbase.zookeeper.quorum</name>
<value>master,slave1,slave2</value>
</property>
</configuration>
scp -r /usr/local/hbase-1.1.2 root@slave1:/usr/local
scp -r /usr/local/hbase-1.1.2 root@slave2:/usr/local
export HBASE_HOME=/usr/local/hbase-1.1.2
export PATH=$PATH:$HBASE_HOME/bin
scp /etc/profile root@slave1:/etc/profile
scp /etc/profile root@slave2:/etc/profile
scp /usr/local/hbase-1.1.2/conf/hbase-site.xml root@slave1:/usr/local/hbase-1.1.2/conf/hbase-site.xml
scp /usr/local/hbase-1.1.2/conf/hbase-site.xml root@slave2:/usr/local/hbase-1.1.2/conf/hbase-site.xml
/usr/local/hbase-1.1.2/bin/start-hbase.sh
/usr/local/hbase-1.1.2/bin/stop-hbase.sh
date -R
yum install -y ntpdate
ntpdate time.nist.gov
cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
ntpdate us.pool.ntp.org
cp /usr/local/hadoop-2.6.0/share/hadoop/common/lib/htrace-core-3.0.4.jar /usr/local/hbase-1.1.2/lib
/usr/local/hbase-1.1.2/bin/hbase shell
create 'emp','emp_no','emp_name'
list
describe 'emp'
put 'emp', 'row_7369', 'emp_no', '7369'
put 'emp', 'row_7369', 'emp_name','Jay'
get 'emp', 'row_7369'
启动
/usr/local/zookeeper-3.4.7/bin/zkServer.sh start
/usr/local/hadoop-2.6.0/sbin/start-all.sh
/usr/local/hbase-1.1.2/bin/start-hbase.sh
停止
/usr/local/hbase-1.1.2/bin/stop-hbase.sh
/usr/local/hadoop-2.6.0/sbin/stop-all.sh
/usr/local/zookeeper-3.4.7/bin/zkServer.sh stop
相关文章推荐
- Hadoop集群中pig工具的安装过程记录
- hadoop,hbase,hive安装全记录(转)
- 【Nutch2.3基础教程】集成Nutch/Hadoop/Hbase/Solr构建搜索引擎:安装及运行【集群环境】 分类: 1_Nutch 0_jediael开发 2015-01-24 17:24 3522人阅读 评论(1) 收藏
- 整理docker及Hadoop脚本(三)-实现一键式命令行远程安装docker集群
- hadoop1.2.1+zk-3.4.5+hbase-0.94.1集群安装过程详解
- Hadoop、HBase集群问题记录
- 大数据: 完全分布式Hadoop集群-HBase安装
- HBase入门笔记(三)-- 完全分布模式Hadoop集群安装配置
- 基于hadoop集群的Hive1.2.1、Hbase1.2.2、Zookeeper3.4.8完全分布式安装
- Hadoop集群安装过程详细记录
- HBASE 0.98版本安装,二步曲:安装HADOOP到集群
- Hbase完全分布式集群安装配置(Hbase1.0.0,Hadoop2.6.0)
- Apache Hadoop集群安装记录-基于CentOS虚拟机(2主4从)
- hadoop hbase hive 集群安装
- <Hadoop>HBase 集群安装
- 第十二章 Ganglia监控Hadoop及Hbase集群性能(安装配置)
- 【Nutch2.3基础教程】集成Nutch/Hadoop/Hbase/Solr构建搜索引擎:安装及运行【集群环境】
- hadoop,hbase,hive安装全记录 (转)
- Ubuntu 13.10 Hadoop集群安装记录
- hadoop2.2.0集群基础上安装zookeeper3.4.5和hbase0.96(双机热备)