
Hadoop 2.7 Fully Distributed Installation

2015-06-16 21:15
1. Install the JDK.
2. Set up passwordless SSH trust between all nodes, for example:
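A minimal sketch of the key exchange, assuming the root user runs the Hadoop daemons on the hosts defined in step 5:

ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
ssh-copy-id root@NameNode34
ssh-copy-id root@DataNode35
ssh-copy-id root@DataNode37
ssh-copy-id root@DataNode38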
3. Edit /etc/profile:
HADOOP_PREFIX=/opt/hadoop
JAVA_HOME=/opt/jdk18
PATH=$PATH:$JAVA_HOME/bin:$HADOOP_PREFIX/bin:$HADOOP_PREFIX/sbin
export HADOOP_PREFIX PATH JAVA_HOME
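After editing, reload the profile and confirm the hadoop command resolves, as a quick sanity check:

source /etc/profile
hadoop version    # should print Hadoop 2.7.x
which hadoop      # should point at /opt/hadoop/bin/hadoop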
4. Edit etc/hadoop/hadoop-env.sh under the Hadoop installation directory:
export JAVA_HOME=/opt/jdk18
export HADOOP_COMMON_HOME=/opt/hadoop
5. Edit /etc/hosts:
192.168.98.34 NameNode34
192.168.98.35 DataNode35
192.168.98.37 DataNode37
192.168.98.38 DataNode38
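Before going further, it is worth confirming that every hostname resolves (run from NameNode34, and repeat from the workers for the NameNode34 entry):

ping -c 1 DataNode35
ping -c 1 DataNode37
ping -c 1 DataNode38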

6.1 core-site.xml

<configuration>
<property>
<name>hadoop.tmp.dir</name>
<value>/opt/hadoop/tmp</value>
<description>A base for other temporary directories.</description>
</property>
<property>
<name>fs.defaultFS</name>
<value>hdfs://NameNode34:9000</value>
</property>
<property>
<name>io.file.buffer.size</name>
<value>131072</value>
</property>
</configuration>
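Once core-site.xml is saved, the effective value can be confirmed with hdfs getconf (assumes the bin directory from step 3 is on PATH):

hdfs getconf -confKey fs.defaultFS    # should print hdfs://NameNode34:9000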

6.2 hdfs-site.xml

<configuration>
<!--
<property>
<name>dfs.replication</name>
<value>3</value>
</property>
-->
<property>
<name>dfs.namenode.name.dir</name>
<value>file:/opt/hadoop/Name</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>file:/opt/hadoop/Data</value>
</property>
<property>
<name>dfs.blocksize</name>
<value>268435456</value>
</property>
<property>
<name>dfs.namenode.handler.count</name>
<value>100</value>
</property>
</configuration>
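The local paths above are choices of this setup; Hadoop will usually create them at format or startup time, but creating them up front avoids ownership and permission surprises:

mkdir -p /opt/hadoop/Name    # on NameNode34
mkdir -p /opt/hadoop/Data    # on every DataNode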

6.3 yarn-site.xml

<configuration>
<!-- Site specific YARN configuration properties -->
<!-- Note: the mapreduce.job.ubertask.* settings below are MapReduce job
properties and normally belong in mapred-site.xml rather than here. -->
<property>
<name>mapreduce.job.ubertask.enable</name>
<value>true</value>
</property>
<property>
<name>mapreduce.job.ubertask.maxmaps</name>
<value>9</value>
</property>
<property>
<name>mapreduce.job.ubertask.maxreduces</name>
<value>5</value>
</property>

<property>
<name>yarn.acl.enable</name>
<value>false</value>
</property>
<property>
<name>yarn.admin.acl</name>
<value>*</value>
</property>
<property>
<name>yarn.log-aggregation-enable</name>
<value>false</value>
</property>
<property>
<name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
<value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
<property>
<name>yarn.resourcemanager.address</name>
<value>NameNode34:8032</value>
</property>
<property>
<name>yarn.resourcemanager.scheduler.address</name>
<value>NameNode34:8030</value>
</property>
<property>
<name>yarn.resourcemanager.resource-tracker.address</name>
<value>NameNode34:8035</value>
</property>
<property>
<name>yarn.resourcemanager.admin.address</name>
<value>NameNode34:8033</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.address</name>
<value>NameNode34:8088</value>
</property>
<property>
<name>yarn.resourcemanager.hostname</name>
<value>NameNode34</value>
</property>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
</configuration>

6.4 mapred-site.xml

<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<property>
<name>mapreduce.jobhistory.address</name>
<value>NameNode34:10020</value>
</property>
<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>NameNode34:19888</value>
</property>

<property>
<name>mapreduce.map.memory.mb</name>
<value>4096</value>
</property>
<property>
<name>mapreduce.map.java.opts</name>
<value>-Xmx1024M</value>
</property>
<property>
<name>mapreduce.reduce.memory.mb</name>
<value>4096</value>
</property>
<property>
<name>mapreduce.reduce.java.opts</name>
<value>-Xmx1024M</value>
</property>

<!--
<property>
<name>mapreduce.map.memory.mb</name>
<value>1536</value>
</property>
<property>
<name>mapreduce.map.java.opts</name>
<value>-Xmx1024M</value>
</property>
<property>
<name>mapreduce.reduce.memory.mb</name>
<value>3072</value>
</property>
<property>
<name>mapreduce.reduce.java.opts</name>
<value>-Xmx2560M</value>
</property>
<property>
<name>mapreduce.task.io.sort.mb</name>
<value>512</value>
</property>
<property>
<name>mapreduce.task.io.sort.factor</name>
<value>100</value>
</property>
<property>
<name>mapreduce.reduce.shuffle.parallelcopies</name>
<value>50</value>
</property>
-->
</configuration>
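All four configuration files must be identical on every node. A minimal sketch of syncing them, assuming the same /opt/hadoop layout and root login on each host:

for h in DataNode35 DataNode37 DataNode38; do
scp /opt/hadoop/etc/hadoop/*-site.xml root@$h:/opt/hadoop/etc/hadoop/
done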

7. On NameNode34, run hdfs namenode -format.
8. Edit the slaves file under etc/hadoop in the Hadoop installation directory. Keep localhost only if the NameNode host should also run a DataNode:
localhost
DataNode35
DataNode37
DataNode38

9. Run start-dfs.sh.
10. Run start-yarn.sh.
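To verify the daemons came up, jps should show NameNode, SecondaryNameNode and ResourceManager on NameNode34, and DataNode plus NodeManager on each worker. A small test job exercises the whole stack (the examples jar name varies with the exact 2.7.x release, hence the wildcard):

yarn jar $HADOOP_PREFIX/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar pi 5 10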

http://NameNode34:8088/ (YARN web UI)

http://NameNode34:50070/ (HDFS web UI)
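The same cluster summary is available on the command line; the live datanodes count should match the slaves file:

hdfs dfsadmin -report | grep -i 'live datanodes'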

To copy the Hadoop tree to other machines, create a script such as the following (adjust the host list and paths to your cluster):

[root@db apps]# vi scp_hadoop.sh

#!/bin/sh
for host in red mongdb nginx; do
echo $host
scp -r /work/apps/hadoop sch@${host}:/work/apps/
done

Save and exit, then make the script executable (chmod a+x *.sh).

If the logs show "WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform", export LD_LIBRARY_PATH so it points at the native libraries under $HADOOP_HOME/lib/native (done at the end of the /etc/profile snippet below, after HADOOP_HOME is defined):

### set java env

export JAVA_HOME=/usr/java/jdk1.8.0_25/

export CLASSPATH=.:$CLASSPATH:$JAVA_HOME/lib:$JAVA_HOME/jre/lib
export HADOOP_HOME=/opt/hadoop
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_YARN_HOME=$HADOOP_HOME
export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$HADOOP_HOME/lib
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
#export HADOOP_OPTS="-Djava.library.path=$HADOOP_HOME/lib/native"

export LD_LIBRARY_PATH=/opt/hadoop/lib/native/
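After reloading the profile, hadoop checknative reports whether the native libraries are now picked up:

source /etc/profile
hadoop checknative -a    # "hadoop: true" indicates the native library loaded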
Tags: hadoop