Hadoop 1.x MapReduce Default Driver Configuration
2016-04-18 22:11
Reading the source code, we can derive the default driver configuration for Hadoop 1.x MapReduce:
package org.dragon.hadoop.mr;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.mapreduce.lib.partition.HashPartitioner;

/**
 * Default MapReduce driver configuration.
 * @author Administrator
 */
public class DefaultMapReduce {
    public static void main(String[] args) throws Exception {
        // Hardcoded HDFS paths for this demo; they override any command-line arguments.
        args = new String[]{
            "hdfs://hadoop-master.dragon.org:9000/opt/data/test/input/simple_file.txt",
            "hdfs://hadoop-master.dragon.org:9000/opt/data/test/output7/"
        };
        // 1. create configuration
        Configuration conf = new Configuration();
        // 2. create job
        Job job = new Job(conf, DefaultMapReduce.class.getSimpleName());
        // 3. set the jar to run by class
        job.setJarByClass(DefaultMapReduce.class);
        // 4. set input format
        job.setInputFormatClass(TextInputFormat.class);
        // 5. set input path
        FileInputFormat.addInputPath(job, new Path(args[0]));
        // 6. set mapper (the stock Mapper is an identity pass-through)
        job.setMapperClass(Mapper.class);
        // 7. set map output key/value classes
        job.setMapOutputKeyClass(LongWritable.class);
        job.setMapOutputValueClass(Text.class);
        // 8. set partitioner
        job.setPartitionerClass(HashPartitioner.class);
        // 9. set number of reduce tasks
        job.setNumReduceTasks(1);
        // 10. set sort comparator class
        job.setSortComparatorClass(LongWritable.Comparator.class);
        // 11. set grouping comparator class
        job.setGroupingComparatorClass(LongWritable.Comparator.class);
        // 12. no combiner by default; calling job.setCombinerClass(null)
        //     would throw a NullPointerException, so simply leave it unset
        // 13. set reducer (the stock Reducer re-emits every grouped value)
        job.setReducerClass(Reducer.class);
        // 14. set output format
        job.setOutputFormatClass(TextOutputFormat.class);
        // 15. set job output key/value classes
        job.setOutputKeyClass(LongWritable.class);
        job.setOutputValueClass(Text.class);
        // 16. set job output path
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        // 17. submit the job and wait for completion
        boolean isSuccess = job.waitForCompletion(true);
        // 18. exit with the job status
        System.exit(isSuccess ? 0 : 1);
    }
}
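Because the driver wires in Mapper.class and Reducer.class directly, the job is an identity pass-through: TextInputFormat feeds each record to the map phase as a (LongWritable byte offset, Text line) pair, the default Mapper writes it through unchanged, HashPartitioner routes records by key hash, and the default Reducer re-emits every value grouped under a key. As a minimal sketch of that behavior (the class names IdentityEquivalents, IdentityMapper, and IdentityReducer below are illustrative, not part of Hadoop's API), the defaults are equivalent to:

package org.dragon.hadoop.mr;

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;

/**
 * Illustrative equivalents of the identity behavior provided by the
 * stock Mapper and Reducer base classes; class names are hypothetical.
 */
public class IdentityEquivalents {

    // Same effect as job.setMapperClass(Mapper.class): each
    // (byte offset, line) record passes through unchanged.
    public static class IdentityMapper
            extends Mapper<LongWritable, Text, LongWritable, Text> {
        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            context.write(key, value);
        }
    }

    // Same effect as job.setReducerClass(Reducer.class): every value
    // grouped under a key is re-emitted with that key.
    public static class IdentityReducer
            extends Reducer<LongWritable, Text, LongWritable, Text> {
        @Override
        protected void reduce(LongWritable key, Iterable<Text> values,
                Context context) throws IOException, InterruptedException {
            for (Text value : values) {
                context.write(key, value);
            }
        }
    }
}

Swapping these explicit classes into setMapperClass/setReducerClass would produce the same output as the default driver above, which is why the job simply copies the input records, keyed by byte offset, to the output directory.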