
Hadoop 1.x MapReduce Default Driver Configuration

2016-04-18 22:11
    By reading the source code, we can derive the default driver configuration of a Hadoop 1.x MapReduce job:

package org.dragon.hadoop.mr;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.mapreduce.lib.partition.HashPartitioner;

/**
 * Default MapReduce driver configuration
 * @author Administrator
 *
 */
public class DefaultMapReduce {

    public static void main(String[] args) throws Exception {

        // hard-coded HDFS input/output paths for testing
        args = new String[]{
                "hdfs://hadoop-master.dragon.org:9000/opt/data/test/input/simple_file.txt",
                "hdfs://hadoop-master.dragon.org:9000/opt/data/test/output7/"
        };

        //1、conf
        Configuration conf = new Configuration();

        //2、create job
        Job job = new Job(conf, DefaultMapReduce.class.getSimpleName());

        //3、set run jar
        job.setJarByClass(DefaultMapReduce.class);

        //4、set inputFormat
        job.setInputFormatClass(TextInputFormat.class);

        //5、set input path
        FileInputFormat.addInputPath(job, new Path(args[0]));

        //6、set mapper
        job.setMapperClass(Mapper.class);

        //7、set map output key/value class
        job.setMapOutputKeyClass(LongWritable.class);
        job.setMapOutputValueClass(Text.class);

        //8、set partitioner
        job.setPartitionerClass(HashPartitioner.class);

        //9、set reduce number
        job.setNumReduceTasks(1);

        //10、set sort comparator class
        job.setSortComparatorClass(LongWritable.Comparator.class);

        //11、set group comparator class
        job.setGroupingComparatorClass(LongWritable.Comparator.class);

        //12、set combiner class (none by default; passing null to setCombinerClass()
        //    would throw an exception, so the call is simply omitted)

        //13、set reducer class
        job.setReducerClass(Reducer.class);

        //14、set output format
        job.setOutputFormatClass(TextOutputFormat.class);

        //15、set job output key/value class
        job.setOutputKeyClass(LongWritable.class);
        job.setOutputValueClass(Text.class);

        //16、set job output path
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        //17、submit job and wait for completion
        boolean isSuccess = job.waitForCompletion(true);

        //18、exit
        System.exit(isSuccess ? 0 : 1);
    }
}
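
With these defaults the job is effectively an identity pass-through: TextInputFormat supplies each line's byte offset (LongWritable) as the key and the line itself (Text) as the value, the base Mapper and Reducer classes emit every (key, value) pair unchanged, HashPartitioner assigns records to reducers by key hash (trivial here, since there is a single reducer), and TextOutputFormat writes tab-separated key/value lines to the output directory.

To build a real job on top of this skeleton, you usually only swap in your own classes. Below is a minimal sketch, assuming a hypothetical UpperCaseMapper that keeps the default key/value types but upper-cases each input line; in the driver above, only step 6 would change to job.setMapperClass(UpperCaseMapper.class).

package org.dragon.hadoop.mr;

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

/**
 * Hypothetical example mapper: keeps the default key/value types
 * (LongWritable byte offset, Text line) and upper-cases each line.
 */
public class UpperCaseMapper extends Mapper<LongWritable, Text, LongWritable, Text> {

    private final Text outValue = new Text();

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // Upper-case the line and emit it with its original byte offset as the key.
        outValue.set(value.toString().toUpperCase());
        context.write(key, outValue);
    }
}

After packaging, the job can be launched in the usual way, for example: hadoop jar <your-jar> org.dragon.hadoop.mr.DefaultMapReduce (the jar name here is just a placeholder).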
Tags:  Hadoop 1.x MapReduce