Writing WordCount in Hadoop
1. Extend Mapper
package com.dylan.bigdata.mr.wcdemo;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;
/**
 * KEYIN: by default, the starting byte offset of the line of text the MR framework has read; logically a Long,
 * but Hadoop has its own, more compact serialization interface, so LongWritable is used instead of Long.
 *
 * VALUEIN: by default, the content of the line of text the MR framework has read; a String, so as above, Text is used.
 *
 * KEYOUT: the key of the data output after the user-defined logic finishes; here it is a word, a String, so as above, Text.
 * VALUEOUT: the value of the data output after the user-defined logic finishes; here it is the word count, an Integer, so as above, IntWritable.
 *
 * @author Dylan
 *
 */
public class WordcountMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

    /**
     * The business logic of the map phase lives in this custom map() method.
     * The map task calls our map() method once for each line of input data.
     */
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // First convert the text content the map task hands us into a String
        String line = value.toString();
        // Split the line into words on spaces
        String[] words = line.split(" ");
        // Emit each word as <word, 1>
        for (String word : words) {
            // Use the word as the key and the count 1 as the value, so the data can be
            // partitioned by word and identical words end up at the same reduce task
            context.write(new Text(word), new IntWritable(1));
        }
    }
}
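One small refinement worth noting (my addition, not in the original post): allocating a new Text and a new IntWritable for every word puts needless pressure on the garbage collector. A minimal sketch of the same mapper with reused writable objects; the class and field names (WordcountMapperReuse, outKey, ONE) are my own:

package com.dylan.bigdata.mr.wcdemo;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

public class WordcountMapperReuse extends Mapper<LongWritable, Text, Text, IntWritable> {

    // Reuse one Text and one constant IntWritable instead of allocating per word;
    // context.write() serializes the values right away, so reuse is safe here.
    private final Text outKey = new Text();
    private static final IntWritable ONE = new IntWritable(1);

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        for (String word : value.toString().split(" ")) {
            outKey.set(word);
            context.write(outKey, ONE);
        }
    }
}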
2. Extend Reducer
package com.dylan.bigdata.mr.wcdemo;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

/**
 * KEYIN, VALUEIN correspond to the mapper's output types KEYOUT, VALUEOUT.
 *
 * KEYOUT, VALUEOUT are the output types of the custom reduce logic:
 * KEYOUT is the word
 * VALUEOUT is the total count
 * @author Dylan
 *
 */
public class WordcountReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

    /**
     * <honey,1><honey,1><honey,1><honey,1><honey,1>
     * <hello,1><hello,1><hello,1><hello,1><hello,1><hello,1>
     * <banana,1><banana,1><banana,1><banana,1><banana,1><banana,1>
     * The key parameter is the key shared by one group of kv pairs with the same word
     */
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {

        int count = 0;
        /*Iterator<IntWritable> iterator = values.iterator();
        while (iterator.hasNext()) {
            count += iterator.next().get();
        }*/
        for (IntWritable value : values) {
            count += value.get();
        }
        context.write(key, new IntWritable(count));
    }
}
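Because adding up counts is associative and commutative, this same reducer class can also serve as a combiner that pre-aggregates <word,1> pairs on the map side, shrinking shuffle traffic. The original post does not register a combiner; a hedged sketch of the extra line that would go in the driver (section 3):

// In the driver, after job.setReducerClass(...) (my addition, not in the original):
job.setCombinerClass(WordcountReducer.class);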
3. Write the main (driver) class
package com.dylan.bigdata.mr.wcdemo;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * Acts as a client of the YARN cluster:
 * it packages the runtime parameters of our MR program, points at the jar,
 * and finally submits the job to YARN.
 * @author Dylan
 *
 */
public class WordcountDriver {

    public static void main(String[] args) throws Exception {

        if (args == null || args.length == 0) {
            args = new String[2];
            args[0] = "hdfs://mini1:9000/wordcount/input";
            args[1] = "hdfs://mini1:9000/wordcount/output";
        }

        Configuration conf = new Configuration();
        // conf.set("dfs.permissions","false");  // setting it this way had no effect!
        // Set the user name used to access the file system
        conf.set("HADOOP_USER_NAME", "dylan");
        // Make the file system skip permission checks
        conf.set("dfs.permissions", "false");
        // Set the default file system
        conf.set("fs.defaultFS", "hdfs://mini1:9000/");
        /*conf.set("mapreduce.framework.name", "yarn");
        conf.set("yarn.resourcemanager.hostname", "mini1");*/

        Job job = Job.getInstance(conf);

        /*job.setJar("/home/hadoop/wc.jar");*/
        // Specify the local path of the jar containing this program
        job.setJarByClass(WordcountDriver.class);

        // Specify the Mapper/Reducer classes this job uses
        job.setMapperClass(WordcountMapper.class);
        job.setReducerClass(WordcountReducer.class);

        // Specify the kv types of the mapper's output
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);

        // Specify the kv types of the final output
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        // Delete the output directory if it already exists, otherwise the job will fail
        Path path = new Path(args[1]);
        FileSystem fs = FileSystem.get(conf);
        if (fs.exists(path)) {
            fs.delete(path, true);
        }

        // Specify the directory containing the job's raw input files
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        // Specify the directory for the job's output
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        // Submit the job's configured parameters, plus the jar containing the job's classes, to YARN
        /*job.submit();*/
        boolean res = job.waitForCompletion(true);
        System.exit(res ? 0 : 1);
    }
}
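As a variant (an assumption of mine, not part of the original post), the driver could extend Configured and implement Hadoop's Tool interface, launching through ToolRunner so that -D options on the command line (for example -D fs.defaultFS=hdfs://mini1:9000/) override hard-coded conf.set(...) calls. A minimal sketch; the class name WordcountTool is hypothetical:

package com.dylan.bigdata.mr.wcdemo;

import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

// Hypothetical ToolRunner-based driver: configuration comes from generic command-line
// options parsed by ToolRunner instead of conf.set(...) calls in the code.
public class WordcountTool extends Configured implements Tool {

    @Override
    public int run(String[] args) throws Exception {
        Job job = Job.getInstance(getConf());
        job.setJarByClass(WordcountTool.class);
        job.setMapperClass(WordcountMapper.class);
        job.setReducerClass(WordcountReducer.class);
        // Map output types match the final output types, so one pair of calls suffices
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        return job.waitForCompletion(true) ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        // ToolRunner strips generic options (-D, -conf, -fs ...) before calling run()
        System.exit(ToolRunner.run(new WordcountTool(), args));
    }
}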