Writing HDFS data into HBase with MapReduce
2017-03-01 09:20
1. Using the Map + Reduce approach
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;

public class MapReduceImport {

    /**
     * Mapper: emits each valid input line, prefixed with the generated rowkey.
     */
    static class HMapper extends Mapper<LongWritable, Text, LongWritable, Text> {
        Text v2 = new Text();

        protected void map(LongWritable key, Text value, Context context)
                throws java.io.IOException, InterruptedException {
            String[] splited = value.toString().split(" ");
            if (splited.length != 6) // drop records that do not match the expected 6-field format
                return;
            try {
                // GetRowKey.getRowKeyString is a user-defined method that builds the rowkey;
                // the rowkey is designed as IP_TimeStamp
                v2.set(GetRowKey.getRowKeyString(splited[2], splited[4]) + " " + value.toString());
                context.write(key, v2);
            } catch (NumberFormatException e) {
                System.out.println("Bad record: " + e.getMessage());
            }
        }
    }

    /**
     * Reducer: turns each line into a Put and writes it to HBase via TableOutputFormat.
     */
    static class HReducer extends TableReducer<LongWritable, Text, NullWritable> {
        protected void reduce(LongWritable key, java.lang.Iterable<Text> values, Context context)
                throws java.io.IOException, InterruptedException {
            for (Text text : values) {
                String[] splited = text.toString().split(" ");
                Put put = new Put(Bytes.toBytes(splited[0]));
                for (int j = 1; j < splited.length; j++) {
                    put.addColumn(Bytes.toBytes(HConfiguration.colFamily),
                            Bytes.toBytes("log" + j), Bytes.toBytes(splited[j]));
                }
                context.write(NullWritable.get(), put);
            }
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration configuration = new Configuration();
        // ZooKeeper settings
        configuration.set("hbase.zookeeper.quorum", HConfiguration.hbase_zookeeper_quorum);
        configuration.set("hbase.zookeeper.property.clientPort", "2181");
        // target HBase table name
        configuration.set(TableOutputFormat.OUTPUT_TABLE, HConfiguration.tableName);
        // raise this value so HBase does not time out and abort
        configuration.set("dfs.socket.timeout", "180000");

        MRDriver myDriver = MRDriver.getInstance();
        try {
            myDriver.createTableIfExistDelete(HConfiguration.tableName, HConfiguration.colFamily);
        } catch (Exception e) {
            e.printStackTrace();
        }

        Job job = Job.getInstance(configuration, "Map+ReduceImport");
        job.setJarByClass(MapReduceImport.class);

        job.setMapperClass(HMapper.class);
        job.setReducerClass(HReducer.class);

        job.setMapOutputKeyClass(LongWritable.class);
        job.setMapOutputValueClass(Text.class);

        job.setInputFormatClass(TextInputFormat.class);
        // no output path is set; the output format is TableOutputFormat instead
        job.setOutputFormatClass(TableOutputFormat.class);

        FileInputFormat.setInputPaths(job, HConfiguration.mapreduce_inputPath);

        job.waitForCompletion(true);
    }
}
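The job depends on three helper classes that the post does not show: HConfiguration (cluster and table constants), GetRowKey (rowkey construction), and MRDriver (table creation). Their real contents are not given, so the sketch below is only a guess at minimal versions, assuming the IP_TimeStamp rowkey described in the mapper comments; every concrete value (hosts, table name, column family, input path) is made up for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Hypothetical helpers -- not shown in the original post; in practice each would be its own public class.
class HConfiguration {
    static final String hbase_zookeeper_quorum = "node1,node2,node3";        // assumed ZooKeeper hosts
    static final String tableName = "log_table";                             // assumed table name
    static final String colFamily = "info";                                  // assumed column family
    static final String mapreduce_inputPath = "hdfs://node1:9000/input/logs"; // assumed input path
}

class GetRowKey {
    // Builds the rowkey as IP_TimeStamp, matching the comment in the mapper.
    static String getRowKeyString(String ip, String timeStamp) {
        return ip + "_" + timeStamp;
    }
}

class MRDriver {
    private static final MRDriver INSTANCE = new MRDriver();

    static MRDriver getInstance() {
        return INSTANCE;
    }

    // Drops the table if it already exists, then recreates it with the given column family.
    void createTableIfExistDelete(String tableName, String colFamily) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", HConfiguration.hbase_zookeeper_quorum);
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName tn = TableName.valueOf(tableName);
            if (admin.tableExists(tn)) {
                admin.disableTable(tn);
                admin.deleteTable(tn);
            }
            HTableDescriptor desc = new HTableDescriptor(tn);
            desc.addFamily(new HColumnDescriptor(colFamily));
            admin.createTable(desc);
        }
    }
}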
--------------------------------------------------
2. Using only a Map (no Reduce)
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;

public class OnlyMapImport {

    /**
     * Mapper: builds the Put directly and writes it to HBase, so no reducer is needed.
     */
    static class ImportMapper extends Mapper<LongWritable, Text, ImmutableBytesWritable, Put> {
        @Override
        public void map(LongWritable offset, Text value, Context context) {
            String[] splited = value.toString().split(" ");
            if (splited.length != 6) // drop records that do not match the expected 6-field format
                return;
            try {
                // GetRowKey.getRowKeyString is a user-defined method that builds the rowkey;
                // the rowkey is designed as IP_TimeStamp
                byte[] rowkey = Bytes.toBytes(GetRowKey.getRowKeyString(splited[2], splited[4]));
                Put put = new Put(rowkey);
                for (int j = 0; j < splited.length; j++) {
                    put.addColumn(Bytes.toBytes(HConfiguration.colFamily),
                            Bytes.toBytes("log" + j), Bytes.toBytes(splited[j]));
                }
                context.write(new ImmutableBytesWritable(rowkey), put);
            } catch (NumberFormatException e) {
                System.out.println("Bad record: " + e.getMessage());
            } catch (IOException e) {
                e.printStackTrace();
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration configuration = new Configuration();
        // ZooKeeper settings
        configuration.set("hbase.zookeeper.quorum", HConfiguration.hbase_zookeeper_quorum);
        configuration.set("hbase.zookeeper.property.clientPort", "2181");
        // target HBase table name
        configuration.set(TableOutputFormat.OUTPUT_TABLE, HConfiguration.tableName);
        // raise this value so HBase does not time out and abort
        configuration.set("dfs.socket.timeout", "180000");

        MRDriver myDriver = MRDriver.getInstance();
        try {
            myDriver.createTableIfExistDelete(HConfiguration.tableName, HConfiguration.colFamily);
        } catch (Exception e) {
            e.printStackTrace();
        }

        Job job = Job.getInstance(configuration, "HBaseBatchImport");
        job.setJarByClass(OnlyMapImport.class);
        job.setMapperClass(ImportMapper.class);

        // set the map output types; with no reducer, nothing else is needed
        job.setMapOutputKeyClass(ImmutableBytesWritable.class);
        job.setMapOutputValueClass(Put.class); // the mapper emits Put values
        job.setNumReduceTasks(0);

        job.setInputFormatClass(TextInputFormat.class);
        // no output path is set; the output format is TableOutputFormat instead
        job.setOutputFormatClass(TableOutputFormat.class);

        FileInputFormat.setInputPaths(job, HConfiguration.mapreduce_inputPath);

        job.waitForCompletion(true);
    }
}
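As a side note, HBase ships a helper, TableMapReduceUtil, whose initTableReducerJob call configures TableOutputFormat, the output table, and the HBase dependency jars in one step; passing null as the reducer keeps the job map-only. The post does not use it, so the following is just a sketch of how the same job could be wired that way, reusing ImportMapper and the assumed HConfiguration constants from the earlier sketch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;

// Alternative wiring of the map-only import (sketch, not from the original post).
public class OnlyMapImportAlt {
    public static void main(String[] args) throws Exception {
        Configuration configuration = HBaseConfiguration.create();
        configuration.set("hbase.zookeeper.quorum", HConfiguration.hbase_zookeeper_quorum);
        configuration.set("hbase.zookeeper.property.clientPort", "2181");

        Job job = Job.getInstance(configuration, "HBaseBatchImportAlt");
        job.setJarByClass(OnlyMapImportAlt.class);
        job.setMapperClass(OnlyMapImport.ImportMapper.class);

        job.setInputFormatClass(TextInputFormat.class);
        FileInputFormat.setInputPaths(job, HConfiguration.mapreduce_inputPath);

        // Sets TableOutputFormat, the output table, and the output key/value classes,
        // and ships the HBase dependency jars with the job.
        // Passing null as the reducer keeps this a map-only job.
        TableMapReduceUtil.initTableReducerJob(HConfiguration.tableName, null, job);
        job.setNumReduceTasks(0);

        job.waitForCompletion(true);
    }
}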
In testing, this map-only version cut the import time noticeably, which is expected: dropping the reducer also drops the shuffle and sort stage.
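To sanity-check the result, a few imported rows can be scanned back with the HBase client. This is only a small sketch, again assuming the HConfiguration constants sketched earlier:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Prints the rowkeys (IP_TimeStamp) of the first few imported rows.
public class ScanCheck {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", HConfiguration.hbase_zookeeper_quorum);

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf(HConfiguration.tableName));
             ResultScanner scanner = table.getScanner(new Scan())) {
            int printed = 0;
            for (Result r : scanner) {
                System.out.println(Bytes.toString(r.getRow()));
                if (++printed >= 10) {
                    break;
                }
            }
        }
    }
}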