
How to view the logs you add yourself in a Hadoop program

2014-09-29 14:17
I've been using Hadoop for quite a while now, but never paid attention to where the logs produced during a run actually end up: output printed with System.out, or records written through log4j, slf4j, and so on. The importance of log data hardly needs explaining here; debugging almost any program starts with reading its logs.
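Before getting into the details, here is a minimal sketch of the two kinds of log statements this article is about: a plain System.out print and an SLF4J record inside a Mapper. The class name LoggingMapper is made up for illustration; the full test program appears further below.

Java code

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Illustrative mapper: on a cluster, the System.out line ends up in the
// task's stdout file and the SLF4J line in its syslog file.
public class LoggingMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

    private static final Logger LOG = LoggerFactory.getLogger(LoggingMapper.class);

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        System.out.println("stdout print: " + value);  // plain print
        LOG.info("slf4j record: {}", value);           // logging framework
        context.write(new Text(value.toString()), new IntWritable(1));
    }
}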

The Hadoop logs in question here are mainly the data a MapReduce program emits while it runs. Besides the system's own logs, they include whatever we print ourselves during testing or in a production environment; that part is usually placed under the userlogs folder. The output directory for run logs can be configured in mapred-site.xml. My test file looks like this:

XML code

<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>

<!-- Put site-specific property overrides in this file. -->

<configuration>
  <!-- the jobtracker master address -->
  <property>
    <name>mapred.job.tracker</name>
    <value>192.168.75.130:9001</value>
  </property>
  <property>
    <!-- the directory Hadoop writes its run logs to -->
    <name>mapred.local.dir</name>
    <value>/root/hadoop1.2/mylogs</value>
  </property>
</configuration>

Once the log directory is configured, we can distribute this configuration file to every node and then start Hadoop.
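Before submitting anything, it is easy to confirm from code which values Hadoop actually resolved for these properties. A minimal sketch (the property names are the ones from the file above; the class name ShowJobConf is made up for illustration; if mapred.job.tracker comes back as "local", the job will run in local mode):

Java code

import org.apache.hadoop.mapred.JobConf;

// Illustrative check: print the values Hadoop resolved for the two
// properties configured in mapred-site.xml above.
public class ShowJobConf {

    public static void main(String[] args) {
        JobConf conf = new JobConf(); // loads the *-site.xml files from the classpath
        System.out.println("mapred.job.tracker = " + conf.get("mapred.job.tracker"));
        System.out.println("mapred.local.dir   = " + conf.get("mapred.local.dir"));
    }
}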

Now let's look at debugging in the Eclipse environment. I used System.out to print some data in the setup, map, and reduce methods. When we run the MR program in local mode, the log is not recorded to files; it is printed straight to the console. My test code is as follows:

Java code

package com.qin.testdistributed;

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Tests Hadoop's globally shared files,
 * using DistributedCache.
 *
 * @author qindongliang
 ***/
public class TestDistributed {

    private static Logger logger = LoggerFactory.getLogger(TestDistributed.class);

    private static class FileMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

        Path[] path = null;

        /**
         * Called once before the map function runs.
         **/
        @Override
        protected void setup(Context context) throws IOException, InterruptedException {
            logger.info("setup() is starting");
            Configuration conf = context.getConfiguration();
            path = DistributedCache.getLocalCacheFiles(conf);
            System.out.println("Local cache path: " + path[0].toString());
            FileSystem fsopen = FileSystem.getLocal(conf);
            // Leftover experiments for reading the cached file, e.g.:
            // FSDataInputStream in = fsopen.open(path[0]);
            // Scanner scan = new Scanner(in);
            // while (scan.hasNext()) {
            //     System.out.println(Thread.currentThread().getName() + " scanned: " + scan.next());
            // }
            // scan.close();
            // System.out.println("size: " + path.length);
        }

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            System.out.println("output from map");
            context.write(new Text(""), new IntWritable(0));
        }

        @Override
        protected void cleanup(Context context) throws IOException, InterruptedException {
            logger.info("cleanup() is running......");
        }
    }

    private static class FileReduce extends Reducer<Object, Object, Object, Object> {

        @Override
        protected void reduce(Object arg0, Iterable<Object> arg1, Context arg2)
                throws IOException, InterruptedException {
            System.out.println("output from reduce");
        }
    }

    public static void main(String[] args) throws Exception {
        JobConf conf = new JobConf(TestDistributed.class);
        // conf.set("mapred.local.dir", "/root/hadoop");
        // conf.set("mapred.job.tracker", "192.168.75.130:9001");
        // conf.setJar("tt.jar");
        // Note: the conf settings above must come first, before the Job is
        // created, or the job will not initialize properly on the cluster.
        String inputPath = "hdfs://192.168.75.130:9000/root/input";
        String outputPath = "hdfs://192.168.75.130:9000/root/outputsort";

        Job job = new Job(conf, "a");
        DistributedCache.addCacheFile(new URI("hdfs://192.168.75.130:9000/root/input/f1.txt"),
                job.getConfiguration());
        job.setJarByClass(TestDistributed.class);
        System.out.println("Run mode: " + conf.get("mapred.job.tracker"));

        // Delete the output path if it already exists.
        FileSystem fs = FileSystem.get(job.getConfiguration());
        Path pout = new Path(outputPath);
        if (fs.exists(pout)) {
            fs.delete(pout, true);
            System.out.println("Output path existed and was deleted......");
        }

        // Set the map output types and the Map/Reduce classes.
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        job.setMapperClass(FileMapper.class);
        job.setReducerClass(FileReduce.class);
        FileInputFormat.setInputPaths(job, new Path(inputPath));   // input path
        FileOutputFormat.setOutputPath(job, new Path(outputPath)); // output path

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}

The output in local mode looks like this:

Console output

Run mode: local
Output path existed and was deleted......
WARN - NativeCodeLoader.<clinit>(52) | Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
WARN - JobClient.copyAndConfigureFiles(746) | Use GenericOptionsParser for parsing the arguments. Applications should implement Tool for the same.
WARN - JobClient.copyAndConfigureFiles(870) | No job jar file set. User classes may not be found. See JobConf(Class) or JobConf#setJar(String).
INFO - FileInputFormat.listStatus(237) | Total input paths to process : 1
WARN - LoadSnappy.<clinit>(46) | Snappy native library not loaded
INFO - TrackerDistributedCacheManager.downloadCacheObject(423) | Creating f1.txt in /root/hadoop1.2/hadooptmp/mapred/local/archive/9070031930820799196_1788685676_88844454/192.168.75.130/root/input-work-186410214545932656 with rwxr-xr-x
INFO - TrackerDistributedCacheManager.downloadCacheObject(463) | Cached hdfs://192.168.75.130:9000/root/input/f1.txt as /root/hadoop1.2/hadooptmp/mapred/local/archive/9070031930820799196_1788685676_88844454/192.168.75.130/root/input/f1.txt
INFO - TrackerDistributedCacheManager.localizePublicCacheObject(486) | Cached hdfs://192.168.75.130:9000/root/input/f1.txt as /root/hadoop1.2/hadooptmp/mapred/local/archive/9070031930820799196_1788685676_88844454/192.168.75.130/root/input/f1.txt
INFO - JobClient.monitorAndPrintJob(1380) | Running job: job_local479869714_0001
INFO - LocalJobRunner$Job.run(340) | Waiting for map tasks
INFO - LocalJobRunner$Job$MapTaskRunnable.run(204) | Starting task: attempt_local479869714_0001_m_000000_0
INFO - Task.initialize(534) | Using ResourceCalculatorPlugin : null
INFO - MapTask.runNewMapper(729) | Processing split: hdfs://192.168.75.130:9000/root/input/f1.txt:0+31
INFO - MapTask$MapOutputBuffer.<init>(949) | io.sort.mb = 100
INFO - MapTask$MapOutputBuffer.<init>(961) | data buffer = 79691776/99614720
INFO - MapTask$MapOutputBuffer.<init>(962) | record buffer = 262144/327680
INFO - TestDistributed$FileMapper.setup(57) | setup() is starting
Local cache path: /root/hadoop1.2/hadooptmp/mapred/local/archive/9070031930820799196_1788685676_88844454/192.168.75.130/root/input/f1.txt
output from map
output from map
INFO - TestDistributed$FileMapper.cleanup(107) | cleanup() is running......
INFO - MapTask$MapOutputBuffer.flush(1289) | Starting flush of map output
INFO - MapTask$MapOutputBuffer.sortAndSpill(1471) | Finished spill 0
INFO - Task.done(858) | Task:attempt_local479869714_0001_m_000000_0 is done. And is in the process of commiting
INFO - LocalJobRunner$Job.statusUpdate(466) |
INFO - Task.sendDone(970) | Task 'attempt_local479869714_0001_m_000000_0' done.
INFO - LocalJobRunner$Job$MapTaskRunnable.run(229) | Finishing task: attempt_local479869714_0001_m_000000_0
INFO - LocalJobRunner$Job.run(348) | Map task executor complete.
INFO - Task.initialize(534) | Using ResourceCalculatorPlugin : null
INFO - LocalJobRunner$Job.statusUpdate(466) |
INFO - Merger$MergeQueue.merge(408) | Merging 1 sorted segments
INFO - Merger$MergeQueue.merge(491) | Down to the last merge-pass, with 1 segments left of total size: 16 bytes
INFO - LocalJobRunner$Job.statusUpdate(466) |
output from reduce
INFO - Task.done(858) | Task:attempt_local479869714_0001_r_000000_0 is done. And is in the process of commiting
INFO - LocalJobRunner$Job.statusUpdate(466) |
INFO - Task.commit(1011) | Task attempt_local479869714_0001_r_000000_0 is allowed to commit now
INFO - FileOutputCommitter.commitTask(173) | Saved output of task 'attempt_local479869714_0001_r_000000_0' to hdfs://192.168.75.130:9000/root/outputsort
INFO - LocalJobRunner$Job.statusUpdate(466) | reduce > reduce
INFO - Task.sendDone(970) | Task 'attempt_local479869714_0001_r_000000_0' done.
INFO - JobClient.monitorAndPrintJob(1393) | map 100% reduce 100%
INFO - JobClient.monitorAndPrintJob(1448) | Job complete: job_local479869714_0001
INFO - Counters.log(585) | Counters: 18
INFO - Counters.log(587) |   File Output Format Counters
INFO - Counters.log(589) |     Bytes Written=0
INFO - Counters.log(587) |   File Input Format Counters
INFO - Counters.log(589) |     Bytes Read=31
INFO - Counters.log(587) |   FileSystemCounters
INFO - Counters.log(589) |     FILE_BYTES_READ=454
INFO - Counters.log(589) |     HDFS_BYTES_READ=124
INFO - Counters.log(589) |     FILE_BYTES_WRITTEN=138372
INFO - Counters.log(587) |   Map-Reduce Framework
INFO - Counters.log(589) |     Map output materialized bytes=20
INFO - Counters.log(589) |     Map input records=2
INFO - Counters.log(589) |     Reduce shuffle bytes=0
INFO - Counters.log(589) |     Spilled Records=4
INFO - Counters.log(589) |     Map output bytes=10
INFO - Counters.log(589) |     Total committed heap usage (bytes)=455475200
INFO - Counters.log(589) |     Combine input records=0
INFO - Counters.log(589) |     SPLIT_RAW_BYTES=109
INFO - Counters.log(589) |     Reduce input records=2
INFO - Counters.log(589) |     Reduce input groups=1
INFO - Counters.log(589) |     Combine output records=0
INFO - Counters.log(589) |     Reduce output records=0
INFO - Counters.log(589) |     Map output records=2

Next, we submit the program to the Hadoop cluster and run the test there. Note that when it runs on the cluster, the log messages no longer appear on the console; we have to go to the log directory we configured, find the directory of the most recently submitted task, and look at our log messages there.





The contents of stdout look like this:

Console output

Local cache path: /root/hadoop1.2/mylogs/taskTracker/distcache/2726204645197711229_1788685676_88844454/192.168.75.130/root/input/f1.txt
output from map
output from map

Note that map-side logs have to be looked for in the attempt directories whose names contain _m_ (for example attempt_..._m_000000_0), and reduce-side logs in those containing _r_. Inside each attempt directory, System.out output goes to the stdout file, System.err to stderr, and log4j/SLF4J records to the syslog file.
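If you would rather not click through these directories by hand, a small helper can list every task log file under a given root. This is only a sketch, under the assumption that task logs sit in per-attempt directories beneath the configured log directory; the default path below is hypothetical and should be pointed at your own installation, and the class name FindTaskLogs is made up for illustration.

Java code

import java.io.File;

// Illustrative helper: walk a log directory tree and print
// the location of every stdout/stderr/syslog file it finds.
public class FindTaskLogs {

    public static void main(String[] args) {
        // Hypothetical default root; pass your own log directory as the first argument.
        File root = new File(args.length > 0 ? args[0] : "/root/hadoop1.2/mylogs");
        walk(root);
    }

    private static void walk(File dir) {
        File[] children = dir.listFiles();
        if (children == null) {
            return; // not a directory, or not readable
        }
        for (File f : children) {
            if (f.isDirectory()) {
                walk(f);
            } else if (f.getName().equals("stdout")
                    || f.getName().equals("stderr")
                    || f.getName().equals("syslog")) {
                System.out.println(f.getAbsolutePath());
            }
        }
    }
}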



Of course, besides this approach, we can also view the logs directly in the web UI that the JobTracker serves on port 50030.


With that, I have covered both approaches. During execution, Hadoop may hand a task to any node, so we cannot know in advance which node this run's log output ended up on. On the 50030 web page, however, we can open the most recently submitted task (usually the one at the bottom of the list) and follow its links to see exactly which node the logs for this run were sent to; then we can fetch them from that node.
