JAVA API操作HDFS文件系统
2017-02-14 14:32
796 查看
来自:http://blog.csdn.net/kkdelta/article/details/19910657
一个通过Java API操作HDFS文件系统的例子,本例子使用的是hadoop0.20的版本,在windows的eclipse下运行的时候,需要将core-site.xml和hdfs-site.xml放在src/bin目录中。
Java 代码示例:
/**
 * Examples of operating on an HDFS file system through the Hadoop Java API.
 *
 * <p>Written against Hadoop 0.20. When run from Eclipse on Windows,
 * core-site.xml and hdfs-site.xml must be on the classpath
 * (the original article places them under src/bin).
 */
public class TestHDFSFile {
    /** Local staging directory for files copied to/from HDFS. */
    private String localPath = "C:/D/JavaWorkSpace/bigdata/temp/";
    /** Target directory on the HDFS cluster. */
    private String hdfsPath = "hdfs://192.168.2.6:9000/user/hadoop/temp/";

    public static void main(String[] args) throws Exception {
        // Uncomment the demo you want to run.
        // new TestHDFSFile().testUpload();
        // new TestHDFSFile().testCreate();
        // new TestHDFSFile().testRename();
        // new TestHDFSFile().testDel();
        // new TestHDFSFile().testgetModifyTime();
        // new TestHDFSFile().testExists();
        // new TestHDFSFile().testFileBlockLocation();
        new TestHDFSFile().testGetHostName();
    }

    /** Prints every entry of {@code listStatus(path)} to stdout (shared by the demos below). */
    private void listFiles(FileSystem hdfs, Path path) throws Exception {
        for (FileStatus file : hdfs.listStatus(path)) {
            System.out.println(file.getPath());
        }
    }

    /** Uploads the local file file01.txt into the HDFS target directory. */
    public void testUpload() throws Exception {
        Configuration conf = new Configuration();
        FileSystem hdfs = FileSystem.get(conf);
        Path src = new Path(localPath + "file01.txt");
        Path dst = new Path(hdfsPath);
        hdfs.copyFromLocalFile(src, dst);
        // NOTE(review): "fs.default.name" is the Hadoop 0.20 key; later
        // releases rename it to "fs.defaultFS" — adjust if you upgrade.
        System.out.println("Upload to " + conf.get("fs.default.name"));
        listFiles(hdfs, dst);
    }

    /** Creates hello.txt on HDFS and writes a short UTF-8 payload into it. */
    public void testCreate() throws Exception {
        Configuration conf = new Configuration();
        // Fix: name the charset explicitly instead of relying on the platform default.
        byte[] buff = "hello world!".getBytes("UTF-8");
        FileSystem hdfs = FileSystem.get(conf);
        Path dst = new Path(hdfsPath + "hello.txt");
        FSDataOutputStream outputStream = null;
        try {
            outputStream = hdfs.create(dst);
            outputStream.write(buff, 0, buff.length);
        } finally {
            // Fix: the original caught Exception, printed the stack trace, and
            // continued as if the write succeeded. Let the failure propagate;
            // the stream is still always closed.
            if (outputStream != null) {
                outputStream.close();
            }
        }
        listFiles(hdfs, dst);
    }

    /** Renames hello.txt to hello2.txt and lists the directory afterwards. */
    public void testRename() throws Exception {
        Configuration conf = new Configuration();
        FileSystem hdfs = FileSystem.get(conf);
        Path dst = new Path(hdfsPath);
        Path frpath = new Path(hdfsPath + "hello.txt");
        Path topath = new Path(hdfsPath + "hello2.txt");
        // Fix: rename() signals failure through its return value, which was
        // silently discarded before. Report it like testDel() does.
        boolean ok = hdfs.rename(frpath, topath);
        System.out.println(ok ? "重命名成功" : "重命名失败");
        listFiles(hdfs, dst);
    }

    /** Deletes hello2.txt (non-recursively) and lists the directory afterwards. */
    public void testDel() throws Exception {
        Configuration conf = new Configuration();
        FileSystem hdfs = FileSystem.get(conf);
        Path dst = new Path(hdfsPath);
        Path topath = new Path(hdfsPath + "hello2.txt");
        // false = do not recurse; deleting a non-empty directory would fail.
        boolean ok = hdfs.delete(topath, false);
        System.out.println(ok ? "删除成功" : "删除失败");
        listFiles(hdfs, dst);
    }

    /** Prints the last-modification time of every entry in the target directory. */
    public void testgetModifyTime() throws Exception {
        Configuration conf = new Configuration();
        FileSystem hdfs = FileSystem.get(conf);
        Path dst = new Path(hdfsPath);
        for (FileStatus file : hdfs.listStatus(dst)) {
            // Raw epoch milliseconds, then the same instant rendered as a Date.
            System.out.println(file.getPath() + "\t"
                    + file.getModificationTime());
            System.out.println(file.getPath() + "\t"
                    + new Date(file.getModificationTime()));
        }
    }

    /** Checks whether file01.txt exists in the target directory. */
    public void testExists() throws Exception {
        Configuration conf = new Configuration();
        FileSystem hdfs = FileSystem.get(conf);
        Path dst = new Path(hdfsPath + "file01.txt");
        boolean ok = hdfs.exists(dst);
        System.out.println(ok ? "文件存在" : "文件不存在");
    }

    /** Prints, per block of file01.txt, the datanode hosts and block names that store it. */
    public void testFileBlockLocation() throws Exception {
        Configuration conf = new Configuration();
        FileSystem hdfs = FileSystem.get(conf);
        Path dst = new Path(hdfsPath + "file01.txt");
        FileStatus fileStatus = hdfs.getFileStatus(dst);
        // Ask for the locations of every block: offset 0 through the full length.
        BlockLocation[] blockLocations = hdfs.getFileBlockLocations(fileStatus,
                0, fileStatus.getLen());
        for (BlockLocation block : blockLocations) {
            System.out.println(Arrays.toString(block.getHosts()) + "\t"
                    + Arrays.toString(block.getNames()));
        }
    }

    /** Prints host name and address of every datanode in the cluster. */
    public void testGetHostName() throws Exception {
        Configuration conf = new Configuration();
        // getDataNodeStats() is specific to DistributedFileSystem, hence the cast;
        // this fails with a ClassCastException against a local file system.
        DistributedFileSystem hdfs = (DistributedFileSystem) FileSystem
                .get(conf);
        DatanodeInfo[] dataNodeStats = hdfs.getDataNodeStats();
        for (DatanodeInfo dataNode : dataNodeStats) {
            System.out.println(dataNode.getHostName() + "\t"
                    + dataNode.getName());
        }
    }
}
一个通过Java API操作HDFS文件系统的例子,本例子使用的是hadoop0.20的版本,在windows的eclipse下运行的时候,需要将core-site.xml和hdfs-site.xml放在src/bin目录中。
Java 代码示例:
/**
 * Examples of operating on an HDFS file system through the Hadoop Java API.
 *
 * <p>Written against Hadoop 0.20. When run from Eclipse on Windows,
 * core-site.xml and hdfs-site.xml must be on the classpath
 * (the original article places them under src/bin).
 */
public class TestHDFSFile {
    /** Local staging directory for files copied to/from HDFS. */
    private String localPath = "C:/D/JavaWorkSpace/bigdata/temp/";
    /** Target directory on the HDFS cluster. */
    private String hdfsPath = "hdfs://192.168.2.6:9000/user/hadoop/temp/";

    public static void main(String[] args) throws Exception {
        // Uncomment the demo you want to run.
        // new TestHDFSFile().testUpload();
        // new TestHDFSFile().testCreate();
        // new TestHDFSFile().testRename();
        // new TestHDFSFile().testDel();
        // new TestHDFSFile().testgetModifyTime();
        // new TestHDFSFile().testExists();
        // new TestHDFSFile().testFileBlockLocation();
        new TestHDFSFile().testGetHostName();
    }

    /** Prints every entry of {@code listStatus(path)} to stdout (shared by the demos below). */
    private void listFiles(FileSystem hdfs, Path path) throws Exception {
        for (FileStatus file : hdfs.listStatus(path)) {
            System.out.println(file.getPath());
        }
    }

    /** Uploads the local file file01.txt into the HDFS target directory. */
    public void testUpload() throws Exception {
        Configuration conf = new Configuration();
        FileSystem hdfs = FileSystem.get(conf);
        Path src = new Path(localPath + "file01.txt");
        Path dst = new Path(hdfsPath);
        hdfs.copyFromLocalFile(src, dst);
        // NOTE(review): "fs.default.name" is the Hadoop 0.20 key; later
        // releases rename it to "fs.defaultFS" — adjust if you upgrade.
        System.out.println("Upload to " + conf.get("fs.default.name"));
        listFiles(hdfs, dst);
    }

    /** Creates hello.txt on HDFS and writes a short UTF-8 payload into it. */
    public void testCreate() throws Exception {
        Configuration conf = new Configuration();
        // Fix: name the charset explicitly instead of relying on the platform default.
        byte[] buff = "hello world!".getBytes("UTF-8");
        FileSystem hdfs = FileSystem.get(conf);
        Path dst = new Path(hdfsPath + "hello.txt");
        FSDataOutputStream outputStream = null;
        try {
            outputStream = hdfs.create(dst);
            outputStream.write(buff, 0, buff.length);
        } finally {
            // Fix: the original caught Exception, printed the stack trace, and
            // continued as if the write succeeded. Let the failure propagate;
            // the stream is still always closed.
            if (outputStream != null) {
                outputStream.close();
            }
        }
        listFiles(hdfs, dst);
    }

    /** Renames hello.txt to hello2.txt and lists the directory afterwards. */
    public void testRename() throws Exception {
        Configuration conf = new Configuration();
        FileSystem hdfs = FileSystem.get(conf);
        Path dst = new Path(hdfsPath);
        Path frpath = new Path(hdfsPath + "hello.txt");
        Path topath = new Path(hdfsPath + "hello2.txt");
        // Fix: rename() signals failure through its return value, which was
        // silently discarded before. Report it like testDel() does.
        boolean ok = hdfs.rename(frpath, topath);
        System.out.println(ok ? "重命名成功" : "重命名失败");
        listFiles(hdfs, dst);
    }

    /** Deletes hello2.txt (non-recursively) and lists the directory afterwards. */
    public void testDel() throws Exception {
        Configuration conf = new Configuration();
        FileSystem hdfs = FileSystem.get(conf);
        Path dst = new Path(hdfsPath);
        Path topath = new Path(hdfsPath + "hello2.txt");
        // false = do not recurse; deleting a non-empty directory would fail.
        boolean ok = hdfs.delete(topath, false);
        System.out.println(ok ? "删除成功" : "删除失败");
        listFiles(hdfs, dst);
    }

    /** Prints the last-modification time of every entry in the target directory. */
    public void testgetModifyTime() throws Exception {
        Configuration conf = new Configuration();
        FileSystem hdfs = FileSystem.get(conf);
        Path dst = new Path(hdfsPath);
        for (FileStatus file : hdfs.listStatus(dst)) {
            // Raw epoch milliseconds, then the same instant rendered as a Date.
            System.out.println(file.getPath() + "\t"
                    + file.getModificationTime());
            System.out.println(file.getPath() + "\t"
                    + new Date(file.getModificationTime()));
        }
    }

    /** Checks whether file01.txt exists in the target directory. */
    public void testExists() throws Exception {
        Configuration conf = new Configuration();
        FileSystem hdfs = FileSystem.get(conf);
        Path dst = new Path(hdfsPath + "file01.txt");
        boolean ok = hdfs.exists(dst);
        System.out.println(ok ? "文件存在" : "文件不存在");
    }

    /** Prints, per block of file01.txt, the datanode hosts and block names that store it. */
    public void testFileBlockLocation() throws Exception {
        Configuration conf = new Configuration();
        FileSystem hdfs = FileSystem.get(conf);
        Path dst = new Path(hdfsPath + "file01.txt");
        FileStatus fileStatus = hdfs.getFileStatus(dst);
        // Ask for the locations of every block: offset 0 through the full length.
        BlockLocation[] blockLocations = hdfs.getFileBlockLocations(fileStatus,
                0, fileStatus.getLen());
        for (BlockLocation block : blockLocations) {
            System.out.println(Arrays.toString(block.getHosts()) + "\t"
                    + Arrays.toString(block.getNames()));
        }
    }

    /** Prints host name and address of every datanode in the cluster. */
    public void testGetHostName() throws Exception {
        Configuration conf = new Configuration();
        // getDataNodeStats() is specific to DistributedFileSystem, hence the cast;
        // this fails with a ClassCastException against a local file system.
        DistributedFileSystem hdfs = (DistributedFileSystem) FileSystem
                .get(conf);
        DatanodeInfo[] dataNodeStats = hdfs.getDataNodeStats();
        for (DatanodeInfo dataNode : dataNodeStats) {
            System.out.println(dataNode.getHostName() + "\t"
                    + dataNode.getName());
        }
    }
}
相关文章推荐
- HDFS文件系统操作JAVA-API
- JAVA API操作HDFS文件系统
- hadoop系列二:HDFS文件系统的命令及JAVA客户端API
- 4000 hadoop入门(三)之 javaAPI操作Hdfs,进行文件操作
- 从HDFS读取文件,把记录存到Hbase的java API操作
- hadoop系列之五JavaAPI操作HDFS文本系统
- HDFS的java接口——简化HDFS文件系统操作
- HDFS文件系统在JAVA中的操作(1) 读取文件
- HDFS文件系统简单的Java读写操作
- java实现对hdfs文件系统的上传,下载,删除,创建文件夹的操作演示
- HDFS的java接口——简化HDFS文件系统操作
- 用流的方式操作HDFS文件 JAVA API
- Hadoop系列-HDFS文件操作的JAVA API用法(七)
- JAVA API 实现hdfs文件操作
- java操作hdfs文件系统上的文件
- Java API操作HDFS文件
- hdfs文件系统api操作
- 利用hdfs的java api进行文件操作注意事项
- 排坑!java操作hadoop中的hdfs文件系统
- java开发系统内核:创建文件操作API