HDFS Java API
2016-08-09 14:00
351 次查看
package com.hadoop.hdfs; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.io.IOUtils; public class HdfsPractise { public static void main(String[] args) throws IOException { //makeDir(); //touchFile(); //uploadFromLocal(); //reName(); //delFile(); //searchBlock(); //getNodeInfo(); } private static void makeDir() throws IOException { Configuration conf = new Configuration(); conf.set("fs.defaultFS", "hdfs://118.192.153.114:8020"); FileSystem fs = FileSystem.get(conf); if(!fs.exists(new Path("/test4"))) { fs.mkdirs(new Path("/test4")); } } private static void touchFile() throws IOException { Configuration conf = new Configuration(); conf.set("fs.defaultFS", "hdfs://118.192.153.114:8020"); FileSystem fs = FileSystem.get(conf); //fs.setReplication(new Path("/test1"), (short) 1); FSDataOutputStream out = fs.create(new Path("/test2/sample_age_data.txt")); FileInputStream in = new FileInputStream("d:\\sample_age_data.txt"); IOUtils.copyBytes(in, out, 1024, true); } private static void uploadFromLocal() throws IOException { Configuration conf = new Configuration(); conf.set("fs.defaultFS", "hdfs://118.192.153.114:8020"); FileSystem fs = FileSystem.get(conf); fs.copyFromLocalFile(new Path("d:\\sample_age_data.txt"), new Path("/test2")); } private static void reName() throws IOException { Configuration conf = new Configuration(); conf.set("fs.defaultFS", "hdfs://118.192.153.114:8020"); FileSystem fs = FileSystem.get(conf); fs.rename(new Path("/test2/sample_age_data.txt"), new Path("/test2/fff")); } private static void delFile() throws IOException { Configuration conf = 
new Configuration(); conf.set("fs.defaultFS", "hdfs://118.192.153.114:8020"); FileSystem fs = FileSystem.get(conf); fs.delete(new Path("/test2"), true); } private static void searchBlock() throws IOException { Configuration conf = new Configuration(); conf.set("fs.defaultFS", "hdfs://118.192.153.114:8020"); FileSystem fs = FileSystem.get(conf); FileStatus fst = fs.getFileStatus(new Path("/test1/aaa.txt")); BlockLocation[] bls=fs.getFileBlockLocations(fst, 0, fst.getLen()); for (int i = 0,h=bls.length; i < h; i++) { String[] hosts= bls[i].getHosts(); System.out.println("block_"+i+"_location: "+hosts[0]); } } private static void getNodeInfo() throws IOException { Configuration conf = new Configuration(); conf.set("fs.defaultFS", "hdfs://118.192.153.114:8020"); FileSystem fs = FileSystem.get(conf); DistributedFileSystem hdfs = (DistributedFileSystem) fs; DatanodeInfo[] dns=hdfs.getDataNodeStats(); for (int i = 0,h=dns.length; i < h; i++) { System.out.println("datanode_"+i+"_name: "+dns[i].getHostName()); } } }
相关文章推荐
- JAVA操作HDFS API(hadoop)
- HDFS的JavaAPI操作
- HDFS的Java API操作代码
- HDFS学习笔记(2)hdfs_shell & JavaAPI
- eclipse设置运行在yarn上后失败,不设置就可以运行【以及javaAPI单点访问hdfs和运行YARN、zk访问】
- 通过JAVA—API访问HDFS 上的文件
- HDFS中Java的API使用测试
- HDFS JAVA API
- java调用API操作HDFS
- java的api操作HDFS实现递归打印目录并实现在Linux下的显示效果
- hdfs 常用java API---代码篇(一)
- eclipse上hdfs的javaapi调用文件或者获取节点信息没有权限
- hdfs-JAVA-API
- HDFS的Java API操作本地环境搭建
- Hadoop Java API 操作 hdfs--1
- JAVA操作HDFS API(hadoop)
- HDFS的JAVA API操作
- JAVA API操作HDFS文件系统
- HDFS的javaAPI
- JAVA操作HDFS API(hadoop) HDFS API详解