1. Details
    1. Architecture diagram
    2. Read/write flow diagrams
2. Operations
    1. Integrate the required jars with Maven
<!-- HDFS operation API -->
<dependency>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-common</artifactId>
    <version>2.7.1</version>
</dependency>
<dependency>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-hdfs</artifactId>
    <version>2.7.1</version>
</dependency>
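For reference, these coordinates belong inside the <dependencies> element of the project's pom.xml. A minimal skeleton, assuming a plain Java project (the com.example / hdfs-demo coordinates below are placeholders):

<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>com.example</groupId>        <!-- placeholder -->
    <artifactId>hdfs-demo</artifactId>    <!-- placeholder -->
    <version>1.0-SNAPSHOT</version>
    <dependencies>
        <!-- the two Hadoop dependencies shown above go here -->
    </dependencies>
</project>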
    2. Java implementation
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;

public class HdfsDemo {
    public static void main(String[] args) throws IOException {
        // On Windows, force-load hadoop.dll if native IO fails
        // System.load("F:\\hadoop\\bin\\hadoop.dll");
        // System.setProperty("hadoop.home.dir", "F:\\hadoop");

        // Run as the HDFS user; required when the local user is not root
        System.setProperty("HADOOP_USER_NAME", "root");

        // 1. Point the client at the NameNode
        Configuration configuration = new Configuration();
        configuration.set("fs.defaultFS", "hdfs://192.168.80.111:9000");

        // 2. Obtain the file system object
        FileSystem fileSystem = FileSystem.get(configuration);

        // Is the path a regular file?
        System.out.println(fileSystem.isFile(new Path("/input/wordcount")));
        System.out.println(fileSystem.isFile(new Path("/input")));
        System.out.println("-----------------------");
        // Is the path a directory?
        System.out.println(fileSystem.isDirectory(new Path("/input/wordcount")));
        System.out.println(fileSystem.isDirectory(new Path("/input")));

        Path outputPath = new Path("/output2");
        if (!fileSystem.exists(outputPath)) {
            // Create the directory, then upload a local file into it
            System.out.println(fileSystem.mkdirs(outputPath));
            FileInputStream fileInputStream =
                    new FileInputStream(new File("C:\\Users\\Lenovo\\Desktop\\1.txt"));
            FSDataOutputStream fos = fileSystem.create(new Path(outputPath, "1.txt"));
            // Upload: copy the local stream into HDFS (copyBytes closes both streams)
            IOUtils.copyBytes(fileInputStream, fos, configuration);
        } else {
            // Download: copy the file uploaded on a previous run back to local disk
            FileOutputStream fileOutputStream =
                    new FileOutputStream("C:\\Users\\Lenovo\\Desktop\\2.txt");
            FSDataInputStream fsi = fileSystem.open(new Path(outputPath, "1.txt"));
            IOUtils.copyBytes(fsi, fileOutputStream, configuration);
        }
        fileSystem.close();
    }
}
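The stream-based copy above is the low-level route. FileSystem also exposes one-call helpers (copyFromLocalFile, copyToLocalFile, listStatus) that cover the same upload/download cases. A minimal sketch under the same cluster address and paths as above; the class name HdfsConvenienceDemo is just for illustration, and copyToLocalFile's last argument set to true skips the local .crc checksum file (useful on Windows without winutils):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.io.IOException;

public class HdfsConvenienceDemo {
    public static void main(String[] args) throws IOException {
        System.setProperty("HADOOP_USER_NAME", "root");
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://192.168.80.111:9000");
        FileSystem fs = FileSystem.get(conf);

        // Upload in one call: local file -> HDFS
        fs.copyFromLocalFile(new Path("C:\\Users\\Lenovo\\Desktop\\1.txt"),
                new Path("/output2/1.txt"));

        // Download in one call: HDFS -> local; true = use the raw local FS (no .crc file)
        fs.copyToLocalFile(false, new Path("/output2/1.txt"),
                new Path("C:\\Users\\Lenovo\\Desktop\\2.txt"), true);

        // List the directory contents
        for (FileStatus st : fs.listStatus(new Path("/output2"))) {
            System.out.println(st.getPath() + "  " + st.getLen() + " bytes");
        }
        fs.close();
    }
}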