Sample Java Code for Common HDFS API Operations

Prerequisites

[successbox title="POM dependencies"]

<repositories>
<repository>
<id>cloudera</id>
<url>https://repository.cloudera.com/artifactory/cloudera-repos/</url>
</repository>
</repositories>
<dependencies>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
<version>2.6.0-mr1-cdh5.14.0</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<version>2.6.0-cdh5.14.0</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<version>2.6.0-cdh5.14.0</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapreduce-client-core</artifactId>
<version>2.6.0-cdh5.14.0</version>
</dependency>
<!-- https://mvnrepository.com/artifact/junit/junit -->
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.11</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.testng</groupId>
<artifactId>testng</artifactId>
<!-- pin a concrete version; the RELEASE meta-version is deprecated -->
<version>6.14.3</version>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>3.0</version>
<configuration>
<source>1.8</source>
<target>1.8</target>
<encoding>UTF-8</encoding>
<!-- <verbose>true</verbose> -->
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-shade-plugin</artifactId>
<version>2.4.3</version>
<executions>
<execution>
<phase>package</phase>
<goals>
<goal>shade</goal>
</goals>
<configuration>
<minimizeJar>true</minimizeJar>
</configuration>
</execution>
</executions>
</plugin>
<!-- <plugin>
<artifactId>maven-assembly-plugin </artifactId>
<configuration>
<descriptorRefs>
<descriptorRef>jar-with-dependencies</descriptorRef>
</descriptorRefs>
<archive>
<manifest>
<mainClass>cn.itcast.Hadoop.db.DBToHdfs2</mainClass>
</manifest>
</archive>
</configuration>
<executions>
<execution>
<id>make-assembly</id>
<phase>package</phase>
<goals>
<goal>single</goal>
</goals>
</execution>
</executions>
</plugin>-->
</plugins>
</build>

[/successbox]
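Every method in the demo below passes the NameNode URI to FileSystem.get explicitly. As an alternative, the address can be set once as fs.defaultFS on the Configuration object, after which FileSystem.get(conf) resolves it automatically. The sketch below is a minimal illustration of that pattern, assuming the same NameNode address (hdfs://192.168.100.201:8020) used in the demo code.

[infobox title="Optional: setting fs.defaultFS once (sketch)"]

package com.kami.wula01;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class ConfDemo {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Point the client at the NameNode once, instead of repeating the URI
        // (assumes the demo cluster's address; adjust for your environment)
        conf.set("fs.defaultFS", "hdfs://192.168.100.201:8020");
        // FileSystem.get(conf) now returns a client for that cluster
        FileSystem hdfs = FileSystem.get(conf);
        System.out.println(hdfs.getUri());
        hdfs.close();
    }
}

[/infobox]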

[infobox title="HDFS Java API demo"]

package com.kami.wula01;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.net.URI;

public class Demo {
    // An object of this class encapsulates the client-side (and cluster) configuration
    static Configuration conf = new Configuration();

    public static void listStatus() throws Exception {
        // FileSystem is the client's entry point to HDFS
        FileSystem hdfs = FileSystem.get(new URI("hdfs://192.168.100.201:8020"), conf);
        // List all entries directly under the given directory
        FileStatus[] stats = hdfs.listStatus(new Path("/"));

        // Print the full path of each entry
        for (FileStatus stat : stats) {
            System.out.println(stat.getPath().toString());
        }
        hdfs.close();
    }

    public static void rename() throws Exception {
        FileSystem hdfs = FileSystem.get(new URI("hdfs://192.168.100.201:8020"), conf);
        Path fromPath = new Path("/aaa");
        Path toPath = new Path("/aaaaaaa");
        boolean isRenamed = hdfs.rename(fromPath, toPath);
        System.out.println(isRenamed ? "Rename succeeded!" : "Rename failed!");
        hdfs.close();
    }

    public static void getTime() throws Exception {
        FileSystem hdfs = FileSystem.get(new URI("hdfs://192.168.100.201:8020"), conf);
        FileStatus fileStatus = hdfs.getFileStatus(new Path("/yarn-daemons.txt"));
        // Modification time in milliseconds since the epoch
        long modTime = fileStatus.getModificationTime();
        System.out.println(modTime);
        hdfs.close();
    }

    public static void deleteFile() throws Exception {
        FileSystem hdfs = FileSystem.get(new URI("hdfs://192.168.100.201:8020"), conf);
        // The second argument requests recursive deletion
        boolean isDeleted = hdfs.delete(new Path("/user/new"), true);
        System.out.println("Deleted? " + isDeleted);
        hdfs.close();
    }
    public static void mkdir() throws Exception {
        FileSystem hdfs = FileSystem.get(new URI("hdfs://192.168.100.201:8020"), conf);
        boolean created = hdfs.mkdirs(new Path("/user/new"));
        if (created) {
            System.out.println("Directory created!");
        } else {
            System.out.println("Directory creation failed!");
        }
        hdfs.close();
    }

    public static void addFile() throws Exception {
        FileSystem hdfs = FileSystem.get(new URI("hdfs://192.168.100.201:8020"), conf);
        byte[] buff = "hello hadoop world!\r\n hadoop ".getBytes();
        // Create (or overwrite) the file and write the buffer to it
        FSDataOutputStream outputStream = hdfs.create(new Path("/tmp/file.txt"));
        outputStream.write(buff, 0, buff.length);
        outputStream.close();
        hdfs.close();
    }

    public static void put() throws Exception {
        FileSystem hdfs = FileSystem.get(new URI("hdfs://192.168.100.201:8020"), conf);
        // Copy a local file into the HDFS root directory
        Path src = new Path("C:/123.py");
        Path dst = new Path("/");
        hdfs.copyFromLocalFile(src, dst);
        hdfs.close();
    }

    public static void check() throws Exception {
        FileSystem hdfs = FileSystem.get(new URI("hdfs://192.168.100.201:8020"), conf);
        Path findPath = new Path("/abc");
        boolean exists = hdfs.exists(findPath);
        System.out.println("Exists? " + exists);
        hdfs.close();
    }

    public static void main(String[] args) throws Exception {
        // List all entries under a path
        listStatus();
        // Rename a file or directory
        rename();
        // Get a file's modification time
        getTime();
        // Create a directory
        mkdir();
        // Delete a file or directory
        deleteFile();
        // Create a file and write data to it
        addFile();
        // Upload a local file
        put();
        // Check whether a path exists
        check();
    }

}

[/infobox]
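The demo class covers listing, renaming, deleting, creating directories, and uploading, but not reading a file back from HDFS. The following minimal sketch is a complement, not part of the original demo: it assumes the same NameNode address and the /tmp/file.txt file written by addFile() above, and streams its contents to stdout with FileSystem.open and IOUtils.copyBytes.

[infobox title="Complement: reading a file from HDFS (sketch)"]

package com.kami.wula01;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

import java.net.URI;

public class ReadDemo {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem hdfs = FileSystem.get(new URI("hdfs://192.168.100.201:8020"), conf);
        // Open the file written by addFile() in the demo above (assumed to exist)
        FSDataInputStream in = hdfs.open(new Path("/tmp/file.txt"));
        try {
            // Copy the stream's contents to stdout; 4096 is the buffer size,
            // and false leaves the streams open for the finally block to close
            IOUtils.copyBytes(in, System.out, 4096, false);
        } finally {
            IOUtils.closeStream(in);
            hdfs.close();
        }
    }
}

[/infobox]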

