
HDFS - Chapter 3: HDFS Client Operations

3.1 Client Environment Setup

1. Copy the compiled Hadoop jars that match your computer's operating system to a path containing no Chinese characters (e.g. D:\Develop\hadoop-2.7.2)

2. Configure the HADOOP_HOME environment variable

3. Configure the Path environment variable (typically by adding %HADOOP_HOME%\bin)

4. Create a Maven project

5. Add the corresponding dependency coordinates plus the logging configuration, as shown below

<!-- pom.xml -->
<dependencies>
		<dependency>
			<groupId>junit</groupId>
			<artifactId>junit</artifactId>
			<version>RELEASE</version>
		</dependency>
		<dependency>
			<groupId>org.apache.logging.log4j</groupId>
			<artifactId>log4j-core</artifactId>
			<version>2.8.2</version>
		</dependency>
		<dependency>
			<groupId>org.apache.hadoop</groupId>
			<artifactId>hadoop-common</artifactId>
			<version>2.7.2</version>
		</dependency>
		<dependency>
			<groupId>org.apache.hadoop</groupId>
			<artifactId>hadoop-client</artifactId>
			<version>2.7.2</version>
		</dependency>
		<dependency>
			<groupId>org.apache.hadoop</groupId>
			<artifactId>hadoop-hdfs</artifactId>
			<version>2.7.2</version>
		</dependency>
		<dependency>
			<groupId>jdk.tools</groupId>
			<artifactId>jdk.tools</artifactId>
			<version>1.8</version>
			<scope>system</scope>
			<systemPath>${JAVA_HOME}/lib/tools.jar</systemPath>
		</dependency>
</dependencies>

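Note: a plain Maven build does not interpolate ${JAVA_HOME}; environment variables are only exposed under the env. prefix. If the jdk.tools dependency above fails to resolve, this substitute (assuming JAVA_HOME is set in your environment) usually works:

<systemPath>${env.JAVA_HOME}/lib/tools.jar</systemPath>
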
# log4j.properties (place in src/main/resources)
log4j.rootLogger=INFO, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d %p [%c] - %m%n
log4j.appender.logfile=org.apache.log4j.FileAppender
log4j.appender.logfile.File=target/spring.log
log4j.appender.logfile.layout=org.apache.log4j.PatternLayout
log4j.appender.logfile.layout.ConversionPattern=%d %p [%c] - %m%n

3.2 HDFS API Operations
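
Each snippet in this section is written as a JUnit test method, so they can all live in one test class. A minimal sketch of the wrapper, with the imports the snippets rely on (the class name HdfsClientTest is illustrative):

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.io.IOUtils;
import org.junit.Test;

public class HdfsClientTest {
	// the test methods from Sections 3.2 and 3.3 go here
}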

(1) File upload

@Test
public void testCopyFromLocalFile() throws IOException, InterruptedException, URISyntaxException {

		// 1 Get the file system, connecting to the NameNode as user "root"
		Configuration configuration = new Configuration();
		configuration.set("dfs.replication", "2");
		FileSystem fs = FileSystem.get(new URI("hdfs://hadoop01:9000"), configuration, "root");

		// 2 Upload the local file to HDFS
		fs.copyFromLocalFile(new Path("e:/test.txt"), new Path("/test.txt"));

		// 3 Release resources
		fs.close();

		System.out.println("over");
}

Configuration priority, from highest to lowest: (1) values set in the client code > (2) user-defined configuration files on the classpath > (3) the server's default configuration.
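
For example, to see rule (2) in action, drop an hdfs-site.xml into the project's src/main/resources (the value 1 here is just for illustration):

<?xml version="1.0" encoding="UTF-8"?>
<configuration>
	<property>
		<name>dfs.replication</name>
		<value>1</value>
	</property>
</configuration>

With configuration.set("dfs.replication", "2") in the code, the uploaded file gets 2 replicas; remove that line and the classpath file's value of 1 wins; remove the file as well and the server default (3 out of the box) applies.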

(2) File download

@Test
public void testCopyToLocalFile() throws IOException, InterruptedException, URISyntaxException {

		// 1 Get the file system
		Configuration configuration = new Configuration();
		FileSystem fs = FileSystem.get(new URI("hdfs://hadoop01:9000"), configuration, "root");

		// 2 Download the file
		// boolean delSrc: whether to delete the source file after copying
		// Path src: the HDFS path of the file to download
		// Path dst: the local path to download the file to
		// boolean useRawLocalFileSystem: true bypasses client-side checksums, so no .crc file is written locally
		fs.copyToLocalFile(false, new Path("/test.txt"), new Path("e:/test.txt"), true);

		// 3 Release resources
		fs.close();
}

(3) Create a directory

@Test
public void testMkdirs() throws IOException, InterruptedException, URISyntaxException {

		// 1 Get the file system
		Configuration configuration = new Configuration();
		FileSystem fs = FileSystem.get(new URI("hdfs://hadoop01:9000"), configuration, "root");

		// 2 Create the directory (parent directories are created as needed, like mkdir -p)
		fs.mkdirs(new Path("/xlz/20200719"));

		// 3 Release resources
		fs.close();
}

(4) Delete a directory

@Test
public void testDelete() throws IOException, InterruptedException, URISyntaxException {

	// 1 Get the file system
	Configuration configuration = new Configuration();
	FileSystem fs = FileSystem.get(new URI("hdfs://hadoop01:9000"), configuration, "root");

	// 2 Delete the directory; the second argument enables recursive deletion
	fs.delete(new Path("/xlz/20200719"), true);

	// 3 Release resources
	fs.close();
}

(5) Rename a file

@Test
public void testRename() throws IOException, InterruptedException, URISyntaxException {

	// 1 Get the file system
	Configuration configuration = new Configuration();
	FileSystem fs = FileSystem.get(new URI("hdfs://hadoop01:9000"), configuration, "root");

	// 2 Rename the file
	fs.rename(new Path("/test.txt"), new Path("/xlz.txt"));

	// 3 Release resources
	fs.close();
}

(6) View file details

@Test
public void testListFiles() throws IOException, InterruptedException, URISyntaxException {

	// 1 Get the file system
	Configuration configuration = new Configuration();
	FileSystem fs = FileSystem.get(new URI("hdfs://hadoop01:9000"), configuration, "root");

	// 2 Get file details; the second argument makes the listing recursive
	RemoteIterator<LocatedFileStatus> listFiles = fs.listFiles(new Path("/"), true);

	while (listFiles.hasNext()) {
		LocatedFileStatus status = listFiles.next();

		// Print details
		// File name
		System.out.println(status.getPath().getName());
		// Length in bytes
		System.out.println(status.getLen());
		// Permissions
		System.out.println(status.getPermission());
		// Group
		System.out.println(status.getGroup());

		// Get the block locations
		BlockLocation[] blockLocations = status.getBlockLocations();

		for (BlockLocation blockLocation : blockLocations) {

			// Hosts storing each block
			String[] hosts = blockLocation.getHosts();

			for (String host : hosts) {
				System.out.println(host);
			}
		}

		System.out.println("-----------divider----------");
	}

	// 3 Release resources
	fs.close();
}

(7) Distinguish files from directories

@Test
public void testListStatus() throws IOException, InterruptedException, URISyntaxException {

	// 1 Get the file system
	Configuration configuration = new Configuration();
	FileSystem fs = FileSystem.get(new URI("hdfs://hadoop01:9000"), configuration, "root");

	// 2 List the root directory and tell files from directories
	FileStatus[] listStatus = fs.listStatus(new Path("/"));

	for (FileStatus fileStatus : listStatus) {

		if (fileStatus.isFile()) {
			// It is a file
			System.out.println("f:" + fileStatus.getPath().getName());
		} else {
			// It is a directory
			System.out.println("d:" + fileStatus.getPath().getName());
		}
	}

	// 3 Release resources
	fs.close();
}

3.3 HDFS I/O Stream Operations

(a) File upload

@Test
public void putFileToHDFS() throws IOException, InterruptedException, URISyntaxException {

	// 1 Get the file system
	Configuration configuration = new Configuration();
	FileSystem fs = FileSystem.get(new URI("hdfs://hadoop01:9000"), configuration, "root");

	// 2 Create the input stream over the local file
	FileInputStream fis = new FileInputStream(new File("e:/upload.txt"));

	// 3 Get the output stream to the HDFS path
	FSDataOutputStream fos = fs.create(new Path("/upload.txt"));

	// 4 Copy the stream; the buffer size is taken from the configuration
	IOUtils.copyBytes(fis, fos, configuration);

	// 5 Release resources
	IOUtils.closeStream(fos);
	IOUtils.closeStream(fis);
	fs.close();
}

(b) File download

@Test
public void getFileFromHDFS() throws IOException, InterruptedException, URISyntaxException {

	// 1 Get the file system
	Configuration configuration = new Configuration();
	FileSystem fs = FileSystem.get(new URI("hdfs://hadoop01:9000"), configuration, "root");

	// 2 Get the input stream from HDFS
	FSDataInputStream fis = fs.open(new Path("/upload.txt"));

	// 3 Get the output stream to the local file
	FileOutputStream fos = new FileOutputStream(new File("e:/download.txt"));

	// 4 Copy the stream
	IOUtils.copyBytes(fis, fos, configuration);

	// 5 Release resources
	IOUtils.closeStream(fos);
	IOUtils.closeStream(fis);
	fs.close();
}

(c) Positioned file reads (seek)

// Download the first block of the file
@Test
public void readFileSeek1() throws IOException, InterruptedException, URISyntaxException {

	// 1 Get the file system
	Configuration configuration = new Configuration();
	FileSystem fs = FileSystem.get(new URI("hdfs://hadoop01:9000"), configuration, "root");

	// 2 Get the input stream from HDFS
	FSDataInputStream fis = fs.open(new Path("/hadoop-2.7.2.tar.gz"));

	// 3 Create the output stream for the first part
	FileOutputStream fos = new FileOutputStream(new File("e:/hadoop-2.7.2.tar.gz.part1"));

	// 4 Copy exactly the first block (128 MB); read() may return fewer
	// bytes than requested, so track the byte count instead of assuming
	// each call fills the buffer
	byte[] buf = new byte[1024];
	long remaining = 1024L * 1024 * 128;
	int len;
	while (remaining > 0 && (len = fis.read(buf, 0, (int) Math.min(buf.length, remaining))) != -1) {
		fos.write(buf, 0, len);
		remaining -= len;
	}

	// 5 Release resources
	IOUtils.closeStream(fis);
	IOUtils.closeStream(fos);
	fs.close();
}
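
As an alternative to the manual loop, Hadoop's IOUtils has a count-limited overload, copyBytes(InputStream, OutputStream, long count, boolean close), which should copy the same 128 MB in one call (a sketch):

// Copy exactly the first 128 MB, then close both streams
IOUtils.copyBytes(fis, fos, 1024L * 1024 * 128, true);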


// Download the second block of the file
@Test
public void readFileSeek2() throws IOException, InterruptedException, URISyntaxException {

	// 1 Get the file system
	Configuration configuration = new Configuration();
	FileSystem fs = FileSystem.get(new URI("hdfs://hadoop01:9000"), configuration, "root");

	// 2 Open the input stream
	FSDataInputStream fis = fs.open(new Path("/hadoop-2.7.2.tar.gz"));

	// 3 Seek past the first block (128 MB)
	fis.seek(1024 * 1024 * 128);

	// 4 Create the output stream for the second part
	FileOutputStream fos = new FileOutputStream(new File("e:/hadoop-2.7.2.tar.gz.part2"));

	// 5 Copy the rest of the file
	IOUtils.copyBytes(fis, fos, configuration);

	// 6 Release resources
	IOUtils.closeStream(fis);
	IOUtils.closeStream(fos);
	fs.close();
}

Merging the parts:

In a Windows command prompt, change to the directory E:\ and run the following command to append part2 to part1:

type hadoop-2.7.2.tar.gz.part2 >> hadoop-2.7.2.tar.gz.part1

Once the merge finishes, rename hadoop-2.7.2.tar.gz.part1 to hadoop-2.7.2.tar.gz.

Unpacking it confirms that the merged tar archive is complete and intact.
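
Optionally, you can double-check the merge by comparing checksums against the original archive; certutil ships with Windows:

certutil -hashfile hadoop-2.7.2.tar.gz MD5

The hash should match the one computed over the copy of the archive originally uploaded to HDFS.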
