
Batch Downloading Landsat-8 Data with Python

This post adapts the article Automated Bulk Downloads of Landsat-8 Data Products in Python, with minor modifications, to batch-download Landsat-8 data from Amazon S3.

1. Get the Landsat-8 Path/Row Numbers Covering the Study Area

import geopandas as gpd

LANDSAT_PATH = './data/external/Landsat8'  # local storage directory
wrs_path = './data/external/Landsat8/wrs2/WRS2_descending.shp'  # WRS-2 descending grid shapefile
bounds_path = './data/processed/research_area.shp'  # study-area shapefile

bounds = gpd.GeoDataFrame.from_file(bounds_path)
wrs = gpd.GeoDataFrame.from_file(wrs_path)
# Keep only the WRS-2 footprints that intersect the study area
wrs_intersection = wrs[wrs.intersects(bounds.geometry[0])]
paths, rows = wrs_intersection['PATH'].values, wrs_intersection['ROW'].values
for i, (path, row) in enumerate(zip(paths, rows)):
    print('Image', i + 1, ' - path:', path, 'row:', row)
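
To double-check the result, the intersecting WRS-2 footprints can be drawn over the study area. A minimal sketch, assuming matplotlib is installed (it reuses wrs_intersection and bounds from above):

import matplotlib.pyplot as plt

# Draw the study area and the intersecting WRS-2 footprints together
fig, ax = plt.subplots(figsize=(8, 8))
wrs_intersection.plot(ax=ax, facecolor='none', edgecolor='red')  # footprint outlines
bounds.plot(ax=ax, facecolor='none', edgecolor='blue')           # study area
for _, r in wrs_intersection.iterrows():
    centroid = r.geometry.centroid
    ax.annotate('{}/{}'.format(r.PATH, r.ROW), xy=(centroid.x, centroid.y), ha='center')
plt.show()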
           

2. Retrieve Scene Records for Each Path/Row

Query the scene inventory that Amazon publishes for the records we need. Scenes are filtered to those below a cloud-cover threshold, and _T2 and _RT products, which would still require calibration and preprocessing, are excluded as well.

import pandas as pd

CLOUD_MAX = 10  # cloud-cover threshold (%); not fixed in the original snippet, set as needed

def get_bulk_list(path, row):
    # Load the scene inventory published on Amazon S3
    s3_scenes = pd.read_csv('./data/external/scene_list')
    print('Path:', path, 'Row:', row)
    # Filter for images matching path, row, cloud cover and processing state;
    # _T2 (Tier 2) and _RT (Real-Time) products are excluded
    scenes = s3_scenes[(s3_scenes.path == path) & (s3_scenes.row == row) &
                       (s3_scenes.cloudCover <= CLOUD_MAX) &
                       (~s3_scenes.productId.str.contains('_T2')) &
                       (~s3_scenes.productId.str.contains('_RT'))]
    print(' Found {} images\n'.format(len(scenes)))
    return scenes
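
The scene_list file read above is the inventory CSV that Amazon published for the landsat-pds bucket as a gzipped download. A minimal sketch for fetching and unpacking it; the URL is the one the inventory was historically served from and the bucket has since been deprecated, so treat it as an assumption:

import gzip
import shutil
import requests

# Download the gzipped scene inventory and unpack it to the expected path.
# NOTE: the landsat-pds bucket has been deprecated; this URL may no longer work.
SCENE_LIST_URL = 'https://landsat-pds.s3.amazonaws.com/c1/L8/scene_list.gz'
response = requests.get(SCENE_LIST_URL, stream=True)
response.raise_for_status()
with open('./data/external/scene_list.gz', 'wb') as f:
    shutil.copyfileobj(response.raw, f)
with gzip.open('./data/external/scene_list.gz', 'rb') as fin, \
        open('./data/external/scene_list', 'wb') as fout:
    shutil.copyfileobj(fin, fout)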
           

3. Save the Download Links

First collect the download links and save them to a local file in JSON format; the files themselves are downloaded afterwards.

import requests
from bs4 import BeautifulSoup

def get_urls(row):
    # Collect the download URL of every file on this scene's index page
    url_list = []
    print('\n', 'EntityId:', row.productId, '\n')
    print(' Checking content: ', '\n')
    response = requests.get(row.download_url)
    # Proceed only if the request succeeded (HTTP 200)
    if response.status_code == 200:
        # Parse the index page with BeautifulSoup
        html = BeautifulSoup(response.content, 'html.parser')
        # Each file of the scene is listed in an <li> tag
        for li in html.find_all('li'):
            # Get the href of the link inside the list item
            file = li.find_next('a').get('href')
            url = row.download_url.replace('index.html', file)
            url_list.append(url)
    return url_list
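
get_urls returns every file on the index page (all bands plus metadata). If only some bands are wanted, the list can be narrowed by file suffix; a small sketch (the suffixes chosen below are just an example):

def filter_bands(url_list, suffixes=('_B4.TIF', '_B5.TIF', '_MTL.txt')):
    # Keep only files whose names end with one of the wanted suffixes
    return [url for url in url_list if url.endswith(suffixes)]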
    

    
import json

if __name__ == '__main__':
    bulk_frame = get_bulk_list(118, 39)
    down_url = {}
    for i, row in bulk_frame.iterrows():
        entity_id = row.productId
        # Print the product ID being processed
        print('\n', 'EntityId:', entity_id, '\n')
        down_url[entity_id] = get_urls(row)
    # Save the links as JSON so the download step can parse them reliably
    with open('11839.txt', 'w') as f:
        json.dump(down_url, f)
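
To verify what was saved, the file can be read back and summarized; a quick sanity-check sketch, assuming the file was written with json.dump as above:

import json

# Report how many URLs were collected for each scene
with open('11839.txt', 'r') as f:
    saved = json.load(f)
for entity_id, urls in saved.items():
    print(entity_id, len(urls), 'files')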
           

4. Download

Read the download links back from the JSON file and download the files, saving each scene to its own folder.

import json
import os
import wget

file_path = './11839/11839.txt'  # file holding the download links
base_path = os.path.dirname(os.path.abspath(file_path))
with open(file_path, 'r') as f:
    file_list = json.load(f)  # dict mapping productId -> list of URLs
for key in file_list.keys():
    entity_dir = os.path.join(base_path, key)
    os.makedirs(entity_dir, exist_ok=True)  # one folder per scene
    os.chdir(entity_dir)  # wget saves into the current working directory
    value = file_list[key]  # download links for this scene
    for url in value:
        name = url.split('/')[-1]  # file name
        if os.path.exists(name):  # skip files that already exist
            print('\nDownloaded: ', name)
            continue
        print('\nDownloading: ', name)
        try:  # skip failed downloads (better: log them, see the sketch below)
            wget.download(url)
        except Exception:
            continue
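
As the comment above notes, failed downloads are better logged than silently skipped. A minimal sketch that appends failures to a text file (the file name failed_downloads.log is just a placeholder):

import wget

def download_with_log(url, log_path='failed_downloads.log'):
    # Try the download; on any error, record the URL and the reason, then move on
    try:
        wget.download(url)
    except Exception as e:
        with open(log_path, 'a') as log:
            log.write('{}\t{}\n'.format(url, e))

Calling download_with_log(url) inside the loop above replaces the bare try/except and leaves a record of everything that still needs to be retried.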