
Python Batch Download of Landsat-8 Data (I)

Following the article Automated Bulk Downloads of Landsat-8 Data Products in Python, with slight modifications, this post batch-downloads Landsat-8 data from Amazon S3.

1. Get the Landsat-8 path and row numbers for the study area

import geopandas as gpd

LANDSAT_PATH = './data/external/Landsat8'  # local data directory
wrs_path = './data/external/Landsat8/wrs2/WRS2_descending.shp'  # WRS-2 descending grid shapefile
bounds_path = './data/processed/research_area.shp'  # study-area shapefile

bounds = gpd.GeoDataFrame.from_file(bounds_path)
wrs = gpd.GeoDataFrame.from_file(wrs_path)
# Keep only the WRS-2 tiles that intersect the study area
wrs_intersection = wrs[wrs.intersects(bounds.geometry[0])]
paths, rows = wrs_intersection['PATH'].values, wrs_intersection['ROW'].values
for i, (path, row) in enumerate(zip(paths, rows)):
    print('Image', i + 1, ' - path:', path, 'row:', row)
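To sanity-check the result, here is a minimal sketch (assuming matplotlib is installed; not part of the original script) that overlays the selected tiles on the study area:

import matplotlib.pyplot as plt

# Red outlines: intersecting WRS-2 tiles; blue outline: study area
ax = wrs_intersection.plot(edgecolor='red', facecolor='none', figsize=(8, 8))
bounds.plot(ax=ax, edgecolor='blue', facecolor='none')
plt.show()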
           

2. Get scene metadata from the path and row

Retrieve the required file information from the scene list that Amazon provides. The filter keeps scenes with cloud cover below a threshold; _T2 and _RT products still require calibration and preprocessing, so they are excluded as well.

import pandas as pd

CLOUD_MAX = 5  # maximum acceptable cloud cover (%); adjust as needed

def get_bulk_list(path, row):
    # Check the images available on Amazon S3
    s3_scenes = pd.read_csv('./data/external/scene_list')
    print('Path:', path, 'Row:', row)
    # Filter the Landsat Amazon S3 table for images matching path, row,
    # cloud cover and processing state (exclude _T2 and _RT products)
    scenes = s3_scenes[(s3_scenes.path == path) & (s3_scenes.row == row) &
                       (s3_scenes.cloudCover <= CLOUD_MAX) &
                       (~s3_scenes.productId.str.contains('_T2')) &
                       (~s3_scenes.productId.str.contains('_RT'))]
    print(' Found {} images\n'.format(len(scenes)))
    return scenes
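The code above assumes the scene list has already been saved to ./data/external/scene_list. If it has not, a minimal sketch to fetch and unpack it (the scene_list.gz URL below is the one documented for the landsat-pds bucket; treat it as an assumption and verify it is still live):

import gzip
import shutil
import urllib.request

SCENE_LIST_URL = 'http://landsat-pds.s3.amazonaws.com/c1/L8/scene_list.gz'
urllib.request.urlretrieve(SCENE_LIST_URL, './data/external/scene_list.gz')
# Decompress to the plain-text CSV that get_bulk_list() reads
with gzip.open('./data/external/scene_list.gz', 'rb') as src, \
        open('./data/external/scene_list', 'wb') as dst:
    shutil.copyfileobj(src, dst)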
           

3. Save the download URLs

First collect the download URLs and save them to a local file in JSON format; the downloading itself happens afterwards.

import requests
from bs4 import BeautifulSoup

def get_urls(row):
    url_list = []
    print('\n', 'EntityId:', row.productId, '\n')
    print(' Checking content: ', '\n')
    response = requests.get(row.download_url)
    # If the response status code is fine (200)
    if response.status_code == 200:
        # Parse the scene's index page with BeautifulSoup
        html = BeautifulSoup(response.content, 'html.parser')
        # Each file of this image is listed under an <li> tag
        for li in html.find_all('li'):
            # Get the href attribute and build the full download URL
            file = li.find_next('a').get('href')
            url = row.download_url.replace('index.html', file)
            url_list.append(url)
    return url_list
    

    
import json

if __name__ == '__main__':
    bulk_frame = get_bulk_list(118, 39)
    down_url = {}
    for i, row in bulk_frame.iterrows():
        EntityID = row.productId
        # Collect the download URLs for this product ID
        down_url[EntityID] = get_urls(row)
    # Save the URL dict to a local file in JSON format
    with open('11839.txt', 'w') as f:
        json.dump(down_url, f)
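If only some files per scene are wanted, the dict can be thinned before the json.dump call. This is not part of the original script; the suffixes below are illustrative (band 4, band 5, and the MTL metadata file):

# Hypothetical tweak: keep only selected files per scene
WANTED = ('_B4.TIF', '_B5.TIF', '_MTL.txt')
down_url = {eid: [u for u in urls if u.endswith(WANTED)]
            for eid, urls in down_url.items()}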
           

4. Download

Read the download URLs from the JSON file and download the files. Each scene is saved to its own folder.

import json
import os
import wget

file_path = './11839/11839.txt'  # file holding the download URLs
base_path = os.path.dirname(os.path.abspath(file_path))
with open(file_path, 'r') as f:
    file_list = json.load(f)  # parse the JSON back into a dict
for key in file_list.keys():
    entity_dir = os.path.join(base_path, key)
    os.makedirs(entity_dir, exist_ok=True)  # one folder per scene
    os.chdir(entity_dir)  # work inside that folder
    value = file_list[key]  # download URLs for this scene
    for url in value:
        name = url.split('/')[-1]  # file name
        if os.path.exists(name):  # skip files already downloaded
            print('\nDownloaded: ', name)
            continue
        print('\nDownloading: ', name)
        try:  # skip on failure (a log file would be better here)
            wget.download(url)
        except Exception:
            continue
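As the comment notes, silently skipping failures loses information. A minimal sketch of the missing log file, meant as a drop-in replacement for the try/except inside the loop (the name failed.log and the message format are choices made here, not part of the original script):

import logging

# Append failed URLs to a log file next to the URL list
logging.basicConfig(filename=os.path.join(base_path, 'failed.log'),
                    level=logging.WARNING)
try:
    wget.download(url)
except Exception as e:
    logging.warning('download failed: %s (%s)', url, e)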