
Hyperspectral Image Classification Paper Reproduction (3): Building the Network Architecture

The day before yesterday I fried my computer while adding RAM and had to reinstall the system. Last night I set up the TensorFlow environment again, and today I reproduced a paper from a while back.

Deep Feature Fusion via Two-Stream Convolutional Neural Network for Hyperspectral Image Classification

Published in: IEEE TRANSACTIONS ON GEOSCIENCE AND REMOTE SENSING, VOL. 58, NO. 4, APRIL 2020

Authors: Xian Li, Student Member, IEEE, Mingli Ding, and Aleksandra Pižurica, Senior Member, IEEE

Paper Overview

Dataset: Indian Pines

Train : test = 1 : 9

Environment: Jupyter Notebook, tensorflow==2.1

100 epochs

batch size = 100

A few details are not exactly the same as in the paper, so my results fall slightly short of the authors'.

import keras
from keras import regularizers
from keras.layers import Conv2D, Conv3D, Flatten, Dense, MaxPooling2D, Reshape, BatchNormalization, GlobalAveragePooling2D, Activation
import tensorflow as tf
from keras.layers import Dropout, Input, Lambda, Concatenate, concatenate
from keras.models import Model
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from keras.utils import np_utils

from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report, cohen_kappa_score
from sklearn.preprocessing import MinMaxScaler
from operator import truediv

import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import scipy.io as sio
import os
import spectral
           
def loadData():
    data_path = os.path.join(r'D:\Program Files (x86)\Anaconda\jupyter_path', 'dataset')
    data = sio.loadmat(os.path.join(data_path, 'Indian_pines_corrected.mat'))['indian_pines_corrected']
    labels = sio.loadmat(os.path.join(data_path, 'Indian_pines_gt.mat'))['indian_pines_gt']
    return data, labels
           
def getData():
    dataset=sio.loadmat(r'D:\Program Files (x86)\Anaconda\jupyter_path\dataset\Indian_pines_corrected.mat')['indian_pines_corrected']
    labels=sio.loadmat(r'D:\Program Files (x86)\Anaconda\jupyter_path\dataset\Indian_pines_gt.mat')['indian_pines_gt']
    return dataset,labels
           
def pca_change(X, num_components):
    newX = np.reshape(X, (-1, X.shape[2]))
    pca = PCA(n_components=num_components, whiten=True)
    newX = pca.fit_transform(newX)
    newX = np.reshape(newX, (X.shape[0], X.shape[1], num_components))
    return newX
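
For reference, a quick shape trace of pca_change (a sketch on a random stand-in array, not the real cube):

X_demo = np.random.rand(145, 145, 200)              # hypothetical stand-in for the data cube
# (145,145,200) -> reshape (21025,200) -> PCA -> (21025,k) -> reshape (145,145,k);
# whiten=True additionally rescales each retained component to unit variance
print(pca_change(X_demo, num_components=3).shape)   # (145, 145, 3)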
           
def padwithzeros(X, margin=2):
    newX = np.zeros(
        (X.shape[0] + 2 * margin, X.shape[1] + 2 * margin, X.shape[2]))
    x_offset = margin
    y_offset = margin
    newX[x_offset:X.shape[0] + x_offset, y_offset:X.shape[1] + y_offset, :] = X
    return newX
           
def creatCube(X,y,windowsize=25,removeZeroLabels=True):
    margin=int((windowsize-1)/2)   # e.g. margin=12 for windowsize=25, margin=3 for windowsize=7
    zeroPaddedX=padwithzeros(X,margin=margin)
    # one patch per pixel, each of size windowsize x windowsize x bands
    patchesData=np.zeros((X.shape[0]*X.shape[1],windowsize,windowsize,X.shape[2]))
    patchesLabels=np.zeros(X.shape[0]*X.shape[1])
    patchIndex=0
    for r in range(margin,zeroPaddedX.shape[0]-margin):
        for c in range(margin,zeroPaddedX.shape[1]-margin):
            patch=zeroPaddedX[r-margin:r+margin+1,c-margin:c+margin+1]
            patchesData[patchIndex,:,:,:]=patch
            patchesLabels[patchIndex]=y[r-margin,c-margin]
            patchIndex=patchIndex+1
    if removeZeroLabels:
        # drop unlabeled (background) pixels and shift labels to start at 0
        patchesData=patchesData[patchesLabels>0,:,:,:]
        patchesLabels=patchesLabels[patchesLabels>0]
        patchesLabels-=1
    return patchesData,patchesLabels
           
def splitTrainTest(X,Y,Ratio,randoms=2019):
    X_train,X_test,Y_train,Y_test=train_test_split(X,Y,test_size=Ratio,random_state=randoms,
                                                  stratify=Y)
    return X_train,X_test,Y_train,Y_test
           
X_Global,Y_Global=getData()
X_Local,Y_Local=getData()
X_Global.shape,Y_Global.shape,X_Local.shape,Y_Local.shape
           
((145, 145, 200), (145, 145), (145, 145, 200), (145, 145))
           
# min-max normalize band by band (note: MinMaxScaler scales each column of a band independently)
transfer=MinMaxScaler()
X_Global_norm=transfer.fit_transform(X_Global[:,:,0])
X_Global_norm=X_Global_norm.reshape((145,145,1))
for i in range(1,200):
    A=transfer.fit_transform(X_Global[:,:,i])
    A=A.reshape((145,145,1))
    X_Global_norm=tf.concat((X_Global_norm,A),axis=-1)
X_Global_norm.shape
           
TensorShape([145, 145, 200])
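
As an aside, the band-by-band loop with tf.concat above is slow. A vectorized NumPy sketch that rescales each whole band to [0, 1] (note this is slightly different from MinMaxScaler, which scales every column of a band separately):

def minmax_per_band(X):
    # X: (H, W, B) cube; scale each band by its global min/max
    mins = X.min(axis=(0, 1), keepdims=True)    # shape (1, 1, B)
    maxs = X.max(axis=(0, 1), keepdims=True)
    return (X - mins) / (maxs - mins + 1e-12)   # epsilon guards constant bands

# X_Global_norm = minmax_per_band(X_Global.astype(np.float64))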
           
X_Global=pca_change(X_Global_norm,num_components=3)
X_Global.shape
           
(145, 145, 3)
           
X_Local=pca_change(X_Global_norm,num_components=20)
X_Local.shape
           
(145, 145, 20)
           
X_Local,Y_Local=creatCube(X_Local,Y_Local,windowsize=7)
X_Local.shape,Y_Local.shape
           
((10249, 7, 7, 20), (10249,))
           
X_Global,Y_Global=creatCube(X_Global,Y_Global,windowsize=27)
X_Global.shape,Y_Global.shape
           
((10249, 27, 27, 3), (10249,))
           
X_Local_train,X_Local_test,Y_Local_train,Y_Local_test=splitTrainTest(X_Local,Y_Local,0.9)
X_Global_train,X_Global_test,Y_Global_train,Y_Global_test=splitTrainTest(X_Global,Y_Global,0.9)
X_Local_train.shape,X_Local_test.shape,Y_Local_train.shape,Y_Local_test.shape
           
((1024, 7, 7, 20), (9225, 7, 7, 20), (1024,), (9225,))
           
X_Global_train.shape,X_Global_test.shape,Y_Global_train.shape,Y_Global_test.shape
           
((1024, 27, 27, 3), (9225, 27, 27, 3), (1024,), (9225,))
           
Y_Local_test=np_utils.to_categorical(Y_Local_test)
Y_Local_train=np_utils.to_categorical(Y_Local_train)
Y_Local_train.shape
           
(1024, 16)
           
Y_Global_train=np_utils.to_categorical(Y_Global_train)
Y_Global_train.shape
           
(1024, 16)
           

Building the Network Architecture

def squeeze_excitation_layer(input_layer,filter_sq,filter_ex):
    # squeeze: global average pool each feature map down to one value per channel
    squeeze=GlobalAveragePooling2D()(input_layer)
    # excitation: two dense layers yield a sigmoid gate per channel
    excitation=Dense(units=filter_sq,activation='relu')(squeeze)
    excitation=Dense(units=filter_ex,activation='sigmoid')(excitation)
    excitation=Reshape((1,1,filter_ex))(excitation)
    # rescale: broadcast the (1,1,C) gates over the spatial dimensions
    scale=input_layer*excitation
    return scale
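
A quick sanity check of the SE block (a scratch-session sketch on a hypothetical (7,7,192) feature map; it builds throwaway layers):

se_in = Input((7, 7, 192))
se_out = squeeze_excitation_layer(se_in, 192, 192)
print(se_out.shape)   # (None, 7, 7, 192): spatial size unchanged, channels rescaled by the gates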
           
def SE_Conv_moule_1(input_layer):
    Conv_layer1=Conv2D(filters=192,kernel_size=(1,1),padding='same',activation='relu')(input_layer)
    Conv_layer1=BatchNormalization()(Conv_layer1)
    output_layer=squeeze_excitation_layer(Conv_layer1,192,192)
    return output_layer
           
def SE_Conv_moule_2(input_layer):
    Conv_layer1=Conv2D(filters=192,kernel_size=(3,3),padding='same',activation='relu')(input_layer)
    Conv_layer1=BatchNormalization()(Conv_layer1)
    output_layer=squeeze_excitation_layer(Conv_layer1,192,192)
    return output_layer
           
def SE_Conv_moule_3(input_layer):
    Conv_layer1=Conv2D(filters=128,kernel_size=(3,3),padding='same',activation='relu')(input_layer)
    Conv_layer1=BatchNormalization()(Conv_layer1)
    output_layer=squeeze_excitation_layer(Conv_layer1,128,128)
    return output_layer
           
def SE_Res_module(input_layer):
    layer1=BatchNormalization()(input_layer)
    layer2=Conv2D(filters=128,kernel_size=(3,3),padding='same',activation='relu')(layer1)
    layer3=BatchNormalization()(layer2)
    layer4=Conv2D(filters=128,kernel_size=(3,3),padding='same')(layer3)
    layer5=BatchNormalization()(layer4)
    layer6=squeeze_excitation_layer(layer5,128,128)
    layer7=tf.add(layer6,layer1)
    layer8=Activation('relu')(layer7)
    output_layer=BatchNormalization()(layer8)
    return output_layer
           
def Local(input_layer):
    layer2=SE_Conv_moule_1(input_layer)
    layer3=SE_Conv_moule_2(layer2)
    layer4=SE_Conv_moule_2(layer3)
    layer5=SE_Conv_moule_3(layer4)
    output_layer=MaxPooling2D(padding='valid')(layer5)
    return output_layer
           
def Global(input_layer):
    layer2=SE_Conv_moule_3(input_layer)
    layer3=MaxPooling2D(padding='valid')(layer2)
    layer4=SE_Res_module(layer3)
    layer5=SE_Res_module(layer4)
    layer6=MaxPooling2D(padding='valid')(layer5)
    layer7=SE_Conv_moule_3(layer6)
    layer8=MaxPooling2D(padding='valid')(layer7)
    layer9=SE_Conv_moule_3(layer8)
    output_layer=MaxPooling2D(strides=1,padding='same')(layer9)
    return output_layer
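
Both streams are designed to end at a 3x3x128 map so they can be concatenated channel-wise. A scratch-session sketch of the downsampling (again builds throwaway layers):

# Local:  7 -> ('same' convs keep 7) -> (2x2 valid max pool) -> 3
# Global: 27 -> 13 -> 6 -> 3 -> 3 (the final pool uses strides=1, padding='same')
print(Local(Input((7, 7, 20))).shape)      # (None, 3, 3, 128)
print(Global(Input((27, 27, 3))).shape)    # (None, 3, 3, 128)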
           
Local_W=7
Local_n_component=20
Global_W=27
Global_n_component=3
           
input_layer_local=Input((Local_W,Local_W,Local_n_component),name='input_layer_local')   # input_layer_local: (7,7,20)
input_layer_Global=Input((Global_W,Global_W,Global_n_component),name='input_layer_Global')   # input_layer_Global: (27,27,3)
           
output_layer_local=Local(input_layer_local)    # Local stream output: (None,3,3,128)

output_layer_Global=Global(input_layer_Global) # Global stream output: (None,3,3,128)
#output_layer_Global,output_layer_local

concat_layer=concatenate([output_layer_local,output_layer_Global],axis=-1)

flatten_layer=GlobalAveragePooling2D()(concat_layer)

Fully_connect_layer1=Dense(units=200,activation='sigmoid',kernel_regularizer=regularizers.l2(0.02))(flatten_layer)
Fully_connect_layer2=Dense(units=100,activation='sigmoid')(Fully_connect_layer1)
output_layer_final=Dense(units=16,activation='softmax',name='output_layer_final')(Fully_connect_layer2)
output_layer_final
           
<tf.Tensor 'output_layer_final/Softmax:0' shape=(None, 16) dtype=float32>
           
Model: "functional_1"
__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to                     
==================================================================================================
input_layer_Global (InputLayer) [(None, 27, 27, 3)]  0                                            
__________________________________________________________________________________________________
conv2d_4 (Conv2D)               (None, 27, 27, 128)  3584        input_layer_Global[0][0]         
__________________________________________________________________________________________________
batch_normalization_4 (BatchNor (None, 27, 27, 128)  512         conv2d_4[0][0]                   
__________________________________________________________________________________________________
global_average_pooling2d_4 (Glo (None, 128)          0           batch_normalization_4[0][0]      
__________________________________________________________________________________________________
dense_8 (Dense)                 (None, 128)          16512       global_average_pooling2d_4[0][0] 
__________________________________________________________________________________________________
dense_9 (Dense)                 (None, 128)          16512       dense_8[0][0]                    
__________________________________________________________________________________________________
reshape_4 (Reshape)             (None, 1, 1, 128)    0           dense_9[0][0]                    
__________________________________________________________________________________________________
tf_op_layer_Mul_4 (TensorFlowOp [(None, 27, 27, 128) 0           batch_normalization_4[0][0]      
                                                                 reshape_4[0][0]                  
__________________________________________________________________________________________________
max_pooling2d_1 (MaxPooling2D)  (None, 13, 13, 128)  0           tf_op_layer_Mul_4[0][0]          
__________________________________________________________________________________________________
batch_normalization_5 (BatchNor (None, 13, 13, 128)  512         max_pooling2d_1[0][0]            
__________________________________________________________________________________________________
conv2d_5 (Conv2D)               (None, 13, 13, 128)  147584      batch_normalization_5[0][0]      
__________________________________________________________________________________________________
batch_normalization_6 (BatchNor (None, 13, 13, 128)  512         conv2d_5[0][0]                   
__________________________________________________________________________________________________
conv2d_6 (Conv2D)               (None, 13, 13, 128)  147584      batch_normalization_6[0][0]      
__________________________________________________________________________________________________
batch_normalization_7 (BatchNor (None, 13, 13, 128)  512         conv2d_6[0][0]                   
__________________________________________________________________________________________________
global_average_pooling2d_5 (Glo (None, 128)          0           batch_normalization_7[0][0]      
__________________________________________________________________________________________________
dense_10 (Dense)                (None, 128)          16512       global_average_pooling2d_5[0][0] 
__________________________________________________________________________________________________
dense_11 (Dense)                (None, 128)          16512       dense_10[0][0]                   
__________________________________________________________________________________________________
reshape_5 (Reshape)             (None, 1, 1, 128)    0           dense_11[0][0]                   
__________________________________________________________________________________________________
tf_op_layer_Mul_5 (TensorFlowOp [(None, 13, 13, 128) 0           batch_normalization_7[0][0]      
                                                                 reshape_5[0][0]                  
__________________________________________________________________________________________________
tf_op_layer_Add (TensorFlowOpLa [(None, 13, 13, 128) 0           tf_op_layer_Mul_5[0][0]          
                                                                 batch_normalization_5[0][0]      
__________________________________________________________________________________________________
activation (Activation)         (None, 13, 13, 128)  0           tf_op_layer_Add[0][0]            
__________________________________________________________________________________________________
batch_normalization_8 (BatchNor (None, 13, 13, 128)  512         activation[0][0]                 
__________________________________________________________________________________________________
input_layer_local (InputLayer)  [(None, 7, 7, 20)]   0                                            
__________________________________________________________________________________________________
batch_normalization_9 (BatchNor (None, 13, 13, 128)  512         batch_normalization_8[0][0]      
__________________________________________________________________________________________________
conv2d (Conv2D)                 (None, 7, 7, 192)    4032        input_layer_local[0][0]          
__________________________________________________________________________________________________
conv2d_7 (Conv2D)               (None, 13, 13, 128)  147584      batch_normalization_9[0][0]      
__________________________________________________________________________________________________
batch_normalization (BatchNorma (None, 7, 7, 192)    768         conv2d[0][0]                     
__________________________________________________________________________________________________
batch_normalization_10 (BatchNo (None, 13, 13, 128)  512         conv2d_7[0][0]                   
__________________________________________________________________________________________________
global_average_pooling2d (Globa (None, 192)          0           batch_normalization[0][0]        
__________________________________________________________________________________________________
conv2d_8 (Conv2D)               (None, 13, 13, 128)  147584      batch_normalization_10[0][0]     
__________________________________________________________________________________________________
dense (Dense)                   (None, 192)          37056       global_average_pooling2d[0][0]   
__________________________________________________________________________________________________
batch_normalization_11 (BatchNo (None, 13, 13, 128)  512         conv2d_8[0][0]                   
__________________________________________________________________________________________________
dense_1 (Dense)                 (None, 192)          37056       dense[0][0]                      
__________________________________________________________________________________________________
global_average_pooling2d_6 (Glo (None, 128)          0           batch_normalization_11[0][0]     
__________________________________________________________________________________________________
reshape (Reshape)               (None, 1, 1, 192)    0           dense_1[0][0]                    
__________________________________________________________________________________________________
dense_12 (Dense)                (None, 128)          16512       global_average_pooling2d_6[0][0] 
__________________________________________________________________________________________________
tf_op_layer_Mul (TensorFlowOpLa [(None, 7, 7, 192)]  0           batch_normalization[0][0]        
                                                                 reshape[0][0]                    
__________________________________________________________________________________________________
dense_13 (Dense)                (None, 128)          16512       dense_12[0][0]                   
__________________________________________________________________________________________________
conv2d_1 (Conv2D)               (None, 7, 7, 192)    331968      tf_op_layer_Mul[0][0]            
__________________________________________________________________________________________________
reshape_6 (Reshape)             (None, 1, 1, 128)    0           dense_13[0][0]                   
__________________________________________________________________________________________________
batch_normalization_1 (BatchNor (None, 7, 7, 192)    768         conv2d_1[0][0]                   
__________________________________________________________________________________________________
tf_op_layer_Mul_6 (TensorFlowOp [(None, 13, 13, 128) 0           batch_normalization_11[0][0]     
                                                                 reshape_6[0][0]                  
__________________________________________________________________________________________________
global_average_pooling2d_1 (Glo (None, 192)          0           batch_normalization_1[0][0]      
__________________________________________________________________________________________________
tf_op_layer_Add_1 (TensorFlowOp [(None, 13, 13, 128) 0           tf_op_layer_Mul_6[0][0]          
                                                                 batch_normalization_9[0][0]      
__________________________________________________________________________________________________
dense_2 (Dense)                 (None, 192)          37056       global_average_pooling2d_1[0][0] 
__________________________________________________________________________________________________
activation_1 (Activation)       (None, 13, 13, 128)  0           tf_op_layer_Add_1[0][0]          
__________________________________________________________________________________________________
dense_3 (Dense)                 (None, 192)          37056       dense_2[0][0]                    
__________________________________________________________________________________________________
batch_normalization_12 (BatchNo (None, 13, 13, 128)  512         activation_1[0][0]               
__________________________________________________________________________________________________
reshape_1 (Reshape)             (None, 1, 1, 192)    0           dense_3[0][0]                    
__________________________________________________________________________________________________
max_pooling2d_2 (MaxPooling2D)  (None, 6, 6, 128)    0           batch_normalization_12[0][0]     
__________________________________________________________________________________________________
tf_op_layer_Mul_1 (TensorFlowOp [(None, 7, 7, 192)]  0           batch_normalization_1[0][0]      
                                                                 reshape_1[0][0]                  
__________________________________________________________________________________________________
conv2d_9 (Conv2D)               (None, 6, 6, 128)    147584      max_pooling2d_2[0][0]            
__________________________________________________________________________________________________
conv2d_2 (Conv2D)               (None, 7, 7, 192)    331968      tf_op_layer_Mul_1[0][0]          
__________________________________________________________________________________________________
batch_normalization_13 (BatchNo (None, 6, 6, 128)    512         conv2d_9[0][0]                   
__________________________________________________________________________________________________
batch_normalization_2 (BatchNor (None, 7, 7, 192)    768         conv2d_2[0][0]                   
__________________________________________________________________________________________________
global_average_pooling2d_7 (Glo (None, 128)          0           batch_normalization_13[0][0]     
__________________________________________________________________________________________________
global_average_pooling2d_2 (Glo (None, 192)          0           batch_normalization_2[0][0]      
__________________________________________________________________________________________________
dense_14 (Dense)                (None, 128)          16512       global_average_pooling2d_7[0][0] 
__________________________________________________________________________________________________
dense_4 (Dense)                 (None, 192)          37056       global_average_pooling2d_2[0][0] 
__________________________________________________________________________________________________
dense_15 (Dense)                (None, 128)          16512       dense_14[0][0]                   
__________________________________________________________________________________________________
dense_5 (Dense)                 (None, 192)          37056       dense_4[0][0]                    
__________________________________________________________________________________________________
reshape_7 (Reshape)             (None, 1, 1, 128)    0           dense_15[0][0]                   
__________________________________________________________________________________________________
reshape_2 (Reshape)             (None, 1, 1, 192)    0           dense_5[0][0]                    
__________________________________________________________________________________________________
tf_op_layer_Mul_7 (TensorFlowOp [(None, 6, 6, 128)]  0           batch_normalization_13[0][0]     
                                                                 reshape_7[0][0]                  
__________________________________________________________________________________________________
tf_op_layer_Mul_2 (TensorFlowOp [(None, 7, 7, 192)]  0           batch_normalization_2[0][0]      
                                                                 reshape_2[0][0]                  
__________________________________________________________________________________________________
max_pooling2d_3 (MaxPooling2D)  (None, 3, 3, 128)    0           tf_op_layer_Mul_7[0][0]          
__________________________________________________________________________________________________
conv2d_3 (Conv2D)               (None, 7, 7, 128)    221312      tf_op_layer_Mul_2[0][0]          
__________________________________________________________________________________________________
conv2d_10 (Conv2D)              (None, 3, 3, 128)    147584      max_pooling2d_3[0][0]            
__________________________________________________________________________________________________
batch_normalization_3 (BatchNor (None, 7, 7, 128)    512         conv2d_3[0][0]                   
__________________________________________________________________________________________________
batch_normalization_14 (BatchNo (None, 3, 3, 128)    512         conv2d_10[0][0]                  
__________________________________________________________________________________________________
global_average_pooling2d_3 (Glo (None, 128)          0           batch_normalization_3[0][0]      
__________________________________________________________________________________________________
global_average_pooling2d_8 (Glo (None, 128)          0           batch_normalization_14[0][0]     
__________________________________________________________________________________________________
dense_6 (Dense)                 (None, 128)          16512       global_average_pooling2d_3[0][0] 
__________________________________________________________________________________________________
dense_16 (Dense)                (None, 128)          16512       global_average_pooling2d_8[0][0] 
__________________________________________________________________________________________________
dense_7 (Dense)                 (None, 128)          16512       dense_6[0][0]                    
__________________________________________________________________________________________________
dense_17 (Dense)                (None, 128)          16512       dense_16[0][0]                   
__________________________________________________________________________________________________
reshape_3 (Reshape)             (None, 1, 1, 128)    0           dense_7[0][0]                    
__________________________________________________________________________________________________
reshape_8 (Reshape)             (None, 1, 1, 128)    0           dense_17[0][0]                   
__________________________________________________________________________________________________
tf_op_layer_Mul_3 (TensorFlowOp [(None, 7, 7, 128)]  0           batch_normalization_3[0][0]      
                                                                 reshape_3[0][0]                  
__________________________________________________________________________________________________
tf_op_layer_Mul_8 (TensorFlowOp [(None, 3, 3, 128)]  0           batch_normalization_14[0][0]     
                                                                 reshape_8[0][0]                  
__________________________________________________________________________________________________
max_pooling2d (MaxPooling2D)    (None, 3, 3, 128)    0           tf_op_layer_Mul_3[0][0]          
__________________________________________________________________________________________________
max_pooling2d_4 (MaxPooling2D)  (None, 3, 3, 128)    0           tf_op_layer_Mul_8[0][0]          
__________________________________________________________________________________________________
concatenate (Concatenate)       (None, 3, 3, 256)    0           max_pooling2d[0][0]              
                                                                 max_pooling2d_4[0][0]            
__________________________________________________________________________________________________
global_average_pooling2d_9 (Glo (None, 256)          0           concatenate[0][0]                
__________________________________________________________________________________________________
dense_18 (Dense)                (None, 200)          51400       global_average_pooling2d_9[0][0] 
__________________________________________________________________________________________________
dense_19 (Dense)                (None, 100)          20100       dense_18[0][0]                   
__________________________________________________________________________________________________
output_layer_final (Dense)      (None, 16)           1616        dense_19[0][0]                   
==================================================================================================
Total params: 2,280,412
Trainable params: 2,276,188
Non-trainable params: 4,224
__________________________________________________________________________________________________
           
adam=Adam(lr=0.01)
model.compile(loss='categorical_crossentropy',optimizer=adam,metrics=['accuracy'])
           
filepath='Two_Stream.h5'
checkpoint=ModelCheckpoint(filepath,
                           monitor='loss',
                           verbose=1,
                           save_best_only=True,
                           mode='min')
callback_list=[checkpoint]
           
history=model.fit([X_Local_train,X_Global_train],
                  Y_Local_train,
                  epochs=100,
                  batch_size=100,
#                   validation_data=([X_Local_test,X_Global_test],Y_Local_test),
#                   validation_batch_size=128,
#                   validation_steps=20,
                  callbacks=callback_list
                 )
           
Epoch 1/100
11/11 [==============================] - ETA: 0s - loss: 3.7551 - accuracy: 0.3770
Epoch 00001: loss improved from inf to 3.75509, saving model to Two_Stream.h5
11/11 [==============================] - 1s 94ms/step - loss: 3.7551 - accuracy: 0.3770
Epoch 2/100
 9/11 [=======================>......] - ETA: 0s - loss: 1.6571 - accuracy: 0.6744
Epoch 00002: loss improved from 3.75509 to 1.62017, saving model to Two_Stream.h5
11/11 [==============================] - 1s 80ms/step - loss: 1.6202 - accuracy: 0.6826
Epoch 3/100
 9/11 [=======================>......] - ETA: 0s - loss: 1.1055 - accuracy: 0.7811
Epoch 00003: loss improved from 1.62017 to 1.07142, saving model to Two_Stream.h5
11/11 [==============================] - 1s 90ms/step - loss: 1.0714 - accuracy: 0.7910
Epoch 4/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.7079 - accuracy: 0.8656
Epoch 00004: loss improved from 1.07142 to 0.70675, saving model to Two_Stream.h5
11/11 [==============================] - 1s 122ms/step - loss: 0.7068 - accuracy: 0.8652
Epoch 5/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.6073 - accuracy: 0.9089
Epoch 00005: loss improved from 0.70675 to 0.60870, saving model to Two_Stream.h5
11/11 [==============================] - 1s 78ms/step - loss: 0.6087 - accuracy: 0.9082
Epoch 6/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.5277 - accuracy: 0.9200
Epoch 00006: loss improved from 0.60870 to 0.53938, saving model to Two_Stream.h5
11/11 [==============================] - 1s 82ms/step - loss: 0.5394 - accuracy: 0.9150
Epoch 7/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.5046 - accuracy: 0.9033
Epoch 00007: loss improved from 0.53938 to 0.49501, saving model to Two_Stream.h5
11/11 [==============================] - 1s 76ms/step - loss: 0.4950 - accuracy: 0.9092
Epoch 8/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.4519 - accuracy: 0.9344
Epoch 00008: loss improved from 0.49501 to 0.45719, saving model to Two_Stream.h5
11/11 [==============================] - 1s 81ms/step - loss: 0.4572 - accuracy: 0.9346
Epoch 9/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.4858 - accuracy: 0.9167
Epoch 00009: loss did not improve from 0.45719
11/11 [==============================] - 0s 27ms/step - loss: 0.4832 - accuracy: 0.9238
Epoch 10/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.4572 - accuracy: 0.9522
Epoch 00010: loss did not improve from 0.45719
11/11 [==============================] - 0s 27ms/step - loss: 0.4686 - accuracy: 0.9434
Epoch 11/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.4058 - accuracy: 0.9444
Epoch 00011: loss improved from 0.45719 to 0.41041, saving model to Two_Stream.h5
11/11 [==============================] - 1s 79ms/step - loss: 0.4104 - accuracy: 0.9453
Epoch 12/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.2430 - accuracy: 0.9833
Epoch 00012: loss improved from 0.41041 to 0.24041, saving model to Two_Stream.h5
11/11 [==============================] - 1s 89ms/step - loss: 0.2404 - accuracy: 0.9814
Epoch 13/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.2070 - accuracy: 0.9722
Epoch 00013: loss improved from 0.24041 to 0.21630, saving model to Two_Stream.h5
11/11 [==============================] - 1s 82ms/step - loss: 0.2163 - accuracy: 0.9717
Epoch 14/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.3709 - accuracy: 0.9456
Epoch 00014: loss did not improve from 0.21630
11/11 [==============================] - 0s 27ms/step - loss: 0.3659 - accuracy: 0.9492
Epoch 15/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.3677 - accuracy: 0.9644
Epoch 00015: loss did not improve from 0.21630
11/11 [==============================] - 0s 27ms/step - loss: 0.3678 - accuracy: 0.9648
Epoch 16/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.2954 - accuracy: 0.9756
Epoch 00016: loss did not improve from 0.21630
11/11 [==============================] - 0s 27ms/step - loss: 0.3004 - accuracy: 0.9736
Epoch 17/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.2114 - accuracy: 0.9811
Epoch 00017: loss did not improve from 0.21630
11/11 [==============================] - 0s 27ms/step - loss: 0.2199 - accuracy: 0.9795
Epoch 18/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.5043 - accuracy: 0.9333
Epoch 00018: loss did not improve from 0.21630
11/11 [==============================] - 0s 26ms/step - loss: 0.5002 - accuracy: 0.9385
Epoch 19/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.4192 - accuracy: 0.9656
Epoch 00019: loss did not improve from 0.21630
11/11 [==============================] - 0s 27ms/step - loss: 0.4042 - accuracy: 0.9678
Epoch 20/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.3034 - accuracy: 0.9600
Epoch 00020: loss did not improve from 0.21630
11/11 [==============================] - 0s 27ms/step - loss: 0.2955 - accuracy: 0.9639
Epoch 21/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.2473 - accuracy: 0.9744
Epoch 00021: loss did not improve from 0.21630
11/11 [==============================] - 0s 27ms/step - loss: 0.2455 - accuracy: 0.9756
Epoch 22/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.2213 - accuracy: 0.9711
Epoch 00022: loss did not improve from 0.21630
11/11 [==============================] - 0s 27ms/step - loss: 0.2239 - accuracy: 0.9717
Epoch 23/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.2485 - accuracy: 0.9767
Epoch 00023: loss did not improve from 0.21630
11/11 [==============================] - 0s 27ms/step - loss: 0.2646 - accuracy: 0.9746
Epoch 24/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.3333 - accuracy: 0.9600
Epoch 00024: loss did not improve from 0.21630
11/11 [==============================] - 0s 26ms/step - loss: 0.3316 - accuracy: 0.9619
Epoch 25/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.2855 - accuracy: 0.9733
Epoch 00025: loss did not improve from 0.21630
11/11 [==============================] - 0s 27ms/step - loss: 0.2852 - accuracy: 0.9707
Epoch 26/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.2842 - accuracy: 0.9711
Epoch 00026: loss did not improve from 0.21630
11/11 [==============================] - 0s 27ms/step - loss: 0.2791 - accuracy: 0.9736
Epoch 27/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.2078 - accuracy: 0.9911
Epoch 00027: loss improved from 0.21630 to 0.20301, saving model to Two_Stream.h5
11/11 [==============================] - 1s 80ms/step - loss: 0.2030 - accuracy: 0.9893
Epoch 28/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.2561 - accuracy: 0.9733
Epoch 00028: loss did not improve from 0.20301
11/11 [==============================] - 0s 27ms/step - loss: 0.2663 - accuracy: 0.9727
Epoch 29/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.3210 - accuracy: 0.9744
Epoch 00029: loss did not improve from 0.20301
11/11 [==============================] - 0s 26ms/step - loss: 0.3245 - accuracy: 0.9727
Epoch 30/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.3072 - accuracy: 0.9789
Epoch 00030: loss did not improve from 0.20301
11/11 [==============================] - 0s 27ms/step - loss: 0.3074 - accuracy: 0.9785
Epoch 31/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.2852 - accuracy: 0.9711
Epoch 00031: loss did not improve from 0.20301
11/11 [==============================] - 0s 27ms/step - loss: 0.2851 - accuracy: 0.9727
Epoch 32/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.2839 - accuracy: 0.9689
Epoch 00032: loss did not improve from 0.20301
11/11 [==============================] - 0s 28ms/step - loss: 0.2994 - accuracy: 0.9658
Epoch 33/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.4101 - accuracy: 0.9667
Epoch 00033: loss did not improve from 0.20301
11/11 [==============================] - 0s 27ms/step - loss: 0.4086 - accuracy: 0.9668
Epoch 34/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.2736 - accuracy: 0.9789
Epoch 00034: loss did not improve from 0.20301
11/11 [==============================] - 0s 28ms/step - loss: 0.2780 - accuracy: 0.9785
Epoch 35/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.2972 - accuracy: 0.9678
Epoch 00035: loss did not improve from 0.20301
11/11 [==============================] - 0s 26ms/step - loss: 0.3017 - accuracy: 0.9688
Epoch 36/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.3201 - accuracy: 0.9600
Epoch 00036: loss did not improve from 0.20301
11/11 [==============================] - 0s 26ms/step - loss: 0.3149 - accuracy: 0.9619
Epoch 37/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.3677 - accuracy: 0.9600
Epoch 00037: loss did not improve from 0.20301
11/11 [==============================] - 0s 27ms/step - loss: 0.3711 - accuracy: 0.9639
Epoch 38/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.3504 - accuracy: 0.9811
Epoch 00038: loss did not improve from 0.20301
11/11 [==============================] - 0s 27ms/step - loss: 0.3486 - accuracy: 0.9775
Epoch 39/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.2901 - accuracy: 0.9711
Epoch 00039: loss did not improve from 0.20301
11/11 [==============================] - 0s 27ms/step - loss: 0.2994 - accuracy: 0.9697
Epoch 40/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.2268 - accuracy: 0.9867
Epoch 00040: loss did not improve from 0.20301
11/11 [==============================] - 0s 27ms/step - loss: 0.2296 - accuracy: 0.9854
Epoch 41/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.2845 - accuracy: 0.9822
Epoch 00041: loss did not improve from 0.20301
11/11 [==============================] - 0s 27ms/step - loss: 0.3040 - accuracy: 0.9785
Epoch 42/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.3431 - accuracy: 0.9844
Epoch 00042: loss did not improve from 0.20301
11/11 [==============================] - 0s 26ms/step - loss: 0.3339 - accuracy: 0.9854
Epoch 43/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.2091 - accuracy: 0.9844
Epoch 00043: loss did not improve from 0.20301
11/11 [==============================] - 0s 27ms/step - loss: 0.2054 - accuracy: 0.9854
Epoch 44/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.1775 - accuracy: 0.9833
Epoch 00044: loss improved from 0.20301 to 0.19227, saving model to Two_Stream.h5
11/11 [==============================] - 1s 88ms/step - loss: 0.1923 - accuracy: 0.9814
Epoch 45/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.2461 - accuracy: 0.9689
Epoch 00045: loss did not improve from 0.19227
11/11 [==============================] - 0s 27ms/step - loss: 0.2467 - accuracy: 0.9727
Epoch 46/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.1851 - accuracy: 0.9967
Epoch 00046: loss improved from 0.19227 to 0.17395, saving model to Two_Stream.h5
11/11 [==============================] - 1s 79ms/step - loss: 0.1740 - accuracy: 0.9971
Epoch 47/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.0740 - accuracy: 0.9978
Epoch 00047: loss improved from 0.17395 to 0.07853, saving model to Two_Stream.h5
11/11 [==============================] - 1s 87ms/step - loss: 0.0785 - accuracy: 0.9961
Epoch 48/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.1373 - accuracy: 0.9911
Epoch 00048: loss did not improve from 0.07853
11/11 [==============================] - 0s 26ms/step - loss: 0.1395 - accuracy: 0.9922
Epoch 49/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.1230 - accuracy: 0.9911
Epoch 00049: loss did not improve from 0.07853
11/11 [==============================] - 0s 27ms/step - loss: 0.1302 - accuracy: 0.9883
Epoch 50/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.7161 - accuracy: 0.9600
Epoch 00050: loss did not improve from 0.07853
11/11 [==============================] - 0s 27ms/step - loss: 0.7761 - accuracy: 0.9609
Epoch 51/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.8508 - accuracy: 0.9889
Epoch 00051: loss did not improve from 0.07853
11/11 [==============================] - 0s 27ms/step - loss: 0.8014 - accuracy: 0.9893
Epoch 52/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.3282 - accuracy: 0.9789
Epoch 00052: loss did not improve from 0.07853
11/11 [==============================] - 0s 27ms/step - loss: 0.3232 - accuracy: 0.9795
Epoch 53/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.3244 - accuracy: 0.9811
Epoch 00053: loss did not improve from 0.07853
11/11 [==============================] - 0s 27ms/step - loss: 0.3232 - accuracy: 0.9805
Epoch 54/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.2586 - accuracy: 0.9811
Epoch 00054: loss did not improve from 0.07853
11/11 [==============================] - 0s 27ms/step - loss: 0.2540 - accuracy: 0.9814
Epoch 55/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.1867 - accuracy: 0.9889
Epoch 00055: loss did not improve from 0.07853
11/11 [==============================] - 0s 26ms/step - loss: 0.1850 - accuracy: 0.9893
Epoch 56/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.1220 - accuracy: 0.9878
Epoch 00056: loss did not improve from 0.07853
11/11 [==============================] - 0s 27ms/step - loss: 0.1248 - accuracy: 0.9863
Epoch 57/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.2068 - accuracy: 0.9833
Epoch 00057: loss did not improve from 0.07853
11/11 [==============================] - 0s 26ms/step - loss: 0.2066 - accuracy: 0.9834
Epoch 58/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.1895 - accuracy: 0.9911
Epoch 00058: loss did not improve from 0.07853
11/11 [==============================] - 0s 27ms/step - loss: 0.1911 - accuracy: 0.9893
Epoch 59/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.4751 - accuracy: 0.9478
Epoch 00059: loss did not improve from 0.07853
11/11 [==============================] - 0s 27ms/step - loss: 0.4757 - accuracy: 0.9473
Epoch 60/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.3297 - accuracy: 0.9833
Epoch 00060: loss did not improve from 0.07853
11/11 [==============================] - 0s 27ms/step - loss: 0.3206 - accuracy: 0.9824
Epoch 61/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.1858 - accuracy: 0.9867
Epoch 00061: loss did not improve from 0.07853
11/11 [==============================] - 0s 25ms/step - loss: 0.1821 - accuracy: 0.9883
Epoch 62/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.1253 - accuracy: 0.9978
Epoch 00062: loss did not improve from 0.07853
11/11 [==============================] - 0s 27ms/step - loss: 0.1233 - accuracy: 0.9961
Epoch 63/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.0734 - accuracy: 0.9978
Epoch 00063: loss improved from 0.07853 to 0.07234, saving model to Two_Stream.h5
11/11 [==============================] - 1s 77ms/step - loss: 0.0723 - accuracy: 0.9980
Epoch 64/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.0586 - accuracy: 0.9967
Epoch 00064: loss improved from 0.07234 to 0.05796, saving model to Two_Stream.h5
11/11 [==============================] - 1s 81ms/step - loss: 0.0580 - accuracy: 0.9971
Epoch 65/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.1316 - accuracy: 0.9889
Epoch 00065: loss did not improve from 0.05796
11/11 [==============================] - 0s 27ms/step - loss: 0.1349 - accuracy: 0.9902
Epoch 66/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.1471 - accuracy: 0.9878
Epoch 00066: loss did not improve from 0.05796
11/11 [==============================] - 0s 27ms/step - loss: 0.1464 - accuracy: 0.9873
Epoch 67/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.1267 - accuracy: 0.9944
Epoch 00067: loss did not improve from 0.05796
11/11 [==============================] - 0s 27ms/step - loss: 0.1322 - accuracy: 0.9932
Epoch 68/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.2525 - accuracy: 0.9867
Epoch 00068: loss did not improve from 0.05796
11/11 [==============================] - 0s 27ms/step - loss: 0.2615 - accuracy: 0.9873
Epoch 69/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.2953 - accuracy: 0.9878
Epoch 00069: loss did not improve from 0.05796
11/11 [==============================] - 0s 27ms/step - loss: 0.3040 - accuracy: 0.9834
Epoch 70/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.4420 - accuracy: 0.9700
Epoch 00070: loss did not improve from 0.05796
11/11 [==============================] - 0s 26ms/step - loss: 0.4551 - accuracy: 0.9707
Epoch 71/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.5223 - accuracy: 0.9789
Epoch 00071: loss did not improve from 0.05796
11/11 [==============================] - 0s 27ms/step - loss: 0.5245 - accuracy: 0.9717
Epoch 72/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.4017 - accuracy: 0.9678
Epoch 00072: loss did not improve from 0.05796
11/11 [==============================] - 0s 27ms/step - loss: 0.4012 - accuracy: 0.9668
Epoch 73/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.2732 - accuracy: 0.9844
Epoch 00073: loss did not improve from 0.05796
11/11 [==============================] - 0s 27ms/step - loss: 0.2647 - accuracy: 0.9844
Epoch 74/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.1782 - accuracy: 0.9878
Epoch 00074: loss did not improve from 0.05796
11/11 [==============================] - 0s 26ms/step - loss: 0.1739 - accuracy: 0.9883
Epoch 75/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.1069 - accuracy: 0.9967
Epoch 00075: loss did not improve from 0.05796
11/11 [==============================] - 0s 27ms/step - loss: 0.1062 - accuracy: 0.9961
Epoch 76/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.1370 - accuracy: 0.9900
Epoch 00076: loss did not improve from 0.05796
11/11 [==============================] - 0s 27ms/step - loss: 0.1477 - accuracy: 0.9873
Epoch 77/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.7560 - accuracy: 0.9433
Epoch 00077: loss did not improve from 0.05796
11/11 [==============================] - 0s 26ms/step - loss: 0.8010 - accuracy: 0.9482
Epoch 78/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.8383 - accuracy: 0.9889
Epoch 00078: loss did not improve from 0.05796
11/11 [==============================] - 0s 27ms/step - loss: 0.8139 - accuracy: 0.9834
Epoch 79/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.3511 - accuracy: 0.9756
Epoch 00079: loss did not improve from 0.05796
11/11 [==============================] - 0s 27ms/step - loss: 0.3408 - accuracy: 0.9775
Epoch 80/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.2735 - accuracy: 0.9844
Epoch 00080: loss did not improve from 0.05796
11/11 [==============================] - 0s 27ms/step - loss: 0.2688 - accuracy: 0.9854
Epoch 81/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.2034 - accuracy: 0.9900
Epoch 00081: loss did not improve from 0.05796
11/11 [==============================] - 0s 27ms/step - loss: 0.1988 - accuracy: 0.9893
Epoch 82/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.2267 - accuracy: 0.9856
Epoch 00082: loss did not improve from 0.05796
11/11 [==============================] - 0s 27ms/step - loss: 0.2272 - accuracy: 0.9854
Epoch 83/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.2767 - accuracy: 0.9833
Epoch 00083: loss did not improve from 0.05796
11/11 [==============================] - 0s 26ms/step - loss: 0.2742 - accuracy: 0.9844
Epoch 84/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.1630 - accuracy: 0.9944
Epoch 00084: loss did not improve from 0.05796
11/11 [==============================] - 0s 27ms/step - loss: 0.1562 - accuracy: 0.9951
Epoch 85/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.1311 - accuracy: 0.9856
Epoch 00085: loss did not improve from 0.05796
11/11 [==============================] - 0s 27ms/step - loss: 0.1295 - accuracy: 0.9863
Epoch 86/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.0917 - accuracy: 0.9967
Epoch 00086: loss did not improve from 0.05796
11/11 [==============================] - 0s 27ms/step - loss: 0.0917 - accuracy: 0.9961
Epoch 87/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.2148 - accuracy: 0.9811
Epoch 00087: loss did not improve from 0.05796
11/11 [==============================] - 0s 27ms/step - loss: 0.2314 - accuracy: 0.9814
Epoch 88/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.5057 - accuracy: 0.9633
Epoch 00088: loss did not improve from 0.05796
11/11 [==============================] - 0s 27ms/step - loss: 0.5180 - accuracy: 0.9639
Epoch 89/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.8258 - accuracy: 0.9622
Epoch 00089: loss did not improve from 0.05796
11/11 [==============================] - 0s 26ms/step - loss: 0.8413 - accuracy: 0.9600
Epoch 90/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.6590 - accuracy: 0.9811
Epoch 00090: loss did not improve from 0.05796
11/11 [==============================] - 0s 27ms/step - loss: 0.6306 - accuracy: 0.9805
Epoch 91/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.2685 - accuracy: 0.9811
Epoch 00091: loss did not improve from 0.05796
11/11 [==============================] - 0s 27ms/step - loss: 0.2620 - accuracy: 0.9805
Epoch 92/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.2093 - accuracy: 0.9833
Epoch 00092: loss did not improve from 0.05796
11/11 [==============================] - 0s 27ms/step - loss: 0.2158 - accuracy: 0.9834
Epoch 93/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.2980 - accuracy: 0.9856
Epoch 00093: loss did not improve from 0.05796
11/11 [==============================] - 0s 27ms/step - loss: 0.2944 - accuracy: 0.9863
Epoch 94/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.2624 - accuracy: 0.9744
Epoch 00094: loss did not improve from 0.05796
11/11 [==============================] - 0s 27ms/step - loss: 0.2557 - accuracy: 0.9756
Epoch 95/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.2328 - accuracy: 0.9833
Epoch 00095: loss did not improve from 0.05796
11/11 [==============================] - 0s 26ms/step - loss: 0.2276 - accuracy: 0.9834
Epoch 96/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.1896 - accuracy: 0.9867
Epoch 00096: loss did not improve from 0.05796
11/11 [==============================] - 0s 26ms/step - loss: 0.1934 - accuracy: 0.9844
Epoch 97/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.2946 - accuracy: 0.9800
Epoch 00097: loss did not improve from 0.05796
11/11 [==============================] - 0s 27ms/step - loss: 0.2996 - accuracy: 0.9824
Epoch 98/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.2742 - accuracy: 0.9956
Epoch 00098: loss did not improve from 0.05796
11/11 [==============================] - 0s 27ms/step - loss: 0.2615 - accuracy: 0.9961
Epoch 99/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.1203 - accuracy: 0.9944
Epoch 00099: loss did not improve from 0.05796
11/11 [==============================] - 0s 27ms/step - loss: 0.1226 - accuracy: 0.9941
Epoch 100/100
 9/11 [=======================>......] - ETA: 0s - loss: 0.2196 - accuracy: 0.9711
Epoch 00100: loss did not improve from 0.05796
11/11 [==============================] - 0s 27ms/step - loss: 0.2270 - accuracy: 0.9746
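
Note that ModelCheckpoint saved the lowest-loss weights (epoch 64, loss 0.05796), while the evaluation below uses the final-epoch weights. To evaluate the best checkpoint instead, reload it first (a one-line sketch):

# model.load_weights('Two_Stream.h5')   # restore the best (lowest training loss) weights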
           
Y_pred_test = model.predict([X_Local_test,X_Global_test])
y_pred_test = np.argmax(Y_pred_test, axis=1)
           
classification = classification_report(np.argmax(Y_Local_test, axis=1), y_pred_test)
print(classification)
           
precision    recall  f1-score   support

           0       1.00      0.07      0.14        41
           1       0.97      0.98      0.98      1285
           2       0.98      0.93      0.95       747
           3       1.00      0.87      0.93       213
           4       0.99      0.97      0.98       435
           5       0.99      1.00      0.99       657
           6       0.67      0.56      0.61        25
           7       0.93      1.00      0.96       430
           8       1.00      0.33      0.50        18
           9       0.99      0.97      0.98       875
          10       0.99      0.99      0.99      2210
          11       0.84      1.00      0.91       534
          12       1.00      0.98      0.99       185
          13       0.98      1.00      0.99      1139
          14       0.99      0.95      0.97       347
          15       0.93      0.92      0.92        84

    accuracy                           0.97      9225
   macro avg       0.95      0.84      0.86      9225
weighted avg       0.97      0.97      0.97      9225
           
def AA_andEachClassAccuracy(confusion_matrix):
    counter = confusion_matrix.shape[0]                           # number of classes
    list_diag = np.diag(confusion_matrix)                         # correct predictions per class (main diagonal)
    list_raw_sum = np.sum(confusion_matrix, axis=1)               # total true samples per class (row sums)
    each_acc = np.nan_to_num(truediv(list_diag, list_raw_sum))    # per-class accuracy = diagonal / row sum
    average_acc = np.mean(each_acc)

    return each_acc, average_acc
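
A toy example of what this computes (made-up numbers): with rows as true classes, per-class accuracy is the diagonal entry divided by its row sum, and AA is their mean.

cm = np.array([[8, 2],
               [1, 9]])
each, aa = AA_andEachClassAccuracy(cm)
print(each)   # [0.8 0.9]
print(aa)     # 0.85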
           
def reports(X_test,y_test):
    #start = time.time()
    Y_pred = model.predict(X_test)
    y_pred = np.argmax(Y_pred, axis=1)
    #end = time.time()
    #print(end - start)
    target_names = ['Alfalfa', 'Corn-notill', 'Corn-mintill', 'Corn',
                    'Grass-pasture', 'Grass-trees', 'Grass-pasture-mowed', 
                    'Hay-windrowed', 'Oats', 'Soybean-notill', 'Soybean-mintill',
                    'Soybean-clean', 'Wheat', 'Woods', 'Buildings-Grass-Trees-Drives',
                    'Stone-Steel-Towers']
    
    classification = classification_report(np.argmax(y_test, axis=1), y_pred, target_names=target_names)
    oa = accuracy_score(np.argmax(y_test, axis=1), y_pred)                          # overall accuracy (OA)
    confusion = confusion_matrix(np.argmax(y_test, axis=1), y_pred)                 # confusion matrix
    each_acc, aa = AA_andEachClassAccuracy(confusion)                               # per-class accuracy and average accuracy (AA)
    kappa = cohen_kappa_score(np.argmax(y_test, axis=1), y_pred)                    # Cohen's kappa
    score = model.evaluate(X_test, y_test, batch_size=40)
    Test_Loss =  score[0]*100
    Test_accuracy = score[1]*100
    
    return classification, confusion, Test_Loss, Test_accuracy, oa*100, each_acc*100, aa*100, kappa*100
           
classification, confusion, Test_loss, Test_accuracy, oa, each_acc, aa, kappa = reports([X_Local_test,X_Global_test],Y_Local_test)
classification = str(classification)
confusion = str(confusion)
file_name = "classification_Two_Streams_report.txt"

with open(file_name, 'w') as x_file:
    x_file.write('{} Test loss (%)'.format(Test_loss))
    x_file.write('\n')
    x_file.write('{} Test accuracy (%)'.format(Test_accuracy))
    x_file.write('\n')
    x_file.write('\n')
    x_file.write('{} Kappa accuracy (%)'.format(kappa))
    x_file.write('\n')
    x_file.write('{} Overall accuracy (%)'.format(oa))
    x_file.write('\n')
    x_file.write('{} Average accuracy (%)'.format(aa))
    x_file.write('\n')
    x_file.write('\n')
    x_file.write('{}'.format(classification))
    x_file.write('\n')
    x_file.write('{}'.format(confusion))

           
231/231 [==============================] - 1s 6ms/step - loss: 0.4064 - accuracy: 0.9713
           

40.64306616783142 Test loss (%)

97.12737202644348 Test accuracy (%)

96.7228512430845 Kappa accuracy (%)

97.12737127371274 Overall accuracy (%)

84.4225434989882 Average accuracy (%)

precision    recall  f1-score   support

                 Alfalfa       1.00      0.07      0.14        41
             Corn-notill       0.97      0.98      0.98      1285
            Corn-mintill       0.98      0.93      0.95       747
                    Corn       1.00      0.87      0.93       213
           Grass-pasture       0.99      0.97      0.98       435
             Grass-trees       0.99      1.00      0.99       657
     Grass-pasture-mowed       0.67      0.56      0.61        25
           Hay-windrowed       0.93      1.00      0.96       430
                    Oats       1.00      0.33      0.50        18
          Soybean-notill       0.99      0.97      0.98       875
         Soybean-mintill       0.99      0.99      0.99      2210
           Soybean-clean       0.84      1.00      0.91       534
                   Wheat       1.00      0.98      0.99       185
                   Woods       0.98      1.00      0.99      1139
 Buildings-Grass-Trees-Drives       0.99      0.95      0.97       347
      Stone-Steel-Towers       0.93      0.92      0.92        84

                accuracy                           0.97      9225
               macro avg       0.95      0.84      0.86      9225
            weighted avg       0.97      0.97      0.97      9225
           

[[   3    1    0    0    0    0    5   31    0    0    0    0    0    1    0    0]
 [   0 1258    0    0    0    0    0    0    0    0    9   18    0    0    0    0]
 [   0    7  695    0    0    0    0    0    0    0    0   45    0    0    0    0]
 [   0    0    0  186    0    0    0    0    0    0    0   22    0    0    0    5]
 [   0    0    7    0  420    0    0    0    0    7    0    0    0    0    1    0]
 [   0    0    0    0    0  654    0    3    0    0    0    0    0    0    0    0]
 [   0    0    0    0    3    0   14    0    0    0    0    0    0    8    0    0]
 [   0    0    0    0    0    0    0  430    0    0    0    0    0    0    0    0]
 [   0    0    6    0    0    0    0    0    6    0    0    6    0    0    0    0]
 [   0   15    0    0    0    0    2    0    0  846    6    4    0    2    0    0]
 [   0   10    0    0    0    5    0    0    0    0 2194    0    0    0    1    0]
 ...]
