# -*- coding: utf-8 -*-
"""unet_eff_final.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1qdwBOU4hJn0LA2neC9Rzo1wA5Tec5h5N
"""

!pip uninstall -y tensorflow
!pip install tensorflow==2.9

# Commented out IPython magic to ensure Python compatibility.
import os
import cv2
import glob
import PIL
import shutil
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from skimage import data
from skimage.util import montage
import skimage.transform as skTrans
from skimage.transform import rotate
from skimage.transform import resize
from PIL import Image, ImageOps

# neural imaging

import nibabel as nib

import albumentations as A

# ml libs
import keras
import keras.backend as K
from keras.callbacks import CSVLogger
import tensorflow as tf
from tensorflow.keras.utils import plot_model
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from tensorflow.keras.models import *
from tensorflow.keras.layers import *
from tensorflow.keras.optimizers import *
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping, TensorBoard
from tensorflow.keras.layers.experimental import preprocessing

# %matplotlib inline
VOLUME_SLICES = 128 # number of axial slices kept (trims excess black border)
VOLUME_START_AT = 13 # first slice index kept (trims excess black border)

IMG_SIZE=128 # image size

TRAIN_DATASET_PATH = '/content/MyDrive/BraTS/' # dataset path

"""**移除瑕疵圖**"""

def youn(path): # path is the case folder name inside the dataset
  type_img=["flair","t1ce","t1","t2"]
  type_Area=list()
  for i in range(4):
        image = nib.load(TRAIN_DATASET_PATH+path+"/"+path+"_"+type_img[i]+".nii").get_fdata()
        image=image[:,:,106]

        # normalize the image
        image = image / np.max(image)  # scale to the 0-1 range

        # binarize the image
        _, threshold = cv2.threshold(image, 0, 1, cv2.THRESH_BINARY)

        # find the contours
        contours, _ = cv2.findContours(threshold.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        for contour in contours:
          # take the area of the (last) contour when several are found
          if len(contours)>1:
              ellipse= cv2.contourArea(contours[-1])
          else:
              ellipse= cv2.contourArea(contour)

        type_Area.append(ellipse)
  return type_Area

da=os.listdir(TRAIN_DATASET_PATH) # case folders in the dataset
image_len=list()
for i in range(len(da)):
    image_len.append(youn(da[i])) # da[i] is the case folder name
image_len=np.array(image_len)
'''Dixon's Q test for outlier slices'''
asc=list()
for i in range(len(da)):
    asd=image_len[i,:]
    asd.sort()
    asd[3]=asd[3]+1 # avoid a zero range when all four areas are equal
    # Q = gap / range; flag the case if the smallest area is an outlier (critical value 0.964)
    if (asd[1]-asd[0])/(asd[3]-asd[0])>0.964:
        asc.append("True")
    else:
        asc.append("False")

train_ids=['BraTS19_CBICA_BAX_1', 'BraTS19_TCIA02_300_1', 'BraTS19_CBICA_BNR_1', 'BraTS19_CBICA_AYI_1', 'BraTS19_CBICA_ALX_1', 'BraTS19_CBICA_AQO_1', 'BraTS19_TCIA06_247_1', 'BraTS19_CBICA_ANI_1', 'BraTS19_CBICA_ABO_1', 'BraTS19_TCIA04_149_1', 'BraTS19_CBICA_BAP_1', 'BraTS19_2013_2_1', 'BraTS19_CBICA_ATD_1', 'BraTS19_CBICA_BHK_1', 'BraTS19_2013_3_1', 'BraTS19_CBICA_ANG_1', 'BraTS19_CBICA_ABY_1', 'BraTS19_CBICA_BHZ_1', 'BraTS19_TCIA02_322_1', 'BraTS19_CBICA_AQR_1', 'BraTS19_CBICA_BHM_1', 'BraTS19_CBICA_AQD_1', 'BraTS19_TCIA01_429_1', 'BraTS19_TCIA02_118_1', 'BraTS19_TCIA08_105_1', 'BraTS19_CBICA_BGO_1', 'BraTS19_CBICA_AYA_1', 'BraTS19_CBICA_BGX_1', 'BraTS19_CBICA_APZ_1', 'BraTS19_CBICA_ABM_1', 'BraTS19_CBICA_AOS_1', 'BraTS19_TCIA06_372_1', 'BraTS19_CBICA_ASN_1', 'BraTS19_CBICA_AYG_1', 'BraTS19_CBICA_ASO_1', 'BraTS19_TCIA02_368_1', 'BraTS19_CBICA_AZD_1', 'BraTS19_CBICA_AUQ_1', 'BraTS19_2013_5_1', 'BraTS19_TCIA02_290_1', 'BraTS19_CBICA_AVF_1', 'BraTS19_TCIA01_412_1', 'BraTS19_TCIA06_332_1', 'BraTS19_CBICA_AVJ_1', 'BraTS19_2013_11_1', 'BraTS19_CBICA_BGN_1', 'BraTS19_CBICA_AUW_1', 'BraTS19_CBICA_AOP_1', 'BraTS19_CBICA_ANV_1', 'BraTS19_TCIA03_121_1', 'BraTS19_TCIA02_151_1', 'BraTS19_TCIA08_234_1', 'BraTS19_CBICA_BKV_1', 'BraTS19_TCIA08_280_1', 'BraTS19_TCIA05_396_1', 'BraTS19_CBICA_ABE_1', 'BraTS19_CBICA_ATF_1', 'BraTS19_CBICA_BJY_1', 'BraTS19_TCIA02_321_1', 'BraTS19_TCIA02_471_1', 'BraTS19_CBICA_AQY_1', 'BraTS19_2013_14_1', 'BraTS19_CBICA_ANZ_1', 'BraTS19_TCIA02_208_1', 'BraTS19_CBICA_AVB_1', 'BraTS19_TCIA02_374_1', 'BraTS19_TMC_06290_1', 'BraTS19_CBICA_AQU_1', 'BraTS19_TCIA02_608_1', 'BraTS19_CBICA_APK_1', 'BraTS19_CBICA_ASF_1', 'BraTS19_CBICA_BBG_1', 'BraTS19_TCIA08_406_1', 'BraTS19_TCIA06_184_1', 'BraTS19_CBICA_AQJ_1', 'BraTS19_CBICA_AUR_1', 'BraTS19_CBICA_AYC_1', 'BraTS19_2013_19_1', 'BraTS19_CBICA_AWH_1', 'BraTS19_CBICA_AAP_1', 'BraTS19_CBICA_BGW_1', 'BraTS19_TCIA06_211_1', 'BraTS19_TCIA01_378_1', 'BraTS19_CBICA_AOO_1', 'BraTS19_TCIA08_242_1', 'BraTS19_TCIA03_257_1', 'BraTS19_TCIA02_283_1', 'BraTS19_CBICA_AXO_1', 'BraTS19_CBICA_BLJ_1', 'BraTS19_TCIA02_377_1', 'BraTS19_TCIA03_338_1', 'BraTS19_TCIA01_235_1', 'BraTS19_TCIA08_218_1', 'BraTS19_CBICA_AWV_1', 'BraTS19_CBICA_ARW_1', 'BraTS19_CBICA_AWG_1', 'BraTS19_CBICA_AOC_1', 'BraTS19_2013_23_1', 'BraTS19_TCIA01_460_1', 'BraTS19_TCIA01_190_1', 'BraTS19_2013_18_1', 'BraTS19_CBICA_AQZ_1', 'BraTS19_CBICA_BEM_1', 'BraTS19_CBICA_AQN_1', 'BraTS19_CBICA_BAN_1', 'BraTS19_TCIA01_390_1', 'BraTS19_TCIA01_147_1', 'BraTS19_TCIA02_222_1', 'BraTS19_CBICA_ASG_1', 'BraTS19_CBICA_AAB_1', 'BraTS19_CBICA_AUX_1', 'BraTS19_CBICA_AXN_1', 'BraTS19_CBICA_AVV_1', 'BraTS19_CBICA_AZH_1', 'BraTS19_TCIA08_469_1', 'BraTS19_TCIA04_437_1', 'BraTS19_2013_22_1', 'BraTS19_TCIA01_411_1', 'BraTS19_CBICA_AQT_1', 'BraTS19_CBICA_AXW_1', 'BraTS19_TCIA04_479_1', 'BraTS19_2013_4_1', 'BraTS19_CBICA_AXM_1', 'BraTS19_TMC_15477_1', 'BraTS19_CBICA_ARZ_1', 'BraTS19_TMC_11964_1', 'BraTS19_TCIA02_430_1', 'BraTS19_TCIA02_135_1', 'BraTS19_TCIA08_167_1', 'BraTS19_CBICA_AWI_1', 'BraTS19_TMC_21360_1', 'BraTS19_CBICA_AXJ_1', 'BraTS19_TCIA03_375_1', 'BraTS19_CBICA_ASK_1', 'BraTS19_TCIA01_150_1', 'BraTS19_CBICA_BGR_1', 'BraTS19_2013_17_1', 'BraTS19_CBICA_ANP_1', 'BraTS19_TCIA01_231_1', 'BraTS19_TCIA02_331_1', 'BraTS19_TMC_06643_1', 'BraTS19_TCIA02_606_1', 'BraTS19_CBICA_AQP_1', 'BraTS19_TCIA03_474_1', 'BraTS19_TMC_27374_1', 'BraTS19_2013_27_1', 'BraTS19_2013_10_1', 'BraTS19_CBICA_BFB_1', 'BraTS19_CBICA_APR_1', 'BraTS19_CBICA_AQV_1', 'BraTS19_TCIA01_180_1', 'BraTS19_CBICA_AUA_1', 'BraTS19_TCIA08_113_1', 
'BraTS19_CBICA_ATB_1', 'BraTS19_CBICA_AUN_1', 'BraTS19_TCIA08_205_1', 'BraTS19_CBICA_ASE_1', 'BraTS19_TCIA01_425_1', 'BraTS19_CBICA_AAL_1', 'BraTS19_CBICA_ARF_1', 'BraTS19_TCIA03_265_1', 'BraTS19_CBICA_AQQ_1', 'BraTS19_TCIA02_370_1', 'BraTS19_TCIA08_319_1', 'BraTS19_CBICA_AMH_1', 'BraTS19_TCIA02_179_1', 'BraTS19_TCIA06_165_1', 'BraTS19_CBICA_BCF_1', 'BraTS19_CBICA_AQA_1', 'BraTS19_2013_12_1', 'BraTS19_TCIA04_328_1', 'BraTS19_2013_26_1', 'BraTS19_TCIA02_607_1', 'BraTS19_CBICA_BHQ_1', 'BraTS19_TCIA03_498_1', 'BraTS19_TCIA08_162_1', 'BraTS19_CBICA_AYW_1', 'BraTS19_CBICA_BGG_1', 'BraTS19_TCIA01_221_1', 'BraTS19_TCIA02_226_1', 'BraTS19_CBICA_AOD_1', 'BraTS19_TCIA10_640_1', 'BraTS19_TCIA10_639_1', 'BraTS19_TCIA10_261_1', 'BraTS19_TCIA10_387_1', 'BraTS19_TCIA12_480_1', 'BraTS19_TCIA13_653_1', 'BraTS19_TCIA13_633_1', 'BraTS19_TCIA12_249_1', 'BraTS19_TCIA12_470_1', 'BraTS19_TCIA10_420_1', 'BraTS19_2013_29_1', 'BraTS19_TCIA10_625_1', 'BraTS19_TCIA10_346_1', 'BraTS19_TCIA09_402_1', 'BraTS19_TCIA09_177_1', 'BraTS19_TCIA09_141_1', 'BraTS19_TCIA10_276_1', 'BraTS19_TCIA10_103_1', 'BraTS19_TCIA13_623_1', 'BraTS19_TCIA10_442_1', 'BraTS19_TCIA10_410_1', 'BraTS19_2013_16_1', 'BraTS19_TCIA10_628_1', 'BraTS19_TCIA12_298_1', 'BraTS19_2013_15_1', 'BraTS19_2013_28_1', 'BraTS19_TCIA10_241_1', 'BraTS19_TCIA10_130_1', 'BraTS19_TCIA13_618_1', 'BraTS19_TCIA13_650_1', 'BraTS19_TCIA10_490_1', 'BraTS19_TCIA13_654_1', 'BraTS19_TCIA10_449_1', 'BraTS19_TCIA13_615_1', 'BraTS19_TCIA10_413_1', 'BraTS19_TCIA10_282_1', 'BraTS19_2013_24_1', 'BraTS19_TCIA09_620_1', 'BraTS19_TCIA10_152_1', 'BraTS19_TCIA10_299_1', 'BraTS19_TCIA13_630_1', 'BraTS19_TCIA10_393_1', 'BraTS19_TCIA09_254_1', 'BraTS19_TCIA13_634_1', 'BraTS19_TCIA10_310_1', 'BraTS19_TCIA09_312_1', 'BraTS19_TMC_09043_1', 'BraTS19_TCIA10_109_1', 'BraTS19_TCIA13_621_1', 'BraTS19_TCIA10_632_1', 'BraTS19_TCIA10_266_1', 'BraTS19_TCIA12_101_1']
test_ids=['BraTS19_TCIA02_605_1', 'BraTS19_TCIA03_419_1', 'BraTS19_TCIA13_642_1', 'BraTS19_TCIA10_637_1', 'BraTS19_TCIA09_428_1', 'BraTS19_2013_1_1', 'BraTS19_CBICA_APY_1', 'BraTS19_TCIA03_296_1', 'BraTS19_CBICA_BDK_1', 'BraTS19_CBICA_BGE_1', 'BraTS19_2013_21_1', 'BraTS19_TCIA02_198_1', 'BraTS19_TCIA13_645_1', 'BraTS19_TCIA02_274_1', 'BraTS19_TCIA01_448_1', 'BraTS19_TCIA01_335_1', 'BraTS19_CBICA_AOH_1', 'BraTS19_CBICA_ALU_1', 'BraTS19_CBICA_AOZ_1', 'BraTS19_TCIA03_133_1', 'BraTS19_CBICA_ASW_1', 'BraTS19_TCIA04_361_1', 'BraTS19_TCIA10_351_1', 'BraTS19_TCIA10_408_1', 'BraTS19_TCIA09_255_1', 'BraTS19_2013_9_1', 'BraTS19_TCIA02_171_1', 'BraTS19_CBICA_AXQ_1', 'BraTS19_TCIA05_444_1', 'BraTS19_2013_13_1', 'BraTS19_CBICA_ASY_1', 'BraTS19_CBICA_BCL_1', 'BraTS19_CBICA_ASH_1', 'BraTS19_CBICA_ASU_1', 'BraTS19_CBICA_ASA_1', 'BraTS19_TCIA02_491_1', 'BraTS19_CBICA_AVT_1', 'BraTS19_CBICA_ABN_1', 'BraTS19_CBICA_AWX_1', 'BraTS19_CBICA_AXL_1', 'BraTS19_TCIA13_624_1', 'BraTS19_TCIA02_309_1', 'BraTS19_CBICA_AME_1', 'BraTS19_TCIA02_314_1', 'BraTS19_TCIA02_394_1', 'BraTS19_CBICA_ATN_1', 'BraTS19_TCIA10_175_1', 'BraTS19_TCIA10_629_1', 'BraTS19_CBICA_ATV_1']
val_ids=['BraTS19_2013_7_1', 'BraTS19_CBICA_AYU_1', 'BraTS19_TCIA05_277_1', 'BraTS19_CBICA_AVG_1', 'BraTS19_TCIA01_499_1', 'BraTS19_TCIA10_330_1', 'BraTS19_CBICA_AQG_1', 'BraTS19_TCIA02_168_1', 'BraTS19_CBICA_ALN_1', 'BraTS19_CBICA_ATX_1', 'BraTS19_TCIA10_202_1', 'BraTS19_CBICA_BIC_1', 'BraTS19_CBICA_BGT_1', 'BraTS19_TCIA09_462_1', 'BraTS19_2013_25_1', 'BraTS19_TCIA02_117_1', 'BraTS19_CBICA_BFP_1', 'BraTS19_TCIA06_603_1', 'BraTS19_CBICA_AAG_1', 'BraTS19_CBICA_BHB_1', 'BraTS19_CBICA_BHV_1', 'BraTS19_TCIA06_409_1', 'BraTS19_TCIA04_343_1', 'BraTS19_TCIA05_478_1', 'BraTS19_TCIA01_201_1', 'BraTS19_TCIA01_203_1', 'BraTS19_TCIA01_186_1', 'BraTS19_TMC_12866_1', 'BraTS19_TCIA02_455_1', 'BraTS19_TCIA02_473_1', 'BraTS19_TCIA09_451_1', 'BraTS19_TCIA01_401_1', 'BraTS19_TCIA04_111_1', 'BraTS19_TCIA09_493_1', 'BraTS19_TCIA03_138_1', 'BraTS19_TCIA04_192_1', 'BraTS19_TCIA03_199_1', 'BraTS19_TCIA01_131_1', 'BraTS19_CBICA_ASV_1', 'BraTS19_TCIA10_325_1', 'BraTS19_TMC_30014_1', 'BraTS19_CBICA_ATP_1', 'BraTS19_TCIA12_466_1', 'BraTS19_TCIA08_436_1', 'BraTS19_CBICA_ASR_1', 'BraTS19_TCIA10_644_1', 'BraTS19_TCIA08_278_1', 'BraTS19_CBICA_ABB_1', 'BraTS19_TCIA10_307_1']

"""**資料集切片**"""

def test(Batch_ids):
    cl_ass=list()
    for c, i in enumerate(Batch_ids):
        print(i)
        case_path = os.path.join(TRAIN_DATASET_PATH, i)

        data_path = os.path.join(case_path, f'{i}_flair.nii')
        flair = nib.load(data_path).get_fdata()

        data_path = os.path.join(case_path, f'{i}_t1ce.nii')
        ce = nib.load(data_path).get_fdata()


        data_path = os.path.join(case_path, f'{i}_t1.nii')
        t1 = nib.load(data_path).get_fdata()

        data_path = os.path.join(case_path, f'{i}_t2.nii')
        t2 = nib.load(data_path).get_fdata()

        data_path = os.path.join(case_path, f'{i}_seg.nii')
        seg = nib.load(data_path).get_fdata()
        img_non=np.zeros((240, 240, 4))
        img_non_128=np.zeros((VOLUME_SLICES,128, 128, 4))
        img_non_f=np.zeros((VOLUME_SLICES,128, 128))
        img_non_1=np.zeros((VOLUME_SLICES,128, 128))
        img_non_2=np.zeros((VOLUME_SLICES,128, 128))
        img_non_c=np.zeros((VOLUME_SLICES,128, 128))
        seg_128_128=np.zeros((VOLUME_SLICES,128, 128))
        for j in range(VOLUME_SLICES):
            asd=cv2.resize(flair[:,:,j+VOLUME_START_AT], (IMG_SIZE, IMG_SIZE))
            if np.std(asd)==0:
                 img_non_128[j,:,:,0]=asd
            else:
                 img_non_128[j,:,:,0]=(asd-np.mean(asd))/(np.std(asd))
            asd=cv2.resize(ce[:,:,j+VOLUME_START_AT], (IMG_SIZE, IMG_SIZE))
            if np.std(asd)==0:
                 img_non_128[j,:,:,1]=asd
            else:
                 img_non_128[j,:,:,1]=(asd-np.mean(asd))/(np.std(asd))
            asd=cv2.resize(t1[:,:,j+VOLUME_START_AT], (IMG_SIZE, IMG_SIZE))
            if np.std(asd)==0:
                 img_non_128[j,:,:,2]=asd
            else:
                 img_non_128[j,:,:,2]=(asd-np.mean(asd))/(np.std(asd))
            asd=cv2.resize(t2[:,:,j+VOLUME_START_AT], (IMG_SIZE, IMG_SIZE))
            if np.std(asd)==0:
                 img_non_128[j,:,:,3]=asd
            else:
                 img_non_128[j,:,:,3]=(asd-np.mean(asd))/(np.std(asd))
            mask_e=np.round(cv2.resize(seg[:,:,j+VOLUME_START_AT], (IMG_SIZE, IMG_SIZE)))
            mask_e[mask_e==4]=1
            mask_e[mask_e==3]=1
            mask_e[mask_e==2]=1
            seg_128_128[j,:,:]=mask_e
        #img_non_128=np.swapaxes(img_non_128,0,2)
        #img_non_128=np.swapaxes(img_non_128,1,2) # coronal plane: (1,2); sagittal plane: (0,1)
        #seg_128_128=np.swapaxes(seg_128_128,0,2)
        #seg_128_128=np.swapaxes(seg_128_128,1,2) # coronal plane: (1,2); sagittal plane: (0,1)
        for k in range(128):
            floder_name='/content/BraTS2021_Training_Data_v/'+i+'_'+str(k)
            os.mkdir(floder_name)
            cl_ass.append(i+'_'+str(k))
            img_name=floder_name+'/'+i+'_'+str(k)+'_img'
            seg_name=floder_name+'/'+i+'_'+str(k)+'_seg'
            np.save(img_name, img_non_128[k,:,:,:])
            np.save(seg_name, seg_128_128[k,:,:])
        oringin="rm -r /content/BraTS/"+i
        os.system(oringin)
    return cl_ass

os.mkdir("/content/BraTS2021_Training_Data_v")
train_id=test(train_ids)
train_id=np.array(train_id)
np.save("train_id", train_id)
val_id=test(val_ids)
val_id=np.array(val_id)
np.save("val_id", val_id)
test_id=test(test_ids)
test_id=np.array(test_id)
np.save("test_id", test_id)

import random
train_id=np.load("/content/train_id.npy")
val_id=np.load("/content/val_id.npy")
test_id=np.load("/content/test_id.npy")
train_id=list(train_id)
random.shuffle(train_id)
val_id=list(val_id)
random.shuffle(val_id)
test_id=list(test_id)
random.shuffle(test_id)

TRAIN_DATASET_PATH = '/content/BraTS2021_Training_Data_v/' # path to the sliced training data

"""**骰子相似係數**"""

def dice_test(y_true, y_pred, epsilon=1e-6):
    intersection = K.sum(y_true * y_pred)
    return (2. * intersection) / (K.sum(K.square(y_true)) + K.sum(K.square(y_pred)) + epsilon)

"""**全域骰子損失函數**"""

def generalized_dice(y_true, y_pred):

    """
    Generalized Dice Score
    https://arxiv.org/pdf/1707.03237

    """

    y_true    = K.reshape(y_true,shape=(-1,1))
    y_pred    = K.reshape(y_pred,shape=(-1,1))
    sum_p     = K.sum(y_pred, -2)
    sum_r     = K.sum(y_true, -2)
    sum_pr    = K.sum(y_true * y_pred, -2)
    weights   = K.pow(sum_r+K.epsilon() , -1)#+ K.epsilon()
    generalized_dice = (2 * K.sum(weights * sum_pr)) / (K.sum(weights * (sum_r + sum_p)))

    return generalized_dice

def generalized_dice_loss(y_true, y_pred):
    return 1-generalized_dice(y_true, y_pred)
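# Quick numerical check (illustrative only): on a perfect prediction the Dice score is ~1 and
# the generalized Dice loss is ~0; on a completely wrong prediction the score is ~0 and the loss ~1.
y_t = tf.constant([[0., 1., 1., 0.]])
y_good = tf.constant([[0., 1., 1., 0.]])
y_bad = tf.constant([[1., 0., 0., 1.]])
print(float(dice_test(y_t, y_good)), float(generalized_dice_loss(y_t, y_good)))  # ~1.0, ~0.0
print(float(dice_test(y_t, y_bad)), float(generalized_dice_loss(y_t, y_bad)))    # ~0.0, ~1.0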

"""# **EffcientUnet_fc2**
EffcientUnet搭配(flair,t1ce,t2)
"""

K.clear_session()
from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Conv2DTranspose, Concatenate, Input
from tensorflow.keras.models import Model
from tensorflow.keras.applications import EfficientNetV2S
from tensorflow.keras.layers import Dropout
import tensorflow as tf

print("TF Version: ", tf.__version__)

def conv_block(inputs, num_filters):
    x = Conv2D(num_filters, 3, padding="same")(inputs)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    x = Conv2D(num_filters, 3, padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    return x

def decoder_block(inputs, skip, num_filters):
    x = Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(inputs)
    #skip= Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(skip)
    x = Concatenate()([x, skip])
    x = conv_block(x, num_filters)
    return x

def build_effienet_unet(input_shape):
    """ Input """
    inputs = Input(input_shape)

    """ Pre-trained Encoder """
    encoder = EfficientNetV2S(include_top=False, weights="imagenet", input_tensor=inputs)
    s0 = encoder.get_layer("input_1").output
    s1 = encoder.get_layer("block1b_add").output                      ## 256
    s2 = encoder.get_layer("block2d_add").output    ## 128
    s3 = encoder.get_layer("block3d_add").output    ## 64
    s4 = encoder.get_layer("block5i_add").output    ## 32

    """ Bottleneck """
    #b1 = encoder.get_layer("top_activation").output    ## 16
    b1 = encoder.layers[-1].output
    """ Decoder """
    d1 = decoder_block(b1, s4, 512)                               ## 32
    d2 = decoder_block(d1, s3, 256)                               ## 64
    d3 = decoder_block(d2, s2, 128)                               ## 128
    d4 = decoder_block(d3, s1, 64)

    d5 = decoder_block(d4, s0, 32)
    """ Decoder """

    """ Output """
    outputs = Conv2D(1, 1, padding="same", activation="sigmoid")(d5)

    model = Model(inputs, outputs, name="EfficientNetV2S_UNET")
    return model

input_shape = (128, 128, 3)
effienet_1f2 = build_effienet_unet(input_shape)
#effienet_unet.summary()
effienet_1f2.compile(loss=generalized_dice_loss, optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), metrics = ['accuracy', dice_test] )
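# Optional check (illustrative only): each decoder stage doubles the spatial resolution and
# concatenates the matching encoder feature map, so the chosen skip layers should roughly halve
# in size from s1 to s4. Printing their output shapes from the built model makes that easy to verify.
for skip_name in ["block1b_add", "block2d_add", "block3d_add", "block5i_add"]:
    print(skip_name, effienet_1f2.get_layer(skip_name).output.shape)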

class DataGenerator(tf.keras.utils.Sequence):
    'Generates data for Keras'
    def __init__(self, list_IDs, dim=(IMG_SIZE,IMG_SIZE), batch_size = 64, n_channels = 3, shuffle=True):
        'Initialization'
        self.dim = dim
        self.batch_size = batch_size
        self.list_IDs = list_IDs
        self.n_channels = n_channels
        self.shuffle = shuffle
        self.on_epoch_end()

    def __len__(self):
        'Denotes the number of batches per epoch'
        return int(np.floor(len(self.list_IDs) / self.batch_size))

    def __getitem__(self, index):
        'Generate one batch of data'
        # Generate indexes of the batch
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]

        # Find list of IDs
        Batch_ids = [self.list_IDs[k] for k in indexes]

        # Generate data
        X, y = self.__data_generation(Batch_ids)

        return X, y

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle == True:
            np.random.shuffle(self.indexes)

    def __data_generation(self, Batch_ids):
        'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
        # Initialization
        X = np.zeros((self.batch_size, *self.dim, self.n_channels))
        y = np.zeros((self.batch_size, 128, 128))

        # Generate data
        for c, i in enumerate(Batch_ids):
            case_path = os.path.join(TRAIN_DATASET_PATH, i)

            data_path = os.path.join(case_path, f'{i}_img.npy')
            img = np.load(data_path)


            data_path = os.path.join(case_path, f'{i}_seg.npy')
            seg = np.load(data_path)

            X[c,:,:,0] = img[...,0]#flair
            X[c,:,:,1] = img[...,1]#t1ce
            X[c,:,:,2] = img[...,3]#t2

            y[c,:,:]=seg

        return X, y



training_generator = DataGenerator(train_id)
valid_generator = DataGenerator(val_id)
test_generator = DataGenerator(test_id)
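# The callbacks imported at the top (ModelCheckpoint, ReduceLROnPlateau, EarlyStopping, CSVLogger)
# are never wired in. A minimal sketch of how they could be passed to fit() if desired; the file
# names here are hypothetical.
callbacks = [
    ModelCheckpoint("effienet_fc2_best.h5", monitor="val_dice_test", mode="max", save_best_only=True),
    ReduceLROnPlateau(monitor="val_loss", factor=0.5, patience=3, min_lr=1e-6),
    EarlyStopping(monitor="val_loss", patience=8, restore_best_weights=True),
    CSVLogger("effienet_fc2_training.csv"),
]
# e.g. effienet_1f2.fit(training_generator, epochs=30, validation_data=valid_generator, callbacks=callbacks)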

history =  effienet_1f2.fit(training_generator,
                    epochs=30,
                    validation_data = valid_generator
                    )
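# Optional: plot the curves gathered in `history` (generalized Dice loss and the dice_test metric
# for both the training and validation generators).
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))
ax1.plot(history.history["loss"], label="train loss")
ax1.plot(history.history["val_loss"], label="val loss")
ax1.set_title("generalized Dice loss"); ax1.set_xlabel("epoch"); ax1.legend()
ax2.plot(history.history["dice_test"], label="train dice")
ax2.plot(history.history["val_dice_test"], label="val dice")
ax2.set_title("dice_test"); ax2.set_xlabel("epoch"); ax2.legend()
plt.show()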

effienet_1f2.save('effienet_fc2_zxy.h5')

effienet_1c2 = tf.keras.models.load_model('effienet_fc2_zxy.h5',custom_objects={ 'accuracy': dice_test,"dice_test": dice_test}, compile=False)

def predictByPath(case_path,case):
    files = next(os.walk(case_path))[2]
    X = np.empty((1, IMG_SIZE, IMG_SIZE,3))
    vol_path = case_path + case + '_img.npy'
    img=np.load(vol_path)

    for j in range(1):
        X[j,:,:,0] = img[...,0]
        X[j,:,:,1] = img[...,1]
        X[j,:,:,2] = img[...,3]

    return effienet_1c2.predict(X, verbose=1)
def predictByPath_X(case_path,case):
    files = next(os.walk(case_path))[2]
    X = np.empty((VOLUME_SLICES, IMG_SIZE, IMG_SIZE, 1))
    y = np.empty((VOLUME_SLICES, IMG_SIZE, IMG_SIZE))
    vol_path = case_path + case + '_img.npy'
    img=np.load(vol_path)

    vol_path = case_path + case + '_seg.npy'
    seg=np.load(vol_path)
    for j in range(1):
        X[j,:,:,0] = img[...,0]
        y[j,...] = seg

    return X,y
import numpy as np
def showPredictsById(case, start_slice = 60):
    path = TRAIN_DATASET_PATH + case + '/'
    p = predictByPath(path,case)

    origImage,gt=predictByPath_X(path,case)

    plt.figure(figsize=(18, 50))
    f, axarr = plt.subplots(1,3, figsize = (18, 50))

    axarr[0].imshow(origImage[0,:,:], cmap="gray")
    axarr[0].title.set_text('Original image flair')
    axarr[1].imshow(gt[0,:,:], cmap="Reds", interpolation='none', alpha=0.3)
    axarr[1].title.set_text('Ground truth')
    axarr[2].imshow(np.round(p[0,:,]), cmap="OrRd", interpolation='none', alpha=0.3)
    axarr[2].title.set_text('predicted')
    plt.show()

showPredictsById(case="BraTS19_CBICA_ATN_1_93")

effienet_1c2.compile(loss=generalized_dice_loss, optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), metrics = ['accuracy', dice_test] )
# Evaluate the model on the test data using `evaluate`
print("Evaluate on test data")
results = effienet_1c2.evaluate(test_generator, batch_size=100)
print("test loss, test acc:", results)

"""# **EffcientUnet_fc1**
EffcientUnet搭配(flair,t1ce,t1)
"""

K.clear_session()
from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Conv2DTranspose, Concatenate, Input
from tensorflow.keras.models import Model
from tensorflow.keras.applications import EfficientNetV2S
from tensorflow.keras.layers import Dropout
import tensorflow as tf

print("TF Version: ", tf.__version__)

def conv_block(inputs, num_filters):
    x = Conv2D(num_filters, 3, padding="same")(inputs)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    x = Conv2D(num_filters, 3, padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    return x

def decoder_block(inputs, skip, num_filters):
    x = Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(inputs)
    #skip= Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(skip)
    x = Concatenate()([x, skip])
    x = conv_block(x, num_filters)
    return x

def build_effienet_unet(input_shape):
    """ Input """
    inputs = Input(input_shape)

    """ Pre-trained Encoder """
    encoder = EfficientNetV2S(include_top=False, weights="imagenet", input_tensor=inputs)
    s0 = encoder.get_layer("input_1").output
    s1 = encoder.get_layer("block1b_add").output                      ## 256
    s2 = encoder.get_layer("block2d_add").output    ## 128
    s3 = encoder.get_layer("block3d_add").output    ## 64
    s4 = encoder.get_layer("block5i_add").output    ## 32

    """ Bottleneck """
    #b1 = encoder.get_layer("top_activation").output    ## 16
    b1 = encoder.layers[-1].output
    """ Decoder """
    d1 = decoder_block(b1, s4, 512)                               ## 32
    d2 = decoder_block(d1, s3, 256)                               ## 64
    d3 = decoder_block(d2, s2, 128)                               ## 128
    d4 = decoder_block(d3, s1, 64)
    d5 = decoder_block(d4, s0, 32)
    """ Decoder """

    """ Output """
    outputs = Conv2D(1, 1, padding="same", activation="sigmoid")(d5)

    model = Model(inputs, outputs, name="EfficientNetV2S_UNET")
    return model

input_shape = (128, 128, 3)
effienet_1f2 = build_effienet_unet(input_shape)
#effienet_unet.summary()
effienet_1f2.compile(loss=generalized_dice_loss, optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), metrics = ['accuracy', dice_test] )

class DataGenerator(tf.keras.utils.Sequence):
    'Generates data for Keras'
    def __init__(self, list_IDs, dim=(IMG_SIZE,IMG_SIZE), batch_size = 64, n_channels = 3, shuffle=True):
        'Initialization'
        self.dim = dim
        self.batch_size = batch_size
        self.list_IDs = list_IDs
        self.n_channels = n_channels
        self.shuffle = shuffle
        self.on_epoch_end()

    def __len__(self):
        'Denotes the number of batches per epoch'
        return int(np.floor(len(self.list_IDs) / self.batch_size))

    def __getitem__(self, index):
        'Generate one batch of data'
        # Generate indexes of the batch
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]

        # Find list of IDs
        Batch_ids = [self.list_IDs[k] for k in indexes]

        # Generate data
        X, y = self.__data_generation(Batch_ids)

        return X, y

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle == True:
            np.random.shuffle(self.indexes)

    def __data_generation(self, Batch_ids):
        'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
        # Initialization
        X = np.zeros((self.batch_size, *self.dim, self.n_channels))
        y = np.zeros((self.batch_size, 128, 128))

        # Generate data
        for c, i in enumerate(Batch_ids):
            case_path = os.path.join(TRAIN_DATASET_PATH, i)

            data_path = os.path.join(case_path, f'{i}_img.npy')
            img = np.load(data_path)


            data_path = os.path.join(case_path, f'{i}_seg.npy')
            seg = np.load(data_path)

            X[c,:,:,0] = img[...,0]#flair
            X[c,:,:,1] = img[...,1]#t1ce
            X[c,:,:,2] = img[...,2]#t1

            y[c,:,:]=seg

        return X, y



training_generator = DataGenerator(train_id)
valid_generator = DataGenerator(val_id)
test_generator = DataGenerator(test_id)

history =  effienet_1f2.fit(training_generator,
                    epochs=30,
                    validation_data = valid_generator
                    )

effienet_1f2.save('effienet_fc1_zxy.h5')

effienet_1c2 = tf.keras.models.load_model('effienet_fc1_zxy.h5',custom_objects={ 'accuracy': dice_test,"dice_test": dice_test}, compile=False)

def predictByPath(case_path,case):
    files = next(os.walk(case_path))[2]
    X = np.empty((1, IMG_SIZE, IMG_SIZE,3))
    vol_path = case_path + case + '_img.npy'
    img=np.load(vol_path)

    for j in range(1):
        X[j,:,:,0] = img[...,0]
        X[j,:,:,1] = img[...,1]
        X[j,:,:,2] = img[...,2]

    return effienet_1c2.predict(X, verbose=1)
def predictByPath_X(case_path,case):
    files = next(os.walk(case_path))[2]
    X = np.empty((VOLUME_SLICES, IMG_SIZE, IMG_SIZE, 1))
    y = np.empty((VOLUME_SLICES, IMG_SIZE, IMG_SIZE))
    vol_path = case_path + case + '_img.npy'
    img=np.load(vol_path)

    vol_path = case_path + case + '_seg.npy'
    seg=np.load(vol_path)
    for j in range(1):
        X[j,:,:,0] = img[...,0]
        y[j,...] = seg

    return X,y
import numpy as np
def showPredictsById(case, start_slice = 60):
    path = TRAIN_DATASET_PATH + case + '/'
    p = predictByPath(path,case)

    origImage,gt=predictByPath_X(path,case)

    plt.figure(figsize=(18, 50))
    f, axarr = plt.subplots(1,3, figsize = (18, 50))

    axarr[0].imshow(origImage[0,:,:], cmap="gray")
    axarr[0].title.set_text('Original image flair')
    axarr[1].imshow(gt[0,:,:], cmap="Reds", interpolation='none', alpha=0.3)
    axarr[1].title.set_text('Ground truth')
    axarr[2].imshow(np.round(p[0,:,]), cmap="OrRd", interpolation='none', alpha=0.3)
    axarr[2].title.set_text('predicted')
    plt.show()

showPredictsById(case="BraTS19_CBICA_ATN_1_93")

effienet_1c2.compile(loss=generalized_dice_loss, optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), metrics = ['accuracy', dice_test] )
# Evaluate the model on the test data using `evaluate`
print("Evaluate on test data")
results = effienet_1c2.evaluate(test_generator, batch_size=100)
print("test loss, test acc:", results)

"""# **EffcientUnet_2c1**
EffcientUnet搭配(t2,t1ce,t1)
"""

K.clear_session()
from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Conv2DTranspose, Concatenate, Input
from tensorflow.keras.models import Model
from tensorflow.keras.applications import EfficientNetV2S
from tensorflow.keras.layers import Dropout
import tensorflow as tf

print("TF Version: ", tf.__version__)

def conv_block(inputs, num_filters):
    x = Conv2D(num_filters, 3, padding="same")(inputs)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    x = Conv2D(num_filters, 3, padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    return x

def decoder_block(inputs, skip, num_filters):
    x = Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(inputs)
    #skip= Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(skip)
    x = Concatenate()([x, skip])
    x = conv_block(x, num_filters)
    return x

def build_effienet_unet(input_shape):
    """ Input """
    inputs = Input(input_shape)

    """ Pre-trained Encoder """
    encoder = EfficientNetV2S(include_top=False, weights="imagenet", input_tensor=inputs)
    s0 = encoder.get_layer("input_1").output
    s1 = encoder.get_layer("block1b_add").output                      ## 256
    s2 = encoder.get_layer("block2d_add").output    ## 128
    s3 = encoder.get_layer("block3d_add").output    ## 64
    s4 = encoder.get_layer("block5i_add").output    ## 32

    """ Bottleneck """
    #b1 = encoder.get_layer("top_activation").output    ## 16
    b1 = encoder.layers[-1].output
    """ Decoder """
    d1 = decoder_block(b1, s4, 512)                               ## 32
    d2 = decoder_block(d1, s3, 256)                               ## 64
    d3 = decoder_block(d2, s2, 128)                               ## 128
    d4 = decoder_block(d3, s1, 64)
    d5 = decoder_block(d4, s0, 32)
    """ Decoder """

    """ Output """
    outputs = Conv2D(1, 1, padding="same", activation="sigmoid")(d5)

    model = Model(inputs, outputs, name="EfficientNetV2S_UNET")
    return model

input_shape = (128, 128, 3)
effienet_1f2 = build_effienet_unet(input_shape)
#effienet_unet.summary()
effienet_1f2.compile(loss=generalized_dice_loss, optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), metrics = ['accuracy', dice_test] )

class DataGenerator(tf.keras.utils.Sequence):
    'Generates data for Keras'
    def __init__(self, list_IDs, dim=(IMG_SIZE,IMG_SIZE), batch_size = 64, n_channels = 3, shuffle=True):
        'Initialization'
        self.dim = dim
        self.batch_size = batch_size
        self.list_IDs = list_IDs
        self.n_channels = n_channels
        self.shuffle = shuffle
        self.on_epoch_end()

    def __len__(self):
        'Denotes the number of batches per epoch'
        return int(np.floor(len(self.list_IDs) / self.batch_size))

    def __getitem__(self, index):
        'Generate one batch of data'
        # Generate indexes of the batch
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]

        # Find list of IDs
        Batch_ids = [self.list_IDs[k] for k in indexes]

        # Generate data
        X, y = self.__data_generation(Batch_ids)

        return X, y

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle == True:
            np.random.shuffle(self.indexes)

    def __data_generation(self, Batch_ids):
        'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
        # Initialization
        X = np.zeros((self.batch_size, *self.dim, self.n_channels))
        y = np.zeros((self.batch_size, 128, 128))

        # Generate data
        for c, i in enumerate(Batch_ids):
            case_path = os.path.join(TRAIN_DATASET_PATH, i)

            data_path = os.path.join(case_path, f'{i}_img.npy')
            img = np.load(data_path)


            data_path = os.path.join(case_path, f'{i}_seg.npy')
            seg = np.load(data_path)

            X[c,:,:,0] = img[...,3]#t2
            X[c,:,:,1] = img[...,1]#t1ce
            X[c,:,:,2] = img[...,2]#t1

            y[c,:,:]=seg

        return X, y



training_generator = DataGenerator(train_id)
valid_generator = DataGenerator(val_id)
test_generator = DataGenerator(test_id)

history =  effienet_1f2.fit(training_generator,
                    epochs=30,
                    validation_data = valid_generator
                    )

effienet_1f2.save('effienet_2c1_zxy.h5')

effienet_1c2 = tf.keras.models.load_model('effienet_2c1_zxy.h5',custom_objects={ 'accuracy': dice_test,"dice_test": dice_test}, compile=False)

def predictByPath(case_path,case):
    files = next(os.walk(case_path))[2]
    X = np.empty((1, IMG_SIZE, IMG_SIZE,3))
    vol_path = case_path + case + '_img.npy'
    img=np.load(vol_path)

    for j in range(1):
        X[j,:,:,0] = img[...,3]
        X[j,:,:,1] = img[...,1]
        X[j,:,:,2] = img[...,2]

    return effienet_1c2.predict(X, verbose=1)
def predictByPath_X(case_path,case):
    files = next(os.walk(case_path))[2]
    X = np.empty((VOLUME_SLICES, IMG_SIZE, IMG_SIZE, 1))
    y = np.empty((VOLUME_SLICES, IMG_SIZE, IMG_SIZE))
    vol_path = case_path + case + '_img.npy'
    img=np.load(vol_path)

    vol_path = case_path + case + '_seg.npy'
    seg=np.load(vol_path)
    for j in range(1):
        X[j,:,:,0] = img[...,0]
        y[j,...] = seg

    return X,y
import numpy as np
def showPredictsById(case, start_slice = 60):
    path = TRAIN_DATASET_PATH + case + '/'
    p = predictByPath(path,case)

    origImage,gt=predictByPath_X(path,case)

    plt.figure(figsize=(18, 50))
    f, axarr = plt.subplots(1,3, figsize = (18, 50))

    axarr[0].imshow(origImage[0,:,:], cmap="gray")
    axarr[0].title.set_text('Original image flair')
    axarr[1].imshow(gt[0,:,:], cmap="Reds", interpolation='none', alpha=0.3)
    axarr[1].title.set_text('Ground truth')
    axarr[2].imshow(np.round(p[0,:,]), cmap="OrRd", interpolation='none', alpha=0.3)
    axarr[2].title.set_text('predicted')
    plt.show()

showPredictsById(case="BraTS19_CBICA_ATN_1_93")

effienet_1c2.compile(loss=generalized_dice_loss, optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), metrics = ['accuracy', dice_test] )
# Evaluate the model on the test data using `evaluate`
print("Evaluate on test data")
results = effienet_1c2.evaluate(test_generator, batch_size=100)
print("test loss, test acc:", results)

"""# **EffcientUnet_f12**
EffcientUnet搭配(flair,t1,t2)
"""

K.clear_session()
from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Conv2DTranspose, Concatenate, Input
from tensorflow.keras.models import Model
from tensorflow.keras.applications import EfficientNetV2S
from tensorflow.keras.layers import Dropout
import tensorflow as tf

print("TF Version: ", tf.__version__)

def conv_block(inputs, num_filters):
    x = Conv2D(num_filters, 3, padding="same")(inputs)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    x = Conv2D(num_filters, 3, padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    return x

def decoder_block(inputs, skip, num_filters):
    x = Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(inputs)
    #skip= Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(skip)
    x = Concatenate()([x, skip])
    x = conv_block(x, num_filters)
    return x

def build_effienet_unet(input_shape):
    """ Input """
    inputs = Input(input_shape)

    """ Pre-trained Encoder """
    encoder = EfficientNetV2S(include_top=False, weights="imagenet", input_tensor=inputs)
    s0 = encoder.get_layer("input_1").output
    s1 = encoder.get_layer("block1b_add").output                      ## 256
    s2 = encoder.get_layer("block2d_add").output    ## 128
    s3 = encoder.get_layer("block3d_add").output    ## 64
    s4 = encoder.get_layer("block5i_add").output    ## 32

    """ Bottleneck """
    #b1 = encoder.get_layer("top_activation").output    ## 16
    b1 = encoder.layers[-1].output
    """ Decoder """
    d1 = decoder_block(b1, s4, 512)                               ## 32
    d2 = decoder_block(d1, s3, 256)                               ## 64
    d3 = decoder_block(d2, s2, 128)                               ## 128
    d4 = decoder_block(d3, s1, 64)
    d5 = decoder_block(d4, s0, 32)
    """ Decoder """

    """ Output """
    outputs = Conv2D(1, 1, padding="same", activation="sigmoid")(d5)

    model = Model(inputs, outputs, name="EfficientNetV2S_UNET")
    return model

input_shape = (128, 128, 3)
effienet_1f2 = build_effienet_unet(input_shape)
#effienet_unet.summary()
effienet_1f2.compile(loss=generalized_dice_loss, optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), metrics = ['accuracy', dice_test] )

class DataGenerator(tf.keras.utils.Sequence):
    'Generates data for Keras'
    def __init__(self, list_IDs, dim=(IMG_SIZE,IMG_SIZE), batch_size = 64, n_channels = 3, shuffle=True):
        'Initialization'
        self.dim = dim
        self.batch_size = batch_size
        self.list_IDs = list_IDs
        self.n_channels = n_channels
        self.shuffle = shuffle
        self.on_epoch_end()

    def __len__(self):
        'Denotes the number of batches per epoch'
        return int(np.floor(len(self.list_IDs) / self.batch_size))

    def __getitem__(self, index):
        'Generate one batch of data'
        # Generate indexes of the batch
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]

        # Find list of IDs
        Batch_ids = [self.list_IDs[k] for k in indexes]

        # Generate data
        X, y = self.__data_generation(Batch_ids)

        return X, y

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle == True:
            np.random.shuffle(self.indexes)

    def __data_generation(self, Batch_ids):
        'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
        # Initialization
        X = np.zeros((self.batch_size, *self.dim, self.n_channels))
        y = np.zeros((self.batch_size, 128, 128))

        # Generate data
        for c, i in enumerate(Batch_ids):
            case_path = os.path.join(TRAIN_DATASET_PATH, i)

            data_path = os.path.join(case_path, f'{i}_img.npy')
            img = np.load(data_path)


            data_path = os.path.join(case_path, f'{i}_seg.npy')
            seg = np.load(data_path)

            X[c,:,:,0] = img[...,0]#flair
            X[c,:,:,1] = img[...,2]#t1
            X[c,:,:,2] = img[...,3]#t2

            y[c,:,:]=seg

        return X, y



training_generator = DataGenerator(train_id)
valid_generator = DataGenerator(val_id)
test_generator = DataGenerator(test_id)

history =  effienet_1f2.fit(training_generator,
                    epochs=30,
                    validation_data = valid_generator
                    )

effienet_1f2.save('effienet_f12_zxy.h5')

effienet_1c2 = tf.keras.models.load_model('effienet_f12_zxy.h5',custom_objects={ 'accuracy': dice_test,"dice_test": dice_test}, compile=False)

def predictByPath(case_path,case):
    files = next(os.walk(case_path))[2]
    X = np.empty((1, IMG_SIZE, IMG_SIZE,3))
    vol_path = case_path + case + '_img.npy'
    img=np.load(vol_path)

    for j in range(1):
        X[j,:,:,0] = img[...,0]
        X[j,:,:,1] = img[...,2]
        X[j,:,:,2] = img[...,3]

    return effienet_1c2.predict(X, verbose=1)
def predictByPath_X(case_path,case):
    files = next(os.walk(case_path))[2]
    X = np.empty((VOLUME_SLICES, IMG_SIZE, IMG_SIZE, 1))
    y = np.empty((VOLUME_SLICES, IMG_SIZE, IMG_SIZE))
    vol_path = case_path + case + '_img.npy'
    img=np.load(vol_path)

    vol_path = case_path + case + '_seg.npy'
    seg=np.load(vol_path)
    for j in range(1):
        X[j,:,:,0] = img[...,0]
        y[j,...] = seg

    return X,y
import numpy as np
def showPredictsById(case, start_slice = 60):
    path = TRAIN_DATASET_PATH + case + '/'
    p = predictByPath(path,case)

    origImage,gt=predictByPath_X(path,case)

    plt.figure(figsize=(18, 50))
    f, axarr = plt.subplots(1,3, figsize = (18, 50))

    axarr[0].imshow(origImage[0,:,:], cmap="gray")
    axarr[0].title.set_text('Original image flair')
    axarr[1].imshow(gt[0,:,:], cmap="Reds", interpolation='none', alpha=0.3)
    axarr[1].title.set_text('Ground truth')
    axarr[2].imshow(np.round(p[0,:,]), cmap="OrRd", interpolation='none', alpha=0.3)
    axarr[2].title.set_text('predicted')
    plt.show()

showPredictsById(case="BraTS19_CBICA_ATN_1_93")

effienet_1c2.compile(loss=generalized_dice_loss, optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), metrics = ['accuracy', dice_test] )
# Evaluate the model on the test data using `evaluate`
print("Evaluate on test data")
results = effienet_1c2.evaluate(test_generator, batch_size=100)
print("test loss, test acc:", results)

"""# **UNET**"""

K.clear_session()
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import ReLU
def build_unet(inputs, ker_init, dropout):
    conv1 = Conv2D(32, 3, padding = 'same', kernel_initializer = ker_init)(inputs)
    conv1=BatchNormalization()(conv1)
    conv1=ReLU()(conv1)
    conv1 = Conv2D(32, 3, padding = 'same', kernel_initializer = ker_init)(conv1)
    conv1=BatchNormalization()(conv1)
    conv1=ReLU()(conv1)

    pool = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv = Conv2D(64, 3, padding = 'same', kernel_initializer = ker_init)(pool)
    conv=BatchNormalization()(conv)
    conv=ReLU()(conv)
    conv = Conv2D(64, 3, padding = 'same', kernel_initializer = ker_init)(conv)
    conv=BatchNormalization()(conv)
    conv=ReLU()(conv)

    pool1 = MaxPooling2D(pool_size=(2, 2))(conv)
    conv2 = Conv2D(128, 3, padding = 'same', kernel_initializer = ker_init)(pool1)
    conv2=BatchNormalization()(conv2)
    conv2=ReLU()(conv2)
    conv2 = Conv2D(128, 3, padding = 'same', kernel_initializer = ker_init)(conv2)
    conv2=BatchNormalization()(conv2)
    conv2=ReLU()(conv2)

    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Conv2D(256, 3, padding = 'same', kernel_initializer = ker_init)(pool2)
    conv3=BatchNormalization()(conv3)
    conv3=ReLU()(conv3)
    conv3 = Conv2D(256, 3, padding = 'same', kernel_initializer = ker_init)(conv3)
    conv3=BatchNormalization()(conv3)
    conv3=ReLU()(conv3)


    pool4 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv5 = Conv2D(512, 3, padding = 'same', kernel_initializer = ker_init)(pool4)
    conv5=BatchNormalization()(conv5)
    conv5=ReLU()(conv5)
    conv5 = Conv2D(512, 3,padding = 'same', kernel_initializer = ker_init)(conv5)
    conv5=BatchNormalization()(conv5)
    conv5=ReLU()(conv5)
    drop5 = Dropout(dropout)(conv5)

    up7 = Conv2D(256, 2,activation=tf.keras.layers.ReLU(), padding = 'same', kernel_initializer = ker_init)(UpSampling2D(size = (2,2))(drop5))
    merge7 = concatenate([conv3,up7], axis = 3)
    conv7 = Conv2D(256, 3, padding = 'same', kernel_initializer = ker_init)(merge7)
    conv7=BatchNormalization()(conv7)
    conv7=ReLU()(conv7)
    conv7 = Conv2D(256, 3, padding = 'same', kernel_initializer = ker_init)(conv7)
    conv7=BatchNormalization()(conv7)
    conv7=ReLU()(conv7)


    up8 = Conv2D(128, 2,activation=tf.keras.layers.ReLU(), padding = 'same', kernel_initializer = ker_init)(UpSampling2D(size = (2,2))(conv7))
    merge8 = concatenate([conv2,up8], axis = 3)
    conv8 = Conv2D(128, 3, padding = 'same', kernel_initializer = ker_init)(merge8)
    conv8=BatchNormalization()(conv8)
    conv8=ReLU()(conv8)
    conv8 = Conv2D(128, 3, padding = 'same', kernel_initializer = ker_init)(conv8)
    conv8=BatchNormalization()(conv8)
    conv8=ReLU()(conv8)

    up9 = Conv2D(64, 2,activation=tf.keras.layers.ReLU(), padding = 'same', kernel_initializer = ker_init)(UpSampling2D(size = (2,2))(conv8))
    merge9 = concatenate([conv,up9], axis = 3)
    conv9 = Conv2D(64, 3, padding = 'same', kernel_initializer = ker_init)(merge9)
    conv9=BatchNormalization()(conv9)
    conv9=ReLU()(conv9)
    conv9 = Conv2D(64, 3, padding = 'same', kernel_initializer = ker_init)(conv9)
    conv9=BatchNormalization()(conv9)
    conv9=ReLU()(conv9)

    up = Conv2D(32, 2,activation=tf.keras.layers.ReLU(), padding = 'same', kernel_initializer = ker_init)(UpSampling2D(size = (2,2))(conv9))
    merge = concatenate([conv1,up], axis = 3)
    conv = Conv2D(32, 3, padding = 'same', kernel_initializer = ker_init)(merge)
    conv=BatchNormalization()(conv)
    conv=ReLU()(conv)
    conv = Conv2D(32, 3, padding = 'same', kernel_initializer = ker_init)(conv)
    conv=BatchNormalization()(conv)
    conv=ReLU()(conv)

    conv10 = Conv2D(1, (1,1), activation = 'sigmoid')(conv)

    return Model(inputs = inputs, outputs = conv10)

input_layer = Input((IMG_SIZE, IMG_SIZE, 4))
model = build_unet(input_layer, 'he_normal', 0.2)
model.compile(loss=generalized_dice_loss, optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), metrics = ['accuracy', dice_test] )
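# plot_model is imported at the top but never used; an optional call to render the plain U-Net
# topology (needs pydot/graphviz, which Colab normally provides). The output file name is just
# an example.
plot_model(model, to_file="unet_xyz.png", show_shapes=True, show_layer_names=False)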

class DataGenerator(tf.keras.utils.Sequence):
    'Generates data for Keras'
    def __init__(self, list_IDs, dim=(IMG_SIZE,IMG_SIZE), batch_size = 64, n_channels = 4, shuffle=True):
        'Initialization'
        self.dim = dim
        self.batch_size = batch_size
        self.list_IDs = list_IDs
        self.n_channels = n_channels
        self.shuffle = shuffle
        self.on_epoch_end()

    def __len__(self):
        'Denotes the number of batches per epoch'
        return int(np.floor(len(self.list_IDs) / self.batch_size))

    def __getitem__(self, index):
        'Generate one batch of data'
        # Generate indexes of the batch
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]

        # Find list of IDs
        Batch_ids = [self.list_IDs[k] for k in indexes]

        # Generate data
        X, y = self.__data_generation(Batch_ids)

        return X, y

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle == True:
            np.random.shuffle(self.indexes)

    def __data_generation(self, Batch_ids):
        'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
        # Initialization
        X = np.zeros((self.batch_size, *self.dim, self.n_channels))
        y = np.zeros((self.batch_size, 128, 128))

        # Generate data
        for c, i in enumerate(Batch_ids):
            case_path = os.path.join(TRAIN_DATASET_PATH, i)

            data_path = os.path.join(case_path, f'{i}_img.npy')
            img = np.load(data_path)


            data_path = os.path.join(case_path, f'{i}_seg.npy')
            seg = np.load(data_path)

            X[c,:,:,0] = img[...,0]#flair
            X[c,:,:,1] = img[...,1]#t1ce
            X[c,:,:,2] = img[...,2]#t1
            X[c,:,:,3] = img[...,3]#t2

            y[c,:,:]=seg

        return X, y



training_generator = DataGenerator(train_id)
valid_generator = DataGenerator(val_id)
test_generator = DataGenerator(test_id)

history =  model.fit(training_generator,
                    epochs=30,
                    validation_data = valid_generator
                    )

model.save('unet_xyz.h5')

model = tf.keras.models.load_model('unet_xyz.h5',custom_objects={ 'accuracy': dice_test,"dice_test": dice_test}, compile=False)

def predictByPath(case_path,case):
    files = next(os.walk(case_path))[2]
    X = np.empty((1, IMG_SIZE, IMG_SIZE,4))
    vol_path = case_path + case + '_img.npy'
    img=np.load(vol_path)

    for j in range(1):
        X[j,:,:,0] = img[...,0]
        X[j,:,:,1] = img[...,1]
        X[j,:,:,2] = img[...,2]
        X[j,:,:,3] = img[...,3]

    return model.predict(X, verbose=1)
def predictByPath_X(case_path,case):
    files = next(os.walk(case_path))[2]
    X = np.empty((VOLUME_SLICES, IMG_SIZE, IMG_SIZE, 1))
    y = np.empty((VOLUME_SLICES, IMG_SIZE, IMG_SIZE))
    vol_path = case_path + case + '_img.npy'
    img=np.load(vol_path)

    vol_path = case_path + case + '_seg.npy'
    seg=np.load(vol_path)
    for j in range(1):
        X[j,:,:,0] = img[...,0]
        y[j,...] = seg

    return X,y
import numpy as np
def showPredictsById(case, start_slice = 60):
    path = TRAIN_DATASET_PATH + case + '/'
    p = predictByPath(path,case)

    origImage,gt=predictByPath_X(path,case)

    plt.figure(figsize=(18, 50))
    f, axarr = plt.subplots(1,3, figsize = (18, 50))

    axarr[0].imshow(origImage[0,:,:], cmap="gray")
    axarr[0].title.set_text('Original image flair')
    axarr[1].imshow(gt[0,:,:], cmap="Reds", interpolation='none', alpha=0.3)
    axarr[1].title.set_text('Ground truth')
    axarr[2].imshow(np.round(p[0,:,]), cmap="OrRd", interpolation='none', alpha=0.3)
    axarr[2].title.set_text('predicted')
    plt.show()

showPredictsById(case="BraTS19_CBICA_ATN_1_93")

model.compile(loss=generalized_dice_loss, optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), metrics = ['accuracy', dice_test] )
# Evaluate the model on the test data using `evaluate`
print("Evaluate on test data")
results = model.evaluate(test_generator, batch_size=100)
print("test loss, test acc:", results)

"""# **Efficientunet b4_fc2**
EfficientUnetB4 with (flair, t1ce, t2)
"""

K.clear_session()
from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Conv2DTranspose, Concatenate, Input
from tensorflow.keras.models import Model
from tensorflow.keras.applications import EfficientNetB4
from tensorflow.keras.layers import Dropout
import tensorflow as tf

print("TF Version: ", tf.__version__)

def conv_block(inputs, num_filters):
    x = Conv2D(num_filters, 3, padding="same")(inputs)
    x = Activation("relu")(x)

    x = Conv2D(num_filters, 3, padding="same")(x)
    x = Activation("relu")(x)

    return x

def decoder_block(inputs, skip, num_filters):
    x = Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(inputs)
    skip = Conv2D(num_filters, 1, padding="same")(skip)
    #skip= Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(skip)
    x = Concatenate()([x, skip])
    x = conv_block(x, num_filters)
    x = Add()([x, skip])
    return x
def decoder_block_f(inputs, skip, num_filters):

    x = Concatenate()([inputs, skip])
    x = conv_block(x, num_filters)
    return x
def decoder_block_l(inputs, num_filters):
    x = Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(inputs)
    x = conv_block(x, num_filters)
    return x
def build_effienet_unet(input_shape):
    """ Input """
    inputs = Input(input_shape)

    """ Pre-trained Encoder """
    encoder = EfficientNetB4(include_top=False, weights="imagenet", input_tensor=inputs)
    s0 = encoder.get_layer("input_1").output
    s1 = encoder.get_layer("block1b_add").output                      ## 256
    s2 = encoder.get_layer("block2d_add").output    ## 128
    s3 = encoder.get_layer("block3d_add").output    ## 64
    s4 = encoder.get_layer("block5f_add").output    ## 32

    """ Bottleneck """
    #b1 = encoder.get_layer("top_activation").output    ## 16
    b1 = encoder.layers[-1].output
    """ Decoder """
    d1 = decoder_block(b1, s4, 512)                               ## 32
    d2 = decoder_block(d1, s3, 256)                               ## 64
    d3 = decoder_block(d2, s2, 128)                               ## 128
    d4 = decoder_block(d3, s1, 64)
    d5 = decoder_block(d4, s0, 32)
    """ Decoder """

    """ Output """
    outputs = Conv2D(1, 1, padding="same", activation="sigmoid")(d5)

    model = Model(inputs, outputs, name="EfficientNetB4_UNET")
    return model

input_shape = (128, 128, 3)
effienet_unet = build_effienet_unet(input_shape)
#effienet_unet.summary()
effienet_unet.compile(loss=generalized_dice_loss, optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), metrics = ['accuracy', dice_test] )

class DataGenerator(tf.keras.utils.Sequence):
    'Generates data for Keras'
    def __init__(self, list_IDs, dim=(IMG_SIZE,IMG_SIZE), batch_size = 64, n_channels = 3, shuffle=True):
        'Initialization'
        self.dim = dim
        self.batch_size = batch_size
        self.list_IDs = list_IDs
        self.n_channels = n_channels
        self.shuffle = shuffle
        self.on_epoch_end()

    def __len__(self):
        'Denotes the number of batches per epoch'
        return int(np.floor(len(self.list_IDs) / self.batch_size))

    def __getitem__(self, index):
        'Generate one batch of data'
        # Generate indexes of the batch
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]

        # Find list of IDs
        Batch_ids = [self.list_IDs[k] for k in indexes]

        # Generate data
        X, y = self.__data_generation(Batch_ids)

        return X, y

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle == True:
            np.random.shuffle(self.indexes)

    def __data_generation(self, Batch_ids):
        'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
        # Initialization
        X = np.zeros((self.batch_size, *self.dim, self.n_channels))
        y = np.zeros((self.batch_size, 128, 128))

        # Generate data
        for c, i in enumerate(Batch_ids):
            case_path = os.path.join(TRAIN_DATASET_PATH, i)

            data_path = os.path.join(case_path, f'{i}_img.npy')
            img = np.load(data_path)


            data_path = os.path.join(case_path, f'{i}_seg.npy')
            seg = np.load(data_path)

            X[c,:,:,0] = img[...,0]#flair
            X[c,:,:,1] = img[...,1]#t1ce
            X[c,:,:,2] = img[...,3]#t2

            y[c,:,:]=seg

        return X, y



training_generator = DataGenerator(train_id)
valid_generator = DataGenerator(val_id)
test_generator = DataGenerator(test_id)

history =  effienet_unet.fit(training_generator,
                    epochs=30,
                    validation_data = valid_generator
                    )

effienet_unet.save('effienet_b4_fc2_zxy.h5')

effienet_1c2 = tf.keras.models.load_model('effienet_b4_fc2_zxy.h5',custom_objects={ 'accuracy': dice_test,"dice_test": dice_test}, compile=False)

def predictByPath(case_path,case):
    files = next(os.walk(case_path))[2]
    X = np.empty((1, IMG_SIZE, IMG_SIZE,3))
    vol_path = case_path + case + '_img.npy'
    img=np.load(vol_path)

    for j in range(1):
        X[j,:,:,0] = img[...,0]
        X[j,:,:,1] = img[...,1]
        X[j,:,:,2] = img[...,3]

    return effienet_1c2.predict(X, verbose=1)
def predictByPath_X(case_path,case):
    files = next(os.walk(case_path))[2]
    X = np.empty((VOLUME_SLICES, IMG_SIZE, IMG_SIZE, 1))
    y = np.empty((VOLUME_SLICES, IMG_SIZE, IMG_SIZE))
    vol_path = case_path + case + '_img.npy'
    img=np.load(vol_path)

    vol_path = case_path + case + '_seg.npy'
    seg=np.load(vol_path)
    for j in range(1):
        X[j,:,:,0] = img[...,0]
        y[j,...] = seg

    return X,y
import numpy as np
def showPredictsById(case, start_slice = 60):
    path = TRAIN_DATASET_PATH + case + '/'
    p = predictByPath(path,case)

    origImage,gt=predictByPath_X(path,case)

    plt.figure(figsize=(18, 50))
    f, axarr = plt.subplots(1,3, figsize = (18, 50))

    axarr[0].imshow(origImage[0,:,:], cmap="gray")
    axarr[0].title.set_text('Original image flair')
    axarr[1].imshow(gt[0,:,:], cmap="Reds", interpolation='none', alpha=0.3)
    axarr[1].title.set_text('Ground truth')
    axarr[2].imshow(np.round(p[0,:,]), cmap="OrRd", interpolation='none', alpha=0.3)
    axarr[2].title.set_text('predicted')
    plt.show()

showPredictsById(case="BraTS19_CBICA_ATN_1_93")

effienet_1c2.compile(loss=generalized_dice_loss, optimizer=tf.keras.optimizers.Adam(learning_rate=0.001), metrics = ['accuracy', dice_test] )
# Evaluate the model on the test data using `evaluate`
print("Evaluate on test data")
results = effienet_1c2.evaluate(test_generator, batch_size=100)
print("test loss, test acc:", results)

"""# **Efficientunet b4_fc1**
EfficientUnetB4 with (flair, t1ce, t1)
"""

K.clear_session()
from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Conv2DTranspose, Concatenate, Input
from tensorflow.keras.models import Model
from tensorflow.keras.applications import EfficientNetB4
from tensorflow.keras.layers import Dropout
import tensorflow as tf

print("TF Version: ", tf.__version__)

def conv_block(inputs, num_filters):
    x = Conv2D(num_filters, 3, padding="same")(inputs)
    x = Activation("relu")(x)

    x = Conv2D(num_filters, 3, padding="same")(x)
    x = Activation("relu")(x)

    return x

def decoder_block(inputs, skip, num_filters):
    x = Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(inputs)
    skip = Conv2D(num_filters, 1, padding="same")(skip)
    #skip= Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(skip)
    x = Concatenate()([x, skip])
    x = conv_block(x, num_filters)
    x = Add()([x, skip])
    return x
def decoder_block_f(inputs, skip, num_filters):

    x = Concatenate()([inputs, skip])
    x = conv_block(x, num_filters)
    return x
def decoder_block_l(inputs, num_filters):
    x = Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(inputs)
    x = conv_block(x, num_filters)
    return x
def build_effienet_unet(input_shape):
    """ Input """
    inputs = Input(input_shape)

    """ Pre-trained Encoder """
    encoder = EfficientNetB4(include_top=False, weights="imagenet", input_tensor=inputs)
    s0 = encoder.get_layer("input_1").output
    s1 = encoder.get_layer("block1b_add").output                      ## 256
    s2 = encoder.get_layer("block2d_add").output    ## 128
    s3 = encoder.get_layer("block3d_add").output    ## 64
    s4 = encoder.get_layer("block5f_add").output    ## 32

    """ Bottleneck """
    #b1 = encoder.get_layer("top_activation").output    ## 16
    b1 = encoder.layers[-1].output
    """ Decoder """
    d1 = decoder_block(b1, s4, 512)                               ## 32
    d2 = decoder_block(d1, s3, 256)                               ## 64
    d3 = decoder_block(d2, s2, 128)                               ## 128
    d4 = decoder_block(d3, s1, 64)
    d5 = decoder_block(d4, s0, 32)
    """ Decoder """

    """ Output """
    outputs = Conv2D(1, 1, padding="same", activation="sigmoid")(d5)

    model = Model(inputs, outputs, name="EfficientNetB4_UNET")
    return model

input_shape = (128, 128, 3)
effienet_unet = build_effienet_unet(input_shape)
#effienet_unet.summary()
effienet_unet.compile(loss=generalized_dice_loss, optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), metrics = ['accuracy', dice_test] )
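
# Optional quick check: the decoder should upsample back to the 128x128 input
# resolution with a single sigmoid channel.
print(effienet_unet.output_shape)  # expected: (None, 128, 128, 1)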

class DataGenerator(tf.keras.utils.Sequence):
    'Generates data for Keras'
    def __init__(self, list_IDs, dim=(IMG_SIZE,IMG_SIZE), batch_size = 64, n_channels = 3, shuffle=True):
        'Initialization'
        self.dim = dim
        self.batch_size = batch_size
        self.list_IDs = list_IDs
        self.n_channels = n_channels
        self.shuffle = shuffle
        self.on_epoch_end()

    def __len__(self):
        'Denotes the number of batches per epoch'
        return int(np.floor(len(self.list_IDs) / self.batch_size))

    def __getitem__(self, index):
        'Generate one batch of data'
        # Generate indexes of the batch
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]

        # Find list of IDs
        Batch_ids = [self.list_IDs[k] for k in indexes]

        # Generate data
        X, y = self.__data_generation(Batch_ids)

        return X, y

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle == True:
            np.random.shuffle(self.indexes)

    def __data_generation(self, Batch_ids):
        'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
        # Initialization
        X = np.zeros((self.batch_size, *self.dim, self.n_channels))
        y = np.zeros((self.batch_size, 128, 128))

        # Generate data
        for c, i in enumerate(Batch_ids):
            case_path = os.path.join(TRAIN_DATASET_PATH, i)

            data_path = os.path.join(case_path, f'{i}_img.npy')
            img = np.load(data_path)


            data_path = os.path.join(case_path, f'{i}_seg.npy')
            seg = np.load(data_path)

            X[c,:,:,0] = img[...,0]#flair
            X[c,:,:,1] = img[...,1]#t1ce
            X[c,:,:,2] = img[...,2]#t1

            y[c,:,:]=seg

        return X, y



training_generator = DataGenerator(train_id)
valid_generator = DataGenerator(val_id)
test_generator = DataGenerator(test_id)

history =  effienet_unet.fit(training_generator,
                    epochs=30,
                    validation_data = valid_generator
                    )

effienet_unet.save('effienet_fc1_zxy.h5')

effienet_1c2 = tf.keras.models.load_model('effienet_fc1_zxy.h5',custom_objects={ 'accuracy': dice_test,"dice_test": dice_test}, compile=False)

def predictByPath(case_path,case):
    files = next(os.walk(case_path))[2]
    X = np.empty((1, IMG_SIZE, IMG_SIZE,3))
    vol_path = case_path + case + '_img.npy'
    img=np.load(vol_path)

    for j in range(1):
        X[j,:,:,0] = img[...,0]
        X[j,:,:,1] = img[...,1]
        X[j,:,:,2] = img[...,2]

    return effienet_1c2.predict(X, verbose=1)
def predictByPath_X(case_path,case):
    files = next(os.walk(case_path))[2]
    X = np.empty((VOLUME_SLICES, IMG_SIZE, IMG_SIZE, 1))
    y = np.empty((VOLUME_SLICES, IMG_SIZE, IMG_SIZE))
    vol_path = case_path + case + '_img.npy'
    img=np.load(vol_path)

    vol_path = case_path + case + '_seg.npy'
    seg=np.load(vol_path)
    for j in range(1):
        X[j,:,:,0] = img[...,0]
        y[j,...] = seg

    return X,y
import numpy as np
def showPredictsById(case, start_slice = 60):
    path = TRAIN_DATASET_PATH + case + '/'
    p = predictByPath(path,case)

    origImage,gt=predictByPath_X(path,case)

    f, axarr = plt.subplots(1, 3, figsize=(18, 50))

    axarr[0].imshow(origImage[0,:,:], cmap="gray")
    axarr[0].title.set_text('Original image flair')
    axarr[1].imshow(gt[0,:,:], cmap="Reds", interpolation='none', alpha=0.3)
    axarr[1].title.set_text('Ground truth')
    axarr[2].imshow(np.round(p[0,:,]), cmap="OrRd", interpolation='none', alpha=0.3)
    axarr[2].title.set_text('predicted')
    plt.show()

showPredictsById(case="BraTS19_CBICA_ATN_1_93")

effienet_1c2.compile(loss=generalized_dice_loss, optimizer=tf.keras.optimizers.Adam(learning_rate=0.001), metrics = ['accuracy', dice_test] )
# Evaluate the model on the test data using `evaluate`
print("Evaluate on test data")
results = effienet_1c2.evaluate(test_generator, batch_size=100)
print("test loss, test acc:", results)

"""# **Efficientunet b4_2c1**
EffcientUnetB4搭配(2,t1ce,t1)
"""

K.clear_session()
from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Conv2DTranspose, Concatenate, Input
from tensorflow.keras.models import Model
from tensorflow.keras.applications import EfficientNetB4
from tensorflow.keras.layers import Dropout
import tensorflow as tf

print("TF Version: ", tf.__version__)

def conv_block(inputs, num_filters):
    x = Conv2D(num_filters, 3, padding="same")(inputs)
    x = Activation("relu")(x)

    x = Conv2D(num_filters, 3, padding="same")(x)
    x = Activation("relu")(x)

    return x

def decoder_block(inputs, skip, num_filters):
    x = Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(inputs)
    skip = Conv2D(num_filters, 1, padding="same")(skip)
    #skip= Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(skip)
    x = Concatenate()([x, skip])
    x = conv_block(x, num_filters)
    x = Add()([x, skip])
    return x
def decoder_block_f(inputs, skip, num_filters):

    x = Concatenate()([inputs, skip])
    x = conv_block(x, num_filters)
    return x
def decoder_block_l(inputs, num_filters):
    x = Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(inputs)
    x = conv_block(x, num_filters)
    return x
def build_effienet_unet(input_shape):
    """ Input """
    inputs = Input(input_shape)

    """ Pre-trained Encoder """
    encoder = EfficientNetB4(include_top=False, weights="imagenet", input_tensor=inputs)
    s0 = encoder.get_layer("input_1").output
    s1 = encoder.get_layer("block1b_add").output                      ## 256
    s2 = encoder.get_layer("block2d_add").output    ## 128
    s3 = encoder.get_layer("block3d_add").output    ## 64
    s4 = encoder.get_layer("block5f_add").output    ## 32

    """ Bottleneck """
    #b1 = encoder.get_layer("top_activation").output    ## 16
    b1 = encoder.layers[-1].output
    """ Decoder """
    d1 = decoder_block(b1, s4, 512)                               ## 32
    d2 = decoder_block(d1, s3, 256)                               ## 64
    d3 = decoder_block(d2, s2, 128)                               ## 128
    d4 = decoder_block(d3, s1, 64)
    d5 = decoder_block(d4, s0, 32)
    """ Decoder """

    """ Output """
    outputs = Conv2D(1, 1, padding="same", activation="sigmoid")(d5)

    model = Model(inputs, outputs, name="EfficientNetB4_UNET")
    return model

input_shape = (128, 128, 3)
effienet_unet = build_effienet_unet(input_shape)
#effienet_unet.summary()
effienet_unet.compile(loss=generalized_dice_loss, optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), metrics = ['accuracy', dice_test] )

class DataGenerator(tf.keras.utils.Sequence):
    'Generates data for Keras'
    def __init__(self, list_IDs, dim=(IMG_SIZE,IMG_SIZE), batch_size = 64, n_channels = 3, shuffle=True):
        'Initialization'
        self.dim = dim
        self.batch_size = batch_size
        self.list_IDs = list_IDs
        self.n_channels = n_channels
        self.shuffle = shuffle
        self.on_epoch_end()

    def __len__(self):
        'Denotes the number of batches per epoch'
        return int(np.floor(len(self.list_IDs) / self.batch_size))

    def __getitem__(self, index):
        'Generate one batch of data'
        # Generate indexes of the batch
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]

        # Find list of IDs
        Batch_ids = [self.list_IDs[k] for k in indexes]

        # Generate data
        X, y = self.__data_generation(Batch_ids)

        return X, y

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle == True:
            np.random.shuffle(self.indexes)

    def __data_generation(self, Batch_ids):
        'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
        # Initialization
        X = np.zeros((self.batch_size, *self.dim, self.n_channels))
        y = np.zeros((self.batch_size, 128, 128))

        # Generate data
        for c, i in enumerate(Batch_ids):
            case_path = os.path.join(TRAIN_DATASET_PATH, i)

            data_path = os.path.join(case_path, f'{i}_img.npy')
            img = np.load(data_path)


            data_path = os.path.join(case_path, f'{i}_seg.npy')
            seg = np.load(data_path)

            X[c,:,:,0] = img[...,3]#t2
            X[c,:,:,1] = img[...,1]#t1ce
            X[c,:,:,2] = img[...,2]#t1

            y[c,:,:]=seg

        return X, y



training_generator = DataGenerator(train_id)
valid_generator = DataGenerator(val_id)
test_generator = DataGenerator(test_id)

history =  effienet_unet.fit(training_generator,
                    epochs=30,
                    validation_data = valid_generator
                    )

effienet_unet.save('effienet_fc1_zxy.h5')

effienet_1c2 = tf.keras.models.load_model('effienet_fc1_zxy.h5',custom_objects={ 'accuracy': dice_test,"dice_test": dice_test}, compile=False)

def predictByPath(case_path,case):
    files = next(os.walk(case_path))[2]
    X = np.empty((1, IMG_SIZE, IMG_SIZE,3))
    vol_path = case_path + case + '_img.npy'
    img=np.load(vol_path)

    for j in range(1):
        X[j,:,:,0] = img[...,3]
        X[j,:,:,1] = img[...,1]
        X[j,:,:,2] = img[...,2]

    return effienet_1c2.predict(X, verbose=1)
def predictByPath_X(case_path,case):
    files = next(os.walk(case_path))[2]
    X = np.empty((VOLUME_SLICES, IMG_SIZE, IMG_SIZE, 1))
    y = np.empty((VOLUME_SLICES, IMG_SIZE, IMG_SIZE))
    vol_path = case_path + case + '_img.npy'
    img=np.load(vol_path)

    vol_path = case_path + case + '_seg.npy'
    seg=np.load(vol_path)
    for j in range(1):
        X[j,:,:,0] = img[...,0]
        y[j,...] = seg

    return X,y
import numpy as np
def showPredictsById(case, start_slice = 60):
    path = TRAIN_DATASET_PATH + case + '/'
    p = predictByPath(path,case)

    origImage,gt=predictByPath_X(path,case)

    f, axarr = plt.subplots(1, 3, figsize=(18, 50))

    axarr[0].imshow(origImage[0,:,:], cmap="gray")
    axarr[0].title.set_text('Original image flair')
    axarr[1].imshow(gt[0,:,:], cmap="Reds", interpolation='none', alpha=0.3)
    axarr[1].title.set_text('Ground truth')
    axarr[2].imshow(np.round(p[0,:,]), cmap="OrRd", interpolation='none', alpha=0.3)
    axarr[2].title.set_text('predicted')
    plt.show()

showPredictsById(case="BraTS19_CBICA_ATN_1_93")

effienet_1c2.compile(loss=generalized_dice_loss, optimizer=tf.keras.optimizers.Adam(learning_rate=0.001), metrics = ['accuracy', dice_test] )
# Evaluate the model on the test data using `evaluate`
print("Evaluate on test data")
results = effienet_1c2.evaluate(test_generator, batch_size=100)
print("test loss, test acc:", results)

"""# **Efficientunet b4_f12**
EffcientUnetB4搭配(flair,1,t2)
"""

K.clear_session()
from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Conv2DTranspose, Concatenate, Input
from tensorflow.keras.models import Model
from tensorflow.keras.applications import EfficientNetB4
from tensorflow.keras.layers import Dropout
import tensorflow as tf

print("TF Version: ", tf.__version__)

def conv_block(inputs, num_filters):
    x = Conv2D(num_filters, 3, padding="same")(inputs)
    x = Activation("relu")(x)

    x = Conv2D(num_filters, 3, padding="same")(x)
    x = Activation("relu")(x)

    return x

def decoder_block(inputs, skip, num_filters):
    x = Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(inputs)
    skip = Conv2D(num_filters, 1, padding="same")(skip)
    #skip= Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(skip)
    x = Concatenate()([x, skip])
    x = conv_block(x, num_filters)
    x = Add()([x, skip])
    return x
def decoder_block_f(inputs, skip, num_filters):

    x = Concatenate()([inputs, skip])
    x = conv_block(x, num_filters)
    return x
def decoder_block_l(inputs, num_filters):
    x = Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(inputs)
    x = conv_block(x, num_filters)
    return x
def build_effienet_unet(input_shape):
    """ Input """
    inputs = Input(input_shape)

    """ Pre-trained Encoder """
    encoder = EfficientNetB4(include_top=False, weights="imagenet", input_tensor=inputs)
    s0 = encoder.get_layer("input_1").output
    s1 = encoder.get_layer("block1b_add").output                      ## 256
    s2 = encoder.get_layer("block2d_add").output    ## 128
    s3 = encoder.get_layer("block3d_add").output    ## 64
    s4 = encoder.get_layer("block5f_add").output    ## 32

    """ Bottleneck """
    #b1 = encoder.get_layer("top_activation").output    ## 16
    b1 = encoder.layers[-1].output
    """ Decoder """
    d1 = decoder_block(b1, s4, 512)                               ## 32
    d2 = decoder_block(d1, s3, 256)                               ## 64
    d3 = decoder_block(d2, s2, 128)                               ## 128
    d4 = decoder_block(d3, s1, 64)
    d5 = decoder_block(d4, s0, 32)
    """ Decoder """

    """ Output """
    outputs = Conv2D(1, 1, padding="same", activation="sigmoid")(d5)

    model = Model(inputs, outputs, name="EfficientNetB4_UNET")
    return model

input_shape = (128, 128, 3)
effienet_unet = build_effienet_unet(input_shape)
#effienet_unet.summary()
effienet_unet.compile(loss=generalized_dice_loss, optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), metrics = ['accuracy', dice_test] )

class DataGenerator(tf.keras.utils.Sequence):
    'Generates data for Keras'
    def __init__(self, list_IDs, dim=(IMG_SIZE,IMG_SIZE), batch_size = 64, n_channels = 3, shuffle=True):
        'Initialization'
        self.dim = dim
        self.batch_size = batch_size
        self.list_IDs = list_IDs
        self.n_channels = n_channels
        self.shuffle = shuffle
        self.on_epoch_end()

    def __len__(self):
        'Denotes the number of batches per epoch'
        return int(np.floor(len(self.list_IDs) / self.batch_size))

    def __getitem__(self, index):
        'Generate one batch of data'
        # Generate indexes of the batch
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]

        # Find list of IDs
        Batch_ids = [self.list_IDs[k] for k in indexes]

        # Generate data
        X, y = self.__data_generation(Batch_ids)

        return X, y

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle == True:
            np.random.shuffle(self.indexes)

    def __data_generation(self, Batch_ids):
        'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
        # Initialization
        X = np.zeros((self.batch_size, *self.dim, self.n_channels))
        y = np.zeros((self.batch_size, 128, 128))

        # Generate data
        for c, i in enumerate(Batch_ids):
            case_path = os.path.join(TRAIN_DATASET_PATH, i)

            data_path = os.path.join(case_path, f'{i}_img.npy')
            img = np.load(data_path)


            data_path = os.path.join(case_path, f'{i}_seg.npy')
            seg = np.load(data_path)

            X[c,:,:,0] = img[...,0]#flair
            X[c,:,:,1] = img[...,2]#t1
            X[c,:,:,2] = img[...,3]#t2

            y[c,:,:]=seg

        return X, y



training_generator = DataGenerator(train_id)
valid_generator = DataGenerator(val_id)
test_generator = DataGenerator(test_id)

history =  effienet_unet.fit(training_generator,
                    epochs=30,
                    validation_data = valid_generator
                    )

effienet_unet.save('effienet_fc1_zxy.h5')

effienet_1c2 = tf.keras.models.load_model('effienet_fc1_zxy.h5',custom_objects={ 'accuracy': dice_test,"dice_test": dice_test}, compile=False)

def predictByPath(case_path,case):
    files = next(os.walk(case_path))[2]
    X = np.empty((1, IMG_SIZE, IMG_SIZE,3))
    vol_path = case_path + case + '_img.npy'
    img=np.load(vol_path)

    for j in range(1):
        X[j,:,:,0] = img[...,0]
        X[j,:,:,1] = img[...,2]
        X[j,:,:,2] = img[...,3]

    return effienet_1c2.predict(X, verbose=1)
def predictByPath_X(case_path,case):
    files = next(os.walk(case_path))[2]
    X = np.empty((VOLUME_SLICES, IMG_SIZE, IMG_SIZE, 1))
    y = np.empty((VOLUME_SLICES, IMG_SIZE, IMG_SIZE))
    vol_path = case_path + case + '_img.npy'
    img=np.load(vol_path)

    vol_path = case_path + case + '_seg.npy'
    seg=np.load(vol_path)
    for j in range(1):
        X[j,:,:,0] = img[...,0]
        y[j,...] = seg

    return X,y
import numpy as np
def showPredictsById(case, start_slice = 60):
    path = TRAIN_DATASET_PATH + case + '/'
    p = predictByPath(path,case)

    origImage,gt=predictByPath_X(path,case)

    f, axarr = plt.subplots(1, 3, figsize=(18, 50))

    axarr[0].imshow(origImage[0,:,:], cmap="gray")
    axarr[0].title.set_text('Original image flair')
    axarr[1].imshow(gt[0,:,:], cmap="Reds", interpolation='none', alpha=0.3)
    axarr[1].title.set_text('Ground truth')
    axarr[2].imshow(np.round(p[0,:,]), cmap="OrRd", interpolation='none', alpha=0.3)
    axarr[2].title.set_text('predicted')
    plt.show()

showPredictsById(case="BraTS19_CBICA_ATN_1_93")

effienet_1c2.compile(loss=generalized_dice_loss, optimizer=tf.keras.optimizers.Adam(learning_rate=0.001), metrics = ['accuracy', dice_test] )
# Evaluate the model on the test data using `evaluate`
print("Evaluate on test data")
results = effienet_1c2.evaluate(test_generator, batch_size=100)
print("test loss, test acc:", results)

"""# **EffcientUnet_fc2_attention_gate**
EffcientUnet搭配(flair,t1ce,t2)並加入attention_gate
"""

K.clear_session()
from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Conv2DTranspose, Concatenate, Input
from tensorflow.keras.models import Model
from tensorflow.keras.applications import EfficientNetV2S
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Add
import tensorflow as tf

print("TF Version: ", tf.__version__)
def attention_gate(input_x, g):
    theta_x = Conv2D(filters=input_x.shape[-1], kernel_size=(1, 1), strides=(1, 1), padding="same")(input_x)
    phi_g = Conv2D(filters=input_x.shape[-1], kernel_size=(1, 1), strides=(1, 1), padding="same")(g)
    f = Activation("relu")(Add()([theta_x, phi_g]))
    psi = Conv2D(filters=1, kernel_size=(1, 1), strides=(1, 1), padding="same")(f)
    attention = Activation("sigmoid")(psi)
    return Multiply()([input_x, attention])
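
# Optional quick check (illustrative): the gate returns a tensor with the same
# shape as input_x, since the single-channel attention map only rescales each
# spatial position.
_x = tf.zeros((1, 32, 32, 64))
print(attention_gate(_x, _x).shape)  # expected: (1, 32, 32, 64)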
def conv_block(inputs, num_filters):
    x = Conv2D(num_filters, 3, padding="same")(inputs)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    x = Conv2D(num_filters, 3, padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    return x
def decoder_block_with_attention_and_residual(inputs, skip, num_filters):
    x = Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(inputs)
    x = Concatenate()([x, skip])
    x = conv_block(x, num_filters)

    # Add the attention gate
    attention_output = attention_gate(x, skip)

    # Add the residual connection
    x = Add()([x, attention_output])

    return x
def decoder_block(inputs, skip, num_filters):
    x = Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(inputs)
    #skip= Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(skip)
    x = Concatenate()([x, skip])
    x = conv_block(x, num_filters)
    return x

def build_effienet_unet(input_shape):
    """ Input """
    inputs = Input(input_shape)

    """ Pre-trained Encoder """
    encoder = EfficientNetV2S(include_top=False, weights="imagenet", input_tensor=inputs)
    s0 = encoder.get_layer("input_1").output
    s1 = encoder.get_layer("block1b_add").output                      ## 256
    s2 = encoder.get_layer("block2d_add").output    ## 128
    s3 = encoder.get_layer("block3d_add").output    ## 64
    s4 = encoder.get_layer("block5i_add").output    ## 32

    """ Bottleneck """
    #b1 = encoder.get_layer("top_activation").output    ## 16
    b1 = encoder.layers[-1].output
    """ Decoder """
    d1 = decoder_block_with_attention_and_residual(b1, s4, 512)                               ## 32
    d2 = decoder_block_with_attention_and_residual(d1, s3, 256)                               ## 64
    d3 = decoder_block_with_attention_and_residual(d2, s2, 128)                               ## 128
    d4 = decoder_block_with_attention_and_residual(d3, s1, 64)
    d5 = decoder_block_with_attention_and_residual(d4, s0, 32)
    """ Decoder """

    """ Output """
    outputs = Conv2D(1, 1, padding="same", activation="sigmoid")(d5)

    model = Model(inputs, outputs, name="EfficientNetV2S_UNET")
    return model

input_shape = (128, 128, 3)
effienet_1f2 = build_effienet_unet(input_shape)
#effienet_unet.summary()
effienet_1f2.compile(loss=generalized_dice_loss, optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), metrics = ['accuracy', dice_test] )

class DataGenerator(tf.keras.utils.Sequence):
    'Generates data for Keras'
    def __init__(self, list_IDs, dim=(IMG_SIZE,IMG_SIZE), batch_size = 64, n_channels = 3, shuffle=True):
        'Initialization'
        self.dim = dim
        self.batch_size = batch_size
        self.list_IDs = list_IDs
        self.n_channels = n_channels
        self.shuffle = shuffle
        self.on_epoch_end()

    def __len__(self):
        'Denotes the number of batches per epoch'
        return int(np.floor(len(self.list_IDs) / self.batch_size))

    def __getitem__(self, index):
        'Generate one batch of data'
        # Generate indexes of the batch
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]

        # Find list of IDs
        Batch_ids = [self.list_IDs[k] for k in indexes]

        # Generate data
        X, y = self.__data_generation(Batch_ids)

        return X, y

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle == True:
            np.random.shuffle(self.indexes)

    def __data_generation(self, Batch_ids):
        'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
        # Initialization
        X = np.zeros((self.batch_size, *self.dim, self.n_channels))
        y = np.zeros((self.batch_size, 128, 128))

        # Generate data
        for c, i in enumerate(Batch_ids):
            case_path = os.path.join(TRAIN_DATASET_PATH, i)

            data_path = os.path.join(case_path, f'{i}_img.npy')
            img = np.load(data_path)


            data_path = os.path.join(case_path, f'{i}_seg.npy')
            seg = np.load(data_path)

            X[c,:,:,0] = img[...,0]#flair
            X[c,:,:,1] = img[...,1]#t1ce
            X[c,:,:,2] = img[...,3]#t2

            y[c,:,:]=seg

        return X, y



training_generator = DataGenerator(train_id)
valid_generator = DataGenerator(val_id)
test_generator = DataGenerator(test_id)

history =  effienet_1f2.fit(training_generator,
                    epochs=30,
                    validation_data = valid_generator
                    )

effienet_1f2.save('effienet_fc2_zxy.h5')

effienet_1c2 = tf.keras.models.load_model('effienet_fc2_zxy.h5',custom_objects={ 'accuracy': dice_test,"dice_test": dice_test}, compile=False)

def predictByPath(case_path,case):
    files = next(os.walk(case_path))[2]
    X = np.empty((1, IMG_SIZE, IMG_SIZE,3))
    vol_path = case_path + case + '_img.npy'
    img=np.load(vol_path)

    for j in range(1):
        X[j,:,:,0] = img[...,0]
        X[j,:,:,1] = img[...,1]
        X[j,:,:,2] = img[...,3]

    return effienet_1c2.predict(X, verbose=1)
def predictByPath_X(case_path,case):
    files = next(os.walk(case_path))[2]
    X = np.empty((VOLUME_SLICES, IMG_SIZE, IMG_SIZE, 1))
    y = np.empty((VOLUME_SLICES, IMG_SIZE, IMG_SIZE))
    vol_path = case_path + case + '_img.npy'
    img=np.load(vol_path)

    vol_path = case_path + case + '_seg.npy'
    seg=np.load(vol_path)
    for j in range(1):
        X[j,:,:,0] = img[...,0]
        y[j,...] = seg

    return X,y
import numpy as np
def showPredictsById(case, start_slice = 60):
    path = TRAIN_DATASET_PATH + case + '/'
    p = predictByPath(path,case)

    origImage,gt=predictByPath_X(path,case)

    f, axarr = plt.subplots(1, 3, figsize=(18, 50))

    axarr[0].imshow(origImage[0,:,:], cmap="gray")
    axarr[0].title.set_text('Original image flair')
    axarr[1].imshow(gt[0,:,:], cmap="Reds", interpolation='none', alpha=0.3)
    axarr[1].title.set_text('Ground truth')
    axarr[2].imshow(np.round(p[0,:,]), cmap="OrRd", interpolation='none', alpha=0.3)
    axarr[2].title.set_text('predicted')
    plt.show()

showPredictsById(case="BraTS19_CBICA_ATN_1_93")

effienet_1c2.compile(loss=generalized_dice_loss, optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), metrics = ['accuracy', dice_test] )
# Evaluate the model on the test data using `evaluate`
print("Evaluate on test data")
results = effienet_1c2.evaluate(test_generator, batch_size=100)
print("test loss, test acc:", results)

"""# **EffcientUnet_fc1_attention_gate**
EffcientUnet搭配(flair,t1ce,t1)並加入attention_gate
"""

K.clear_session()
from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Conv2DTranspose, Concatenate, Input
from tensorflow.keras.models import Model
from tensorflow.keras.applications import EfficientNetV2S
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Add
import tensorflow as tf

print("TF Version: ", tf.__version__)
def attention_gate(input_x, g):
    theta_x = Conv2D(filters=input_x.shape[-1], kernel_size=(1, 1), strides=(1, 1), padding="same")(input_x)
    phi_g = Conv2D(filters=input_x.shape[-1], kernel_size=(1, 1), strides=(1, 1), padding="same")(g)
    f = Activation("relu")(Add()([theta_x, phi_g]))
    psi = Conv2D(filters=1, kernel_size=(1, 1), strides=(1, 1), padding="same")(f)
    attention = Activation("sigmoid")(psi)
    return Multiply()([input_x, attention])
def conv_block(inputs, num_filters):
    x = Conv2D(num_filters, 3, padding="same")(inputs)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    x = Conv2D(num_filters, 3, padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    return x
def decoder_block_with_attention_and_residual(inputs, skip, num_filters):
    x = Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(inputs)
    x = Concatenate()([x, skip])
    x = conv_block(x, num_filters)

    # Add the attention gate
    attention_output = attention_gate(x, skip)

    # Add the residual connection
    x = Add()([x, attention_output])

    return x
def decoder_block(inputs, skip, num_filters):
    x = Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(inputs)
    #skip= Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(skip)
    x = Concatenate()([x, skip])
    x = conv_block(x, num_filters)
    return x

def build_effienet_unet(input_shape):
    """ Input """
    inputs = Input(input_shape)

    """ Pre-trained Encoder """
    encoder = EfficientNetV2S(include_top=False, weights="imagenet", input_tensor=inputs)
    s0 = encoder.get_layer("input_1").output
    s1 = encoder.get_layer("block1b_add").output                      ## 256
    s2 = encoder.get_layer("block2d_add").output    ## 128
    s3 = encoder.get_layer("block3d_add").output    ## 64
    s4 = encoder.get_layer("block5i_add").output    ## 32

    """ Bottleneck """
    #b1 = encoder.get_layer("top_activation").output    ## 16
    b1 = encoder.layers[-1].output
    """ Decoder """
    d1 = decoder_block_with_attention_and_residual(b1, s4, 512)                               ## 32
    d2 = decoder_block_with_attention_and_residual(d1, s3, 256)                               ## 64
    d3 = decoder_block_with_attention_and_residual(d2, s2, 128)                               ## 128
    d4 = decoder_block_with_attention_and_residual(d3, s1, 64)
    d5 = decoder_block_with_attention_and_residual(d4, s0, 32)
    """ Decoder """

    """ Output """
    outputs = Conv2D(1, 1, padding="same", activation="sigmoid")(d5)

    model = Model(inputs, outputs, name="EfficientNetV2S_UNET")
    return model

input_shape = (128, 128, 3)
effienet_1f2 = build_effienet_unet(input_shape)
#effienet_unet.summary()
effienet_1f2.compile(loss=generalized_dice_loss, optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), metrics = ['accuracy', dice_test] )

class DataGenerator(tf.keras.utils.Sequence):
    'Generates data for Keras'
    def __init__(self, list_IDs, dim=(IMG_SIZE,IMG_SIZE), batch_size = 64, n_channels = 3, shuffle=True):
        'Initialization'
        self.dim = dim
        self.batch_size = batch_size
        self.list_IDs = list_IDs
        self.n_channels = n_channels
        self.shuffle = shuffle
        self.on_epoch_end()

    def __len__(self):
        'Denotes the number of batches per epoch'
        return int(np.floor(len(self.list_IDs) / self.batch_size))

    def __getitem__(self, index):
        'Generate one batch of data'
        # Generate indexes of the batch
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]

        # Find list of IDs
        Batch_ids = [self.list_IDs[k] for k in indexes]

        # Generate data
        X, y = self.__data_generation(Batch_ids)

        return X, y

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle == True:
            np.random.shuffle(self.indexes)

    def __data_generation(self, Batch_ids):
        'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
        # Initialization
        X = np.zeros((self.batch_size, *self.dim, self.n_channels))
        y = np.zeros((self.batch_size, 128, 128))

        # Generate data
        for c, i in enumerate(Batch_ids):
            case_path = os.path.join(TRAIN_DATASET_PATH, i)

            data_path = os.path.join(case_path, f'{i}_img.npy')
            img = np.load(data_path)


            data_path = os.path.join(case_path, f'{i}_seg.npy')
            seg = np.load(data_path)

            X[c,:,:,0] = img[...,0]#flair
            X[c,:,:,1] = img[...,1]#t1ce
            X[c,:,:,2] = img[...,2]#t1

            y[c,:,:]=seg

        return X, y



training_generator = DataGenerator(train_id)
valid_generator = DataGenerator(val_id)
test_generator = DataGenerator(test_id)

history =  effienet_1f2.fit(training_generator,
                    epochs=30,
                    validation_data = valid_generator
                    )

effienet_1f2.save('effienet_fc1_zxy.h5')

effienet_1c2 = tf.keras.models.load_model('effienet_fc1_zxy.h5',custom_objects={ 'accuracy': dice_test,"dice_test": dice_test}, compile=False)

def predictByPath(case_path,case):
    files = next(os.walk(case_path))[2]
    X = np.empty((1, IMG_SIZE, IMG_SIZE,3))
    vol_path = case_path + case + '_img.npy'
    img=np.load(vol_path)

    for j in range(1):
        X[j,:,:,0] = img[...,0]
        X[j,:,:,1] = img[...,1]
        X[j,:,:,2] = img[...,2]

    return effienet_1c2.predict(X, verbose=1)
def predictByPath_X(case_path,case):
    files = next(os.walk(case_path))[2]
    X = np.empty((VOLUME_SLICES, IMG_SIZE, IMG_SIZE, 1))
    y = np.empty((VOLUME_SLICES, IMG_SIZE, IMG_SIZE))
    vol_path = case_path + case + '_img.npy'
    img=np.load(vol_path)

    vol_path = case_path + case + '_seg.npy'
    seg=np.load(vol_path)
    for j in range(1):
        X[j,:,:,0] = img[...,0]
        y[j,...] = seg

    return X,y
import numpy as np
def showPredictsById(case, start_slice = 60):
    path = TRAIN_DATASET_PATH + case + '/'
    p = predictByPath(path,case)

    origImage,gt=predictByPath_X(path,case)

    f, axarr = plt.subplots(1, 3, figsize=(18, 50))

    axarr[0].imshow(origImage[0,:,:], cmap="gray")
    axarr[0].title.set_text('Original image flair')
    axarr[1].imshow(gt[0,:,:], cmap="Reds", interpolation='none', alpha=0.3)
    axarr[1].title.set_text('Ground truth')
    axarr[2].imshow(np.round(p[0,:,]), cmap="OrRd", interpolation='none', alpha=0.3)
    axarr[2].title.set_text('predicted')
    plt.show()

showPredictsById(case="BraTS19_CBICA_ATN_1_93")

effienet_1c2.compile(loss=generalized_dice_loss, optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), metrics = ['accuracy', dice_test] )
# Evaluate the model on the test data using `evaluate`
print("Evaluate on test data")
results = effienet_1c2.evaluate(test_generator, batch_size=100)
print("test loss, test acc:", results)

"""# **EffcientUnet_2c1_attention_gate**
EffcientUnet搭配(t2,t1ce,t1)並加入attention_gate
"""

K.clear_session()
from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Conv2DTranspose, Concatenate, Input
from tensorflow.keras.models import Model
from tensorflow.keras.applications import EfficientNetV2S
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Add
import tensorflow as tf

print("TF Version: ", tf.__version__)
def attention_gate(input_x, g):
    theta_x = Conv2D(filters=input_x.shape[-1], kernel_size=(1, 1), strides=(1, 1), padding="same")(input_x)
    phi_g = Conv2D(filters=input_x.shape[-1], kernel_size=(1, 1), strides=(1, 1), padding="same")(g)
    f = Activation("relu")(Add()([theta_x, phi_g]))
    psi = Conv2D(filters=1, kernel_size=(1, 1), strides=(1, 1), padding="same")(f)
    attention = Activation("sigmoid")(psi)
    return Multiply()([input_x, attention])
def conv_block(inputs, num_filters):
    x = Conv2D(num_filters, 3, padding="same")(inputs)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    x = Conv2D(num_filters, 3, padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    return x
def decoder_block_with_attention_and_residual(inputs, skip, num_filters):
    x = Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(inputs)
    x = Concatenate()([x, skip])
    x = conv_block(x, num_filters)

    # Add the attention gate
    attention_output = attention_gate(x, skip)

    # Add the residual connection
    x = Add()([x, attention_output])

    return x
def decoder_block(inputs, skip, num_filters):
    x = Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(inputs)
    #skip= Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(skip)
    x = Concatenate()([x, skip])
    x = conv_block(x, num_filters)
    return x

def build_effienet_unet(input_shape):
    """ Input """
    inputs = Input(input_shape)

    """ Pre-trained Encoder """
    encoder = EfficientNetV2S(include_top=False, weights="imagenet", input_tensor=inputs)
    s0 = encoder.get_layer("input_1").output
    s1 = encoder.get_layer("block1b_add").output                      ## 256
    s2 = encoder.get_layer("block2d_add").output    ## 128
    s3 = encoder.get_layer("block3d_add").output    ## 64
    s4 = encoder.get_layer("block5i_add").output    ## 32

    """ Bottleneck """
    #b1 = encoder.get_layer("top_activation").output    ## 16
    b1 = encoder.layers[-1].output
    """ Decoder """
    d1 = decoder_block_with_attention_and_residual(b1, s4, 512)                               ## 32
    d2 = decoder_block_with_attention_and_residual(d1, s3, 256)                               ## 64
    d3 = decoder_block_with_attention_and_residual(d2, s2, 128)                               ## 128
    d4 = decoder_block_with_attention_and_residual(d3, s1, 64)
    d5 = decoder_block_with_attention_and_residual(d4, s0, 32)
    """ Decoder """

    """ Output """
    outputs = Conv2D(1, 1, padding="same", activation="sigmoid")(d5)

    model = Model(inputs, outputs, name="EfficientNetV2S_UNET")
    return model

input_shape = (128, 128, 3)
effienet_1f2 = build_effienet_unet(input_shape)
#effienet_unet.summary()
effienet_1f2.compile(loss=generalized_dice_loss, optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), metrics = ['accuracy', dice_test] )

class DataGenerator(tf.keras.utils.Sequence):
    'Generates data for Keras'
    def __init__(self, list_IDs, dim=(IMG_SIZE,IMG_SIZE), batch_size = 64, n_channels = 3, shuffle=True):
        'Initialization'
        self.dim = dim
        self.batch_size = batch_size
        self.list_IDs = list_IDs
        self.n_channels = n_channels
        self.shuffle = shuffle
        self.on_epoch_end()

    def __len__(self):
        'Denotes the number of batches per epoch'
        return int(np.floor(len(self.list_IDs) / self.batch_size))

    def __getitem__(self, index):
        'Generate one batch of data'
        # Generate indexes of the batch
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]

        # Find list of IDs
        Batch_ids = [self.list_IDs[k] for k in indexes]

        # Generate data
        X, y = self.__data_generation(Batch_ids)

        return X, y

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle == True:
            np.random.shuffle(self.indexes)

    def __data_generation(self, Batch_ids):
        'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
        # Initialization
        X = np.zeros((self.batch_size, *self.dim, self.n_channels))
        y = np.zeros((self.batch_size, 128, 128))

        # Generate data
        for c, i in enumerate(Batch_ids):
            case_path = os.path.join(TRAIN_DATASET_PATH, i)

            data_path = os.path.join(case_path, f'{i}_img.npy')
            img = np.load(data_path)


            data_path = os.path.join(case_path, f'{i}_seg.npy')
            seg = np.load(data_path)

            X[c,:,:,0] = img[...,3]#t2
            X[c,:,:,1] = img[...,1]#t1ce
            X[c,:,:,2] = img[...,2]#t1

            y[c,:,:]=seg

        return X, y



training_generator = DataGenerator(train_id)
valid_generator = DataGenerator(val_id)
test_generator = DataGenerator(test_id)

history =  effienet_1f2.fit(training_generator,
                    epochs=30,
                    validation_data = valid_generator
                    )

effienet_1f2.save('effienet_2c1_zxy.h5')

effienet_1c2 = tf.keras.models.load_model('effienet_2c1_zxy.h5',custom_objects={ 'accuracy': dice_test,"dice_test": dice_test}, compile=False)

def predictByPath(case_path,case):
    files = next(os.walk(case_path))[2]
    X = np.empty((1, IMG_SIZE, IMG_SIZE,3))
    vol_path = case_path + case + '_img.npy'
    img=np.load(vol_path)

    for j in range(1):
        X[j,:,:,0] = img[...,3]
        X[j,:,:,1] = img[...,1]
        X[j,:,:,2] = img[...,2]

    return effienet_1c2.predict(X, verbose=1)
def predictByPath_X(case_path,case):
    files = next(os.walk(case_path))[2]
    X = np.empty((VOLUME_SLICES, IMG_SIZE, IMG_SIZE, 1))
    y = np.empty((VOLUME_SLICES, IMG_SIZE, IMG_SIZE))
    vol_path = case_path + case + '_img.npy'
    img=np.load(vol_path)

    vol_path = case_path + case + '_seg.npy'
    seg=np.load(vol_path)
    for j in range(1):
        X[j,:,:,0] = img[...,0]
        y[j,...] = seg

    return X,y
import numpy as np
def showPredictsById(case, start_slice = 60):
    path = TRAIN_DATASET_PATH + case + '/'
    p = predictByPath(path,case)

    origImage,gt=predictByPath_X(path,case)

    f, axarr = plt.subplots(1, 3, figsize=(18, 50))

    axarr[0].imshow(origImage[0,:,:], cmap="gray")
    axarr[0].title.set_text('Original image flair')
    axarr[1].imshow(gt[0,:,:], cmap="Reds", interpolation='none', alpha=0.3)
    axarr[1].title.set_text('Ground truth')
    axarr[2].imshow(np.round(p[0,:,]), cmap="OrRd", interpolation='none', alpha=0.3)
    axarr[2].title.set_text('predicted')
    plt.show()

showPredictsById(case="BraTS19_CBICA_ATN_1_93")

effienet_1c2.compile(loss=generalized_dice_loss, optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), metrics = ['accuracy', dice_test] )
# Evaluate the model on the test data using `evaluate`
print("Evaluate on test data")
results = effienet_1c2.evaluate(test_generator, batch_size=100)
print("test loss, test acc:", results)

"""# **EffcientUnet_f12_attention_gate**
EffcientUnet搭配(flair,t1,t2)並加入attention_gate
"""

K.clear_session()
from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Conv2DTranspose, Concatenate, Input
from tensorflow.keras.models import Model
from tensorflow.keras.applications import EfficientNetV2S
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Add
import tensorflow as tf

print("TF Version: ", tf.__version__)
def attention_gate(input_x, g):
    theta_x = Conv2D(filters=input_x.shape[-1], kernel_size=(1, 1), strides=(1, 1), padding="same")(input_x)
    phi_g = Conv2D(filters=input_x.shape[-1], kernel_size=(1, 1), strides=(1, 1), padding="same")(g)
    f = Activation("relu")(Add()([theta_x, phi_g]))
    psi = Conv2D(filters=1, kernel_size=(1, 1), strides=(1, 1), padding="same")(f)
    attention = Activation("sigmoid")(psi)
    return Multiply()([input_x, attention])
def conv_block(inputs, num_filters):
    x = Conv2D(num_filters, 3, padding="same")(inputs)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    x = Conv2D(num_filters, 3, padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    return x
def decoder_block_with_attention_and_residual(inputs, skip, num_filters):
    x = Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(inputs)
    x = Concatenate()([x, skip])
    x = conv_block(x, num_filters)

    # Add the attention gate
    attention_output = attention_gate(x, skip)

    # Add the residual connection
    x = Add()([x, attention_output])

    return x
def decoder_block(inputs, skip, num_filters):
    x = Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(inputs)
    #skip= Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(skip)
    x = Concatenate()([x, skip])
    x = conv_block(x, num_filters)
    return x

def build_effienet_unet(input_shape):
    """ Input """
    inputs = Input(input_shape)

    """ Pre-trained Encoder """
    encoder = EfficientNetV2S(include_top=False, weights="imagenet", input_tensor=inputs)
    s0 = encoder.get_layer("input_1").output
    s1 = encoder.get_layer("block1b_add").output                      ## 256
    s2 = encoder.get_layer("block2d_add").output    ## 128
    s3 = encoder.get_layer("block3d_add").output    ## 64
    s4 = encoder.get_layer("block5i_add").output    ## 32

    """ Bottleneck """
    #b1 = encoder.get_layer("top_activation").output    ## 16
    b1 = encoder.layers[-1].output
    """ Decoder """
    d1 = decoder_block_with_attention_and_residual(b1, s4, 512)                               ## 32
    d2 = decoder_block_with_attention_and_residual(d1, s3, 256)                               ## 64
    d3 = decoder_block_with_attention_and_residual(d2, s2, 128)                               ## 128
    d4 = decoder_block_with_attention_and_residual(d3, s1, 64)
    d5 = decoder_block_with_attention_and_residual(d4, s0, 32)
    """ Decoder """

    """ Output """
    outputs = Conv2D(1, 1, padding="same", activation="sigmoid")(d5)

    model = Model(inputs, outputs, name="EfficientNetV2S_UNET")
    return model

input_shape = (128, 128, 3)
effienet_1f2 = build_effienet_unet(input_shape)
#effienet_unet.summary()
effienet_1f2.compile(loss=generalized_dice_loss, optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), metrics = ['accuracy', dice_test] )

class DataGenerator(tf.keras.utils.Sequence):
    'Generates data for Keras'
    def __init__(self, list_IDs, dim=(IMG_SIZE,IMG_SIZE), batch_size = 64, n_channels = 3, shuffle=True):
        'Initialization'
        self.dim = dim
        self.batch_size = batch_size
        self.list_IDs = list_IDs
        self.n_channels = n_channels
        self.shuffle = shuffle
        self.on_epoch_end()

    def __len__(self):
        'Denotes the number of batches per epoch'
        return int(np.floor(len(self.list_IDs) / self.batch_size))

    def __getitem__(self, index):
        'Generate one batch of data'
        # Generate indexes of the batch
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]

        # Find list of IDs
        Batch_ids = [self.list_IDs[k] for k in indexes]

        # Generate data
        X, y = self.__data_generation(Batch_ids)

        return X, y

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle == True:
            np.random.shuffle(self.indexes)

    def __data_generation(self, Batch_ids):
        'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
        # Initialization
        X = np.zeros((self.batch_size, *self.dim, self.n_channels))
        y = np.zeros((self.batch_size, 128, 128))

        # Generate data
        for c, i in enumerate(Batch_ids):
            case_path = os.path.join(TRAIN_DATASET_PATH, i)

            data_path = os.path.join(case_path, f'{i}_img.npy')
            img = np.load(data_path)


            data_path = os.path.join(case_path, f'{i}_seg.npy')
            seg = np.load(data_path)

            X[c,:,:,0] = img[...,0]#flair
            X[c,:,:,1] = img[...,2]#t1
            X[c,:,:,2] = img[...,3]#t2

            y[c,:,:]=seg

        return X, y



training_generator = DataGenerator(train_id)
valid_generator = DataGenerator(val_id)
test_generator = DataGenerator(test_id)

history =  effienet_1f2.fit(training_generator,
                    epochs=30,
                    validation_data = valid_generator
                    )

effienet_1f2.save('effienet_2c1_zxy.h5')

effienet_1c2 = tf.keras.models.load_model('effienet_2c1_zxy.h5',custom_objects={ 'accuracy': dice_test,"dice_test": dice_test}, compile=False)

def predictByPath(case_path,case):
    files = next(os.walk(case_path))[2]
    X = np.empty((1, IMG_SIZE, IMG_SIZE,3))
    vol_path = case_path + case + '_img.npy'
    img=np.load(vol_path)

    for j in range(1):
        X[j,:,:,0] = img[...,0]
        X[j,:,:,1] = img[...,2]
        X[j,:,:,2] = img[...,3]

    return effienet_1c2.predict(X, verbose=1)
def predictByPath_X(case_path,case):
    files = next(os.walk(case_path))[2]
    X = np.empty((VOLUME_SLICES, IMG_SIZE, IMG_SIZE, 1))
    y = np.empty((VOLUME_SLICES, IMG_SIZE, IMG_SIZE))
    vol_path = case_path + case + '_img.npy'
    img=np.load(vol_path)

    vol_path = case_path + case + '_seg.npy'
    seg=np.load(vol_path)
    for j in range(1):
        X[j,:,:,0] = img[...,0]
        y[j,...] = seg

    return X,y
import numpy as np
def showPredictsById(case, start_slice = 60):
    path = TRAIN_DATASET_PATH + case + '/'
    p = predictByPath(path,case)

    origImage,gt=predictByPath_X(path,case)

    f, axarr = plt.subplots(1, 3, figsize=(18, 50))

    axarr[0].imshow(origImage[0,:,:], cmap="gray")
    axarr[0].title.set_text('Original image flair')
    axarr[1].imshow(gt[0,:,:], cmap="Reds", interpolation='none', alpha=0.3)
    axarr[1].title.set_text('Ground truth')
    axarr[2].imshow(np.round(p[0,:,]), cmap="OrRd", interpolation='none', alpha=0.3)
    axarr[2].title.set_text('predicted')
    plt.show()

showPredictsById(case="BraTS19_CBICA_ATN_1_93")

effienet_1c2.compile(loss=generalized_dice_loss, optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), metrics = ['accuracy', dice_test] )
# Evaluate the model on the test data using `evaluate`
print("Evaluate on test data")
results = effienet_1c2.evaluate(test_generator, batch_size=100)
print("test loss, test acc:", results)

"""# **Efficientunet b4_fc2_attention_gate**
EffcientUnetB4搭配(flair,t1ce,t2)並加入attention_gate
"""

K.clear_session()
from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Conv2DTranspose, Concatenate, Input
from tensorflow.keras.models import Model
from tensorflow.keras.applications import EfficientNetB4
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Add
import tensorflow as tf

print("TF Version: ", tf.__version__)
def attention_gate(input_x, g):
    theta_x = Conv2D(filters=input_x.shape[-1], kernel_size=(1, 1), strides=(1, 1), padding="same")(input_x)
    phi_g = Conv2D(filters=input_x.shape[-1], kernel_size=(1, 1), strides=(1, 1), padding="same")(g)
    f = Activation("relu")(Add()([theta_x, phi_g]))
    psi = Conv2D(filters=1, kernel_size=(1, 1), strides=(1, 1), padding="same")(f)
    attention = Activation("sigmoid")(psi)
    return Multiply()([input_x, attention])
def conv_block(inputs, num_filters):
    x = Conv2D(num_filters, 3, padding="same")(inputs)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    x = Conv2D(num_filters, 3, padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    return x
def decoder_block_with_attention_and_residual(inputs, skip, num_filters):
    x = Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(inputs)
    x = Concatenate()([x, skip])
    x = conv_block(x, num_filters)

    # Add the attention gate
    attention_output = attention_gate(x, skip)

    # Add the residual connection
    x = Add()([x, attention_output])

    return x
def decoder_block(inputs, skip, num_filters):
    x = Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(inputs)
    #skip= Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(skip)
    x = Concatenate()([x, skip])
    x = conv_block(x, num_filters)
    return x

def build_effienet_unet(input_shape):
    """ Input """
    inputs = Input(input_shape)

    """ Pre-trained Encoder """
    encoder = EfficientNetB4(include_top=False, weights="imagenet", input_tensor=inputs)
    s0 = encoder.get_layer("input_1").output
    s1 = encoder.get_layer("block1b_add").output                      ## 256
    s2 = encoder.get_layer("block2d_add").output    ## 128
    s3 = encoder.get_layer("block3d_add").output    ## 64
    s4 = encoder.get_layer("block5f_add").output    ## 32

    """ Bottleneck """
    #b1 = encoder.get_layer("top_activation").output    ## 16
    b1 = encoder.layers[-1].output   ## 32

    """ Decoder """
    d1 = decoder_block_with_attention_and_residual(b1, s4, 512)                               ## 32
    d2 = decoder_block_with_attention_and_residual(d1, s3, 256)                               ## 64
    d3 = decoder_block_with_attention_and_residual(d2, s2, 128)                               ## 128
    d4 = decoder_block_with_attention_and_residual(d3, s1, 64)
    d5 = decoder_block_with_attention_and_residual(d4, s0, 32)
    """ Decoder """

    """ Output """
    outputs = Conv2D(1, 1, padding="same", activation="sigmoid")(d5)

    model = Model(inputs, outputs, name="EfficientNetB4_UNET")
    return model

input_shape = (128, 128, 3)
effienet_unet = build_effienet_unet(input_shape)
#effienet_unet.summary()
effienet_unet.compile(loss=generalized_dice_loss, optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), metrics = ['accuracy', dice_test] )

class DataGenerator(tf.keras.utils.Sequence):
    'Generates data for Keras'
    def __init__(self, list_IDs, dim=(IMG_SIZE,IMG_SIZE), batch_size = 64, n_channels = 3, shuffle=True):
        'Initialization'
        self.dim = dim
        self.batch_size = batch_size
        self.list_IDs = list_IDs
        self.n_channels = n_channels
        self.shuffle = shuffle
        self.on_epoch_end()

    def __len__(self):
        'Denotes the number of batches per epoch'
        return int(np.floor(len(self.list_IDs) / self.batch_size))

    def __getitem__(self, index):
        'Generate one batch of data'
        # Generate indexes of the batch
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]

        # Find list of IDs
        Batch_ids = [self.list_IDs[k] for k in indexes]

        # Generate data
        X, y = self.__data_generation(Batch_ids)

        return X, y

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle == True:
            np.random.shuffle(self.indexes)

    def __data_generation(self, Batch_ids):
        'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
        # Initialization
        X = np.zeros((self.batch_size, *self.dim, self.n_channels))
        y = np.zeros((self.batch_size, 128, 128))

        # Generate data
        for c, i in enumerate(Batch_ids):
            case_path = os.path.join(TRAIN_DATASET_PATH, i)

            data_path = os.path.join(case_path, f'{i}_img.npy')
            img = np.load(data_path)


            data_path = os.path.join(case_path, f'{i}_seg.npy')
            seg = np.load(data_path)

            X[c,:,:,0] = img[...,0]#flair
            X[c,:,:,1] = img[...,1]#t1ce
            X[c,:,:,2] = img[...,3]#t2

            y[c,:,:]=seg

        return X, y



training_generator = DataGenerator(train_id)
valid_generator = DataGenerator(val_id)
test_generator = DataGenerator(test_id)

history =  effienet_unet.fit(training_generator,
                    epochs=30,
                    validation_data = valid_generator
                    )

effienet_unet.save('effienet_fc1_zxy.h5')

effienet_1c2 = tf.keras.models.load_model('effienet_fc1_zxy.h5',custom_objects={ 'accuracy': dice_test,"dice_test": dice_test}, compile=False)

def predictByPath(case_path,case):
    files = next(os.walk(case_path))[2]
    X = np.empty((1, IMG_SIZE, IMG_SIZE,3))
    vol_path = case_path + case + '_img.npy'
    img=np.load(vol_path)

    for j in range(1):
        X[j,:,:,0] = img[...,0]
        X[j,:,:,1] = img[...,1]
        X[j,:,:,2] = img[...,3]

    return effienet_1c2.predict(X, verbose=1)
def predictByPath_X(case_path,case):
    files = next(os.walk(case_path))[2]
    X = np.empty((VOLUME_SLICES, IMG_SIZE, IMG_SIZE, 1))
    y = np.empty((VOLUME_SLICES, IMG_SIZE, IMG_SIZE))
    vol_path = case_path + case + '_img.npy'
    img=np.load(vol_path)

    vol_path = case_path + case + '_seg.npy'
    seg=np.load(vol_path)
    for j in range(1):
        X[j,:,:,0] = img[...,0]
        y[j,...] = seg

    return X,y
import numpy as np
def showPredictsById(case, start_slice = 60):
    path = TRAIN_DATASET_PATH + case + '/'
    p = predictByPath(path,case)

    origImage,gt=predictByPath_X(path,case)

    f, axarr = plt.subplots(1, 3, figsize=(18, 50))

    axarr[0].imshow(origImage[0,:,:], cmap="gray")
    axarr[0].title.set_text('Original image flair')
    axarr[1].imshow(gt[0,:,:], cmap="Reds", interpolation='none', alpha=0.3)
    axarr[1].title.set_text('Ground truth')
    axarr[2].imshow(np.round(p[0,:,]), cmap="OrRd", interpolation='none', alpha=0.3)
    axarr[2].title.set_text('predicted')
    plt.show()

showPredictsById(case="BraTS19_CBICA_ATN_1_93")

effienet_1c2.compile(loss=generalized_dice_loss, optimizer=tf.keras.optimizers.Adam(learning_rate=0.001), metrics = ['accuracy', dice_test] )
# Evaluate the model on the test data using `evaluate`
print("Evaluate on test data")
results = effienet_1c2.evaluate(test_generator)
print("test loss, test accuracy, test dice:", results)

"""# **Efficientunet b4_fc1_attention_gate**
EffcientUnetB4搭配(flair,t1ce,t1)並加入attention_gate
"""

K.clear_session()
from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Conv2DTranspose, Concatenate, Input, Multiply
from tensorflow.keras.models import Model
from tensorflow.keras.applications import EfficientNetB4
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Add
import tensorflow as tf

print("TF Version: ", tf.__version__)
def attention_gate(input_x, g):
    theta_x = Conv2D(filters=input_x.shape[-1], kernel_size=(1, 1), strides=(1, 1), padding="same")(input_x)
    phi_g = Conv2D(filters=input_x.shape[-1], kernel_size=(1, 1), strides=(1, 1), padding="same")(g)
    f = Activation("relu")(Add()([theta_x, phi_g]))
    psi = Conv2D(filters=1, kernel_size=(1, 1), strides=(1, 1), padding="same")(f)
    attention = Activation("sigmoid")(psi)
    return Multiply()([input_x, attention])
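
# Shape sanity check for the attention gate (an illustrative sketch with dummy tensors,
# not part of the training pipeline): the gate should preserve the shape of input_x.
_x_chk = tf.zeros((1, 16, 16, 64))
_g_chk = tf.zeros((1, 16, 16, 32))
print(attention_gate(_x_chk, _g_chk).shape)  # expected: (1, 16, 16, 64)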
def conv_block(inputs, num_filters):
    x = Conv2D(num_filters, 3, padding="same")(inputs)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    x = Conv2D(num_filters, 3, padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    return x
def decoder_block_with_attention_and_residual(inputs, skip, num_filters):
    x = Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(inputs)
    x = Concatenate()([x, skip])
    x = conv_block(x, num_filters)

    # Attention gate (the skip connection provides the gating signal)
    attention_output = attention_gate(x, skip)

    # Residual connection: add the gated features back
    x = Add()([x, attention_output])

    return x
def decoder_block(inputs, skip, num_filters):
    x = Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(inputs)
    #skip= Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(skip)
    x = Concatenate()([x, skip])
    x = conv_block(x, num_filters)
    return x
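
# Decoder-step shape sketch (illustrative, with dummy tensors whose shapes mimic the
# EfficientNet-B4 bottleneck and block5f_add skip for a 128x128 input): one block doubles
# the spatial resolution and returns num_filters feature channels.
_d_in = tf.zeros((1, 4, 4, 1792))
_d_skip = tf.zeros((1, 8, 8, 160))
print(decoder_block_with_attention_and_residual(_d_in, _d_skip, 512).shape)  # expected: (1, 8, 8, 512)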

def build_effienet_unet(input_shape):
    """ Input """
    inputs = Input(input_shape)

    """ Pre-trained Encoder """
    encoder = EfficientNetB4(include_top=False, weights="imagenet", input_tensor=inputs)
    s0 = encoder.get_layer("input_1").output         ## 128x128
    s1 = encoder.get_layer("block1b_add").output     ## 64x64
    s2 = encoder.get_layer("block2d_add").output     ## 32x32
    s3 = encoder.get_layer("block3d_add").output     ## 16x16
    s4 = encoder.get_layer("block5f_add").output     ## 8x8

    """ Bottleneck """
    #b1 = encoder.get_layer("top_activation").output
    b1 = encoder.layers[-1].output                   ## 4x4

    """ Decoder """
    d1 = decoder_block_with_attention_and_residual(b1, s4, 512)    ## 8x8
    d2 = decoder_block_with_attention_and_residual(d1, s3, 256)    ## 16x16
    d3 = decoder_block_with_attention_and_residual(d2, s2, 128)    ## 32x32
    d4 = decoder_block_with_attention_and_residual(d3, s1, 64)     ## 64x64
    d5 = decoder_block_with_attention_and_residual(d4, s0, 32)     ## 128x128

    """ Output """
    outputs = Conv2D(1, 1, padding="same", activation="sigmoid")(d5)

    model = Model(inputs, outputs, name="EfficientNetB4_UNET")
    return model

input_shape = (128, 128, 3)
effienet_unet = build_effienet_unet(input_shape)
#effienet_unet.summary()
effienet_unet.compile(loss=generalized_dice_loss, optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), metrics = ['accuracy', dice_test] )

class DataGenerator(tf.keras.utils.Sequence):
    'Generates data for Keras'
    def __init__(self, list_IDs, dim=(IMG_SIZE,IMG_SIZE), batch_size = 64, n_channels = 3, shuffle=True):
        'Initialization'
        self.dim = dim
        self.batch_size = batch_size
        self.list_IDs = list_IDs
        self.n_channels = n_channels
        self.shuffle = shuffle
        self.on_epoch_end()

    def __len__(self):
        'Denotes the number of batches per epoch'
        return int(np.floor(len(self.list_IDs) / self.batch_size))

    def __getitem__(self, index):
        'Generate one batch of data'
        # Generate indexes of the batch
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]

        # Find list of IDs
        Batch_ids = [self.list_IDs[k] for k in indexes]

        # Generate data
        X, y = self.__data_generation(Batch_ids)

        return X, y

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle == True:
            np.random.shuffle(self.indexes)

    def __data_generation(self, Batch_ids):
        'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
        # Initialization
        X = np.zeros((self.batch_size, *self.dim, self.n_channels))
        y = np.zeros((self.batch_size, 128, 128))

        # Generate data
        for c, i in enumerate(Batch_ids):
            case_path = os.path.join(TRAIN_DATASET_PATH, i)

            data_path = os.path.join(case_path, f'{i}_img.npy')
            img = np.load(data_path)


            data_path = os.path.join(case_path, f'{i}_seg.npy')
            seg = np.load(data_path)

            X[c,:,:,0] = img[...,0]#flair
            X[c,:,:,1] = img[...,1]#t1ce
            X[c,:,:,2] = img[...,2]#t1

            y[c,:,:]=seg

        return X, y



training_generator = DataGenerator(train_id)
valid_generator = DataGenerator(val_id)
test_generator = DataGenerator(test_id)

history =  effienet_unet.fit(training_generator,
                    epochs=30,
                    validation_data = valid_generator
                    )

effienet_unet.save('effienet_fc1_zxy.h5')

effienet_1c2 = tf.keras.models.load_model('effienet_fc1_zxy.h5',custom_objects={ 'accuracy': dice_test,"dice_test": dice_test}, compile=False)

def predictByPath(case_path,case):
    files = next(os.walk(case_path))[2]
    X = np.empty((1, IMG_SIZE, IMG_SIZE,3))
    vol_path = case_path + case + '_img.npy'
    img=np.load(vol_path)

    for j in range(1):
        X[j,:,:,0] = img[...,0]
        X[j,:,:,1] = img[...,1]
        X[j,:,:,2] = img[...,2]

    return effienet_1c2.predict(X, verbose=1)
def predictByPath_X(case_path,case):
    files = next(os.walk(case_path))[2]
    X = np.empty((VOLUME_SLICES, IMG_SIZE, IMG_SIZE, 1))
    y = np.empty((VOLUME_SLICES, IMG_SIZE, IMG_SIZE))
    vol_path = case_path + case + '_img.npy'
    img=np.load(vol_path)

    vol_path = case_path + case + '_seg.npy'
    seg=np.load(vol_path)
    for j in range(1):
        X[j,:,:,0] = img[...,0]
        y[j,...] = seg

    return X,y
import numpy as np
def showPredictsById(case, start_slice = 60):
    path = TRAIN_DATASET_PATH + case + '/'
    p = predictByPath(path,case)

    origImage,gt=predictByPath_X(path,case)

    f, axarr = plt.subplots(1, 3, figsize=(18, 50))

    axarr[0].imshow(origImage[0,:,:,0], cmap="gray")
    axarr[0].title.set_text('Original image flair')
    axarr[1].imshow(gt[0,:,:], cmap="Reds", interpolation='none', alpha=0.3)
    axarr[1].title.set_text('Ground truth')
    axarr[2].imshow(np.round(p[0,:,:,0]), cmap="OrRd", interpolation='none', alpha=0.3)
    axarr[2].title.set_text('predicted')
    plt.show()

showPredictsById(case="BraTS19_CBICA_ATN_1_93")

effienet_1c2.compile(loss=generalized_dice_loss, optimizer=tf.keras.optimizers.Adam(learning_rate=0.001), metrics = ['accuracy', dice_test] )
# Evaluate the model on the test data using `evaluate`
print("Evaluate on test data")
results = effienet_1c2.evaluate(test_generator)
print("test loss, test accuracy, test dice:", results)

"""# **Efficientunet b4_2c1_attention_gate**
EffcientUnetB4搭配(2,t1ce,t1)並加入attention_gate
"""

K.clear_session()
from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Conv2DTranspose, Concatenate, Input, Multiply
from tensorflow.keras.models import Model
from tensorflow.keras.applications import EfficientNetB4
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Add
import tensorflow as tf

print("TF Version: ", tf.__version__)
def attention_gate(input_x, g):
    theta_x = Conv2D(filters=input_x.shape[-1], kernel_size=(1, 1), strides=(1, 1), padding="same")(input_x)
    phi_g = Conv2D(filters=input_x.shape[-1], kernel_size=(1, 1), strides=(1, 1), padding="same")(g)
    f = Activation("relu")(Add()([theta_x, phi_g]))
    psi = Conv2D(filters=1, kernel_size=(1, 1), strides=(1, 1), padding="same")(f)
    attention = Activation("sigmoid")(psi)
    return Multiply()([input_x, attention])
def conv_block(inputs, num_filters):
    x = Conv2D(num_filters, 3, padding="same")(inputs)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    x = Conv2D(num_filters, 3, padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    return x
def decoder_block_with_attention_and_residual(inputs, skip, num_filters):
    x = Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(inputs)
    x = Concatenate()([x, skip])
    x = conv_block(x, num_filters)

    # Attention gate (the skip connection provides the gating signal)
    attention_output = attention_gate(x, skip)

    # Residual connection: add the gated features back
    x = Add()([x, attention_output])

    return x
def decoder_block(inputs, skip, num_filters):
    x = Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(inputs)
    #skip= Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(skip)
    x = Concatenate()([x, skip])
    x = conv_block(x, num_filters)
    return x

def build_effienet_unet(input_shape):
    """ Input """
    inputs = Input(input_shape)

    """ Pre-trained Encoder """
    encoder = EfficientNetB4(include_top=False, weights="imagenet", input_tensor=inputs)
    s0 = encoder.get_layer("input_1").output         ## 128x128
    s1 = encoder.get_layer("block1b_add").output     ## 64x64
    s2 = encoder.get_layer("block2d_add").output     ## 32x32
    s3 = encoder.get_layer("block3d_add").output     ## 16x16
    s4 = encoder.get_layer("block5f_add").output     ## 8x8

    """ Bottleneck """
    #b1 = encoder.get_layer("top_activation").output
    b1 = encoder.layers[-1].output                   ## 4x4

    """ Decoder """
    d1 = decoder_block_with_attention_and_residual(b1, s4, 512)    ## 8x8
    d2 = decoder_block_with_attention_and_residual(d1, s3, 256)    ## 16x16
    d3 = decoder_block_with_attention_and_residual(d2, s2, 128)    ## 32x32
    d4 = decoder_block_with_attention_and_residual(d3, s1, 64)     ## 64x64
    d5 = decoder_block_with_attention_and_residual(d4, s0, 32)     ## 128x128

    """ Output """
    outputs = Conv2D(1, 1, padding="same", activation="sigmoid")(d5)

    model = Model(inputs, outputs, name="EfficientNetB4_UNET")
    return model

input_shape = (128, 128, 3)
effienet_unet = build_effienet_unet(input_shape)
#effienet_unet.summary()
effienet_unet.compile(loss=generalized_dice_loss, optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), metrics = ['accuracy', dice_test] )

class DataGenerator(tf.keras.utils.Sequence):
    'Generates data for Keras'
    def __init__(self, list_IDs, dim=(IMG_SIZE,IMG_SIZE), batch_size = 64, n_channels = 3, shuffle=True):
        'Initialization'
        self.dim = dim
        self.batch_size = batch_size
        self.list_IDs = list_IDs
        self.n_channels = n_channels
        self.shuffle = shuffle
        self.on_epoch_end()

    def __len__(self):
        'Denotes the number of batches per epoch'
        return int(np.floor(len(self.list_IDs) / self.batch_size))

    def __getitem__(self, index):
        'Generate one batch of data'
        # Generate indexes of the batch
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]

        # Find list of IDs
        Batch_ids = [self.list_IDs[k] for k in indexes]

        # Generate data
        X, y = self.__data_generation(Batch_ids)

        return X, y

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle == True:
            np.random.shuffle(self.indexes)

    def __data_generation(self, Batch_ids):
        'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
        # Initialization
        X = np.zeros((self.batch_size, *self.dim, self.n_channels))
        y = np.zeros((self.batch_size, 128, 128))

        # Generate data
        for c, i in enumerate(Batch_ids):
            case_path = os.path.join(TRAIN_DATASET_PATH, i)

            data_path = os.path.join(case_path, f'{i}_img.npy')
            img = np.load(data_path)


            data_path = os.path.join(case_path, f'{i}_seg.npy')
            seg = np.load(data_path)

            X[c,:,:,0] = img[...,3]#t2
            X[c,:,:,1] = img[...,1]#t1ce
            X[c,:,:,2] = img[...,2]#t1

            y[c,:,:]=seg

        return X, y



training_generator = DataGenerator(train_id)
valid_generator = DataGenerator(val_id)
test_generator = DataGenerator(test_id)

history =  effienet_unet.fit(training_generator,
                    epochs=30,
                    validation_data = valid_generator
                    )

effienet_unet.save('effienet_2c1_zxy.h5')

effienet_1c2 = tf.keras.models.load_model('effienet_2c1_zxy.h5',custom_objects={ 'accuracy': dice_test,"dice_test": dice_test}, compile=False)

def predictByPath(case_path,case):
    files = next(os.walk(case_path))[2]
    X = np.empty((1, IMG_SIZE, IMG_SIZE,3))
    vol_path = case_path + case + '_img.npy'
    img=np.load(vol_path)

    for j in range(1):
        X[j,:,:,0] = img[...,3]
        X[j,:,:,1] = img[...,1]
        X[j,:,:,2] = img[...,2]

    return effienet_1c2.predict(X, verbose=1)
def predictByPath_X(case_path,case):
    files = next(os.walk(case_path))[2]
    X = np.empty((VOLUME_SLICES, IMG_SIZE, IMG_SIZE, 1))
    y = np.empty((VOLUME_SLICES, IMG_SIZE, IMG_SIZE))
    vol_path = case_path + case + '_img.npy'
    img=np.load(vol_path)

    vol_path = case_path + case + '_seg.npy'
    seg=np.load(vol_path)
    for j in range(1):
        X[j,:,:,0] = img[...,0]
        y[j,...] = seg

    return X,y
import numpy as np
def showPredictsById(case, start_slice = 60):
    path = TRAIN_DATASET_PATH + case + '/'
    p = predictByPath(path,case)

    origImage,gt=predictByPath_X(path,case)

    f, axarr = plt.subplots(1, 3, figsize=(18, 50))

    axarr[0].imshow(origImage[0,:,:,0], cmap="gray")
    axarr[0].title.set_text('Original image flair')
    axarr[1].imshow(gt[0,:,:], cmap="Reds", interpolation='none', alpha=0.3)
    axarr[1].title.set_text('Ground truth')
    axarr[2].imshow(np.round(p[0,:,:,0]), cmap="OrRd", interpolation='none', alpha=0.3)
    axarr[2].title.set_text('predicted')
    plt.show()

showPredictsById(case="BraTS19_CBICA_ATN_1_93")

effienet_1c2.compile(loss=generalized_dice_loss, optimizer=tf.keras.optimizers.Adam(learning_rate=0.001), metrics = ['accuracy', dice_test] )
# Evaluate the model on the test data using `evaluate`
print("Evaluate on test data")
results = effienet_1c2.evaluate(test_generator)
print("test loss, test accuracy, test dice:", results)

"""# **Efficientunet b4_f12_attention_gate**
EffcientUnetB4搭配(flair,1,t2)並加入attention_gate
"""

K.clear_session()
from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Conv2DTranspose, Concatenate, Input, Multiply
from tensorflow.keras.models import Model
from tensorflow.keras.applications import EfficientNetB4
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Add
import tensorflow as tf

print("TF Version: ", tf.__version__)
def attention_gate(input_x, g):
    theta_x = Conv2D(filters=input_x.shape[-1], kernel_size=(1, 1), strides=(1, 1), padding="same")(input_x)
    phi_g = Conv2D(filters=input_x.shape[-1], kernel_size=(1, 1), strides=(1, 1), padding="same")(g)
    f = Activation("relu")(Add()([theta_x, phi_g]))
    psi = Conv2D(filters=1, kernel_size=(1, 1), strides=(1, 1), padding="same")(f)
    attention = Activation("sigmoid")(psi)
    return Multiply()([input_x, attention])
def conv_block(inputs, num_filters):
    x = Conv2D(num_filters, 3, padding="same")(inputs)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    x = Conv2D(num_filters, 3, padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    return x
def decoder_block_with_attention_and_residual(inputs, skip, num_filters):
    x = Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(inputs)
    x = Concatenate()([x, skip])
    x = conv_block(x, num_filters)

    # Attention gate (the skip connection provides the gating signal)
    attention_output = attention_gate(x, skip)

    # Residual connection: add the gated features back
    x = Add()([x, attention_output])

    return x
def decoder_block(inputs, skip, num_filters):
    x = Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(inputs)
    #skip= Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(skip)
    x = Concatenate()([x, skip])
    x = conv_block(x, num_filters)
    return x

def build_effienet_unet(input_shape):
    """ Input """
    inputs = Input(input_shape)

    """ Pre-trained Encoder """
    encoder = EfficientNetB4(include_top=False, weights="imagenet", input_tensor=inputs)
    s0 = encoder.get_layer("input_1").output         ## 128x128
    s1 = encoder.get_layer("block1b_add").output     ## 64x64
    s2 = encoder.get_layer("block2d_add").output     ## 32x32
    s3 = encoder.get_layer("block3d_add").output     ## 16x16
    s4 = encoder.get_layer("block5f_add").output     ## 8x8

    """ Bottleneck """
    #b1 = encoder.get_layer("top_activation").output
    b1 = encoder.layers[-1].output                   ## 4x4

    """ Decoder """
    d1 = decoder_block_with_attention_and_residual(b1, s4, 512)    ## 8x8
    d2 = decoder_block_with_attention_and_residual(d1, s3, 256)    ## 16x16
    d3 = decoder_block_with_attention_and_residual(d2, s2, 128)    ## 32x32
    d4 = decoder_block_with_attention_and_residual(d3, s1, 64)     ## 64x64
    d5 = decoder_block_with_attention_and_residual(d4, s0, 32)     ## 128x128

    """ Output """
    outputs = Conv2D(1, 1, padding="same", activation="sigmoid")(d5)

    model = Model(inputs, outputs, name="EfficientNetB4_UNET")
    return model
input_shape = (128, 128, 3)
effienet_unet = build_effienet_unet(input_shape)
#effienet_unet.summary()
effienet_unet.compile(loss=generalized_dice_loss, optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), metrics = ['accuracy', dice_test] )

class DataGenerator(tf.keras.utils.Sequence):
    'Generates data for Keras'
    def __init__(self, list_IDs, dim=(IMG_SIZE,IMG_SIZE), batch_size = 64, n_channels = 3, shuffle=True):
        'Initialization'
        self.dim = dim
        self.batch_size = batch_size
        self.list_IDs = list_IDs
        self.n_channels = n_channels
        self.shuffle = shuffle
        self.on_epoch_end()

    def __len__(self):
        'Denotes the number of batches per epoch'
        return int(np.floor(len(self.list_IDs) / self.batch_size))

    def __getitem__(self, index):
        'Generate one batch of data'
        # Generate indexes of the batch
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]

        # Find list of IDs
        Batch_ids = [self.list_IDs[k] for k in indexes]

        # Generate data
        X, y = self.__data_generation(Batch_ids)

        return X, y

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle == True:
            np.random.shuffle(self.indexes)

    def __data_generation(self, Batch_ids):
        'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
        # Initialization
        X = np.zeros((self.batch_size, *self.dim, self.n_channels))
        y = np.zeros((self.batch_size, 128, 128))

        # Generate data
        for c, i in enumerate(Batch_ids):
            case_path = os.path.join(TRAIN_DATASET_PATH, i)

            data_path = os.path.join(case_path, f'{i}_img.npy')
            img = np.load(data_path)


            data_path = os.path.join(case_path, f'{i}_seg.npy')
            seg = np.load(data_path)

            X[c,:,:,0] = img[...,0]#flair
            X[c,:,:,1] = img[...,2]#t1
            X[c,:,:,2] = img[...,3]#t2

            y[c,:,:]=seg

        return X, y



training_generator = DataGenerator(train_id)
valid_generator = DataGenerator(val_id)
test_generator = DataGenerator(test_id)

history =  effienet_unet.fit(training_generator,
                    epochs=30,
                    validation_data = valid_generator
                    )

effienet_unet.save('effienet_f12_zxy.h5')

effienet_1c2 = tf.keras.models.load_model('effienet_f12_zxy.h5',custom_objects={ 'accuracy': dice_test,"dice_test": dice_test}, compile=False)

def predictByPath(case_path,case):
    files = next(os.walk(case_path))[2]
    X = np.empty((1, IMG_SIZE, IMG_SIZE,3))
    vol_path = case_path + case + '_img.npy'
    img=np.load(vol_path)

    for j in range(1):
        X[j,:,:,0] = img[...,0]
        X[j,:,:,1] = img[...,2]
        X[j,:,:,2] = img[...,3]

    return effienet_1c2.predict(X, verbose=1)
def predictByPath_X(case_path,case):
    files = next(os.walk(case_path))[2]
    X = np.empty((VOLUME_SLICES, IMG_SIZE, IMG_SIZE, 1))
    y = np.empty((VOLUME_SLICES, IMG_SIZE, IMG_SIZE))
    vol_path = case_path + case + '_img.npy'
    img=np.load(vol_path)

    vol_path = case_path + case + '_seg.npy'
    seg=np.load(vol_path)
    for j in range(1):
        X[j,:,:,0] = img[...,0]
        y[j,...] = seg

    return X,y
import numpy as np
def showPredictsById(case, start_slice = 60):
    path = TRAIN_DATASET_PATH + case + '/'
    p = predictByPath(path,case)

    origImage,gt=predictByPath_X(path,case)

    f, axarr = plt.subplots(1, 3, figsize=(18, 50))

    axarr[0].imshow(origImage[0,:,:,0], cmap="gray")
    axarr[0].title.set_text('Original image flair')
    axarr[1].imshow(gt[0,:,:], cmap="Reds", interpolation='none', alpha=0.3)
    axarr[1].title.set_text('Ground truth')
    axarr[2].imshow(np.round(p[0,:,:,0]), cmap="OrRd", interpolation='none', alpha=0.3)
    axarr[2].title.set_text('predicted')
    plt.show()

showPredictsById(case="BraTS19_CBICA_ATN_1_93")

effienet_1c2.compile(loss=generalized_dice_loss, optimizer=tf.keras.optimizers.Adam(learning_rate=0.001), metrics = ['accuracy', dice_test] )
# Evaluate the model on the test data using `evaluate`
print("Evaluate on test data")
results = effienet_1c2.evaluate(test_generator)
print("test loss, test accuracy, test dice:", results)

"""# ***模型比較***"""

unet = tf.keras.models.load_model('/content/drive/MyDrive/unet_3_zxy.h5',custom_objects={ 'accuracy': dice_test,"dice_test": dice_test}, compile=False)
effienet_fc1 = tf.keras.models.load_model('/content/drive/MyDrive/effienet_tc1_zxy.h5',custom_objects={ 'accuracy': dice_test,"dice_test": dice_test}, compile=False)
effienet_fc2 = tf.keras.models.load_model('/content/drive/MyDrive/effienet_tc2_zxy.h5',custom_objects={ 'accuracy': dice_test,"dice_test": dice_test}, compile=False)
effienet_2c1 = tf.keras.models.load_model('/content/drive/MyDrive/effienet_2c1_zxy.h5',custom_objects={ 'accuracy': dice_test,"dice_test": dice_test}, compile=False)
effienet_f12 = tf.keras.models.load_model('/content/drive/MyDrive/effienet_f12_zxy.h5',custom_objects={ 'accuracy': dice_test,"dice_test": dice_test}, compile=False)

def predictByPath_unet(case_path,case):
    files = next(os.walk(case_path))[2]
    X = np.empty((1, IMG_SIZE, IMG_SIZE, 4))

    vol_path = case_path + case + '_img.npy'
    img=np.load(vol_path)

    for j in range(1):
        X[j,:,:,0] = img[...,0]#flair
        X[j,:,:,1] = img[...,1]#t1ce
        X[j,:,:,2] = img[...,2]#t1
        X[j,:,:,3] = img[...,3]#t2

    return unet.predict(X, verbose=1)
def predictByPath_effienet_fc1(case_path,case):
    files = next(os.walk(case_path))[2]
    X = np.empty((1, IMG_SIZE, IMG_SIZE, 3))

    vol_path = case_path + case + '_img.npy'
    img=np.load(vol_path)



    for j in range(1):
        X[j,:,:,0] = img[...,0]
        X[j,:,:,1] = img[...,1]
        X[j,:,:,2] = img[...,2]

    return effienet_fc1.predict(X, verbose=1)
def predictByPath_effienet_fc2(case_path,case):
    files = next(os.walk(case_path))[2]
    X = np.empty((1, IMG_SIZE, IMG_SIZE, 3))

    vol_path = case_path + case + '_img.npy'
    img=np.load(vol_path)

    for j in range(1):
        X[j,:,:,0] = img[...,0]
        X[j,:,:,1] = img[...,1]
        X[j,:,:,2] = img[...,3]

    return effienet_fc2.predict(X, verbose=1)
def predictByPath_effienet_2c1(case_path,case):
    files = next(os.walk(case_path))[2]
    X = np.empty((1, IMG_SIZE, IMG_SIZE, 3))

    vol_path = case_path + case + '_img.npy'
    img=np.load(vol_path)

    for j in range(1):
        X[j,:,:,0] = img[...,3]
        X[j,:,:,1] = img[...,1]
        X[j,:,:,2] = img[...,2]

    return effienet_2c1.predict(X, verbose=1)
def predictByPath_effienet_f12(case_path,case):
    files = next(os.walk(case_path))[2]
    X = np.empty((1, IMG_SIZE, IMG_SIZE, 3))

    vol_path = case_path + case + '_img.npy'
    img=np.load(vol_path)

    for j in range(1):
        X[j,:,:,0] = img[...,0]
        X[j,:,:,1] = img[...,2]
        X[j,:,:,2] = img[...,3]

    return effienet_f12.predict(X, verbose=1)
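
# The five predictByPath_* helpers above differ only in which modality channels they pick
# from the saved (flair, t1ce, t1, t2) array. A table-driven alternative (a hypothetical
# sketch, not used elsewhere in this notebook) keeps the per-model channel mapping in one place:
MODEL_CHANNELS = {
    'unet':         (unet,         (0, 1, 2, 3)),
    'effienet_fc1': (effienet_fc1, (0, 1, 2)),
    'effienet_fc2': (effienet_fc2, (0, 1, 3)),
    'effienet_2c1': (effienet_2c1, (3, 1, 2)),
    'effienet_f12': (effienet_f12, (0, 2, 3)),
}

def predict_case(name, case_path, case):
    # Build a single-sample batch with only the channels the chosen model was trained on.
    model, channels = MODEL_CHANNELS[name]
    img = np.load(case_path + case + '_img.npy')
    X = np.stack([img[..., ch] for ch in channels], axis=-1)[np.newaxis, ...]
    return model.predict(X, verbose=1)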
import numpy as np
def predictByPath_seg(case_path,case):
    files = next(os.walk(case_path))[2]
    X = np.empty((1, IMG_SIZE, IMG_SIZE, 1))
    y = np.empty((1, IMG_SIZE, IMG_SIZE))
    vol_path = case_path + case + '_img.npy'
    img=np.load(vol_path)
    vol_path = case_path + case + '_seg.npy'
    seg=np.load(vol_path)
    for j in range(1):
        X[j,:,:,0] = img[...,0]
        y[j,...] = seg
    return X,y
def showPredictsById(case, start_slice = 60):
    path = TRAIN_DATASET_PATH + case + '/'

    gt = np.load(path + case +'_seg.npy')
    origImage = np.load(path + case +'_img.npy')
    p = predictByPath_unet(path,case)
    p1 = predictByPath_effienet_fc1(path,case)
    p2= predictByPath_effienet_fc2(path,case)
    p3= predictByPath_effienet_2c1(path,case)
    p4= predictByPath_effienet_f12(path,case)
    o,gt_check=predictByPath_seg(path,case)


    f, axarr = plt.subplots(1,5, figsize = (25, 15))


    # Overlay each rounded prediction on the ground-truth mask
    asc = gt_check[0]
    axarr[0].imshow(np.round(p[0,:,:,0]) + asc, cmap="hot")
    axarr[0].title.set_text('U-Net predicted')
    axarr[1].imshow(np.round(p1[0,:,:,0]) + asc, cmap="hot")
    axarr[1].title.set_text('EfficientUnet_fc1 predicted')
    axarr[2].imshow(np.round(p2[0,:,:,0]) + asc, cmap="hot")
    axarr[2].title.set_text('EfficientUnet_fc2 predicted')
    axarr[3].imshow(np.round(p3[0,:,:,0]) + asc, cmap="hot")
    axarr[3].title.set_text('EfficientUnet_2c1 predicted')
    axarr[4].imshow(np.round(p4[0,:,:,0]) + asc, cmap="hot")
    axarr[4].title.set_text('EfficientUnet_f12 predicted')
    axarr[0].axis('off')
    axarr[1].axis('off')
    axarr[2].axis('off')
    axarr[3].axis('off')
    axarr[4].axis('off')
    plt.show()

showPredictsById(case="BraTS19_CBICA_ATN_1_93")
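
# Numeric side-by-side comparison for the same slice (an illustrative sketch; this plain
# NumPy Dice coefficient is independent of the dice_test metric used during training and
# assumes the saved _seg.npy mask is binary, as in the training pipeline above).
def np_dice(pred, gt, eps=1e-6):
    pred = np.round(np.squeeze(pred)).astype(np.float32)
    gt = np.squeeze(gt).astype(np.float32)
    inter = np.sum(pred * gt)
    return (2.0 * inter + eps) / (np.sum(pred) + np.sum(gt) + eps)

case = "BraTS19_CBICA_ATN_1_93"
path = TRAIN_DATASET_PATH + case + '/'
_, gt_check = predictByPath_seg(path, case)
for name, fn in [('unet', predictByPath_unet),
                 ('effienet_fc1', predictByPath_effienet_fc1),
                 ('effienet_fc2', predictByPath_effienet_fc2),
                 ('effienet_2c1', predictByPath_effienet_2c1),
                 ('effienet_f12', predictByPath_effienet_f12)]:
    print(name, 'dice:', np_dice(fn(path, case), gt_check))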