import os
import numpy as np
import cv2

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as transforms

from PIL import Image
from torch.utils.data import Dataset, DataLoader
from scipy.ndimage import label  # scipy.ndimage.measurements is deprecated
from sklearn.metrics import roc_auc_score
import matplotlib.pyplot as plt

from keras.preprocessing.image import array_to_img  # only used by remove_nerves

Dataset

class GlaucomaDataset(Dataset):

    def __init__(self, root_dir, split='train', output_size=(256,256)):
        self.output_size = output_size
        self.root_dir = root_dir
        self.split = split
        self.images = []
        self.segs = []
        # Load data index
        for direct in self.root_dir:
            self.image_filenames = []
            for path in os.listdir(os.path.join(direct, "Images_Square")):
                if not path.startswith('.'):
                    self.image_filenames.append(path)


            for k in range(len(self.image_filenames)):
                print('Loading {} image {}/{}...'.format(split, k+1, len(self.image_filenames)), end='\r')
                img_name = os.path.join(direct, "Images_Square", self.image_filenames[k])
                #img = remove_nerves(np.array(Image.open(img_name).convert('RGB'))).astype(np.float32)
                img = np.array(Image.open(img_name).convert('RGB'))
                img = transforms.functional.to_tensor(img)
                img = transforms.functional.resize(img, output_size, interpolation=transforms.InterpolationMode.BILINEAR)
                self.images.append(img)
            if split != 'test':
                for k in range(len(self.image_filenames)):
                    print('Loading {} segmentation {}/{}...'.format(split, k+1, len(self.image_filenames)), end='\r')
                    seg_name = os.path.join(direct, "Masks_Square", self.image_filenames[k][:-3] + "png")
                    mask = np.array(Image.open(seg_name, mode='r'))
                    od = (mask==1.).astype(np.float32)
                    oc = (mask==2.).astype(np.float32)
                    od = torch.from_numpy(od[None,:,:])
                    oc = torch.from_numpy(oc[None,:,:])
                    od = transforms.functional.resize(od, output_size, interpolation=transforms.InterpolationMode.NEAREST)
                    oc = transforms.functional.resize(oc, output_size, interpolation=transforms.InterpolationMode.NEAREST)
                    self.segs.append(torch.cat([od, oc], dim=0))

        print('Successfully loaded {} dataset.'.format(split) + ' '*50)
            
            
    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        img = self.images[idx]
        if self.split == 'test':
            return img
        else:
            seg = self.segs[idx]
            return img, seg

Preprocessing

def remove_nerves(image):
    img = array_to_img(image)
    
    # PIL gives RGB; swap to BGR, OpenCV's native channel order
    img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
    # convert image to grayscale
    grayScale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
   
    # cross-shaped kernel for morphologyEx (cv2.MORPH_CROSS == 1)
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (17, 17))
   
    # apply MORPH_BLACKHAT to grayScale image
    blackhat = cv2.morphologyEx(grayScale, cv2.MORPH_BLACKHAT, kernel)
  
    # apply thresholding to blackhat
    _,threshold = cv2.threshold(blackhat,10,255,cv2.THRESH_BINARY)

    # inpaint with original image and threshold image
    final_image = cv2.inpaint(img,threshold,1,cv2.INPAINT_TELEA)
    final_image = cv2.cvtColor(final_image, cv2.COLOR_BGR2RGB)
    
    return final_image.astype(np.float64)/255.0
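
remove_nerves is only referenced in a commented-out line of the dataset loader; a minimal sketch of how it could be wired in before tensor conversion (the image path below is a placeholder, not a file from the original datasets):

img_name = "path/to/fundus.jpg"  # placeholder; any RGB fundus photograph
raw = np.array(Image.open(img_name).convert('RGB'))
clean = remove_nerves(raw)  # float64 in [0, 1], vessels inpainted away
img = transforms.functional.to_tensor(clean.astype(np.float32))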

Metrics

EPS = 1e-7

def compute_dice_coef(pred, target):
    '''
    Compute the mean Dice score over a batch.
    '''
    batch_size = pred.shape[0]
    return sum([dice_coef_sample(pred[k,:,:], target[k,:,:]) for k in range(batch_size)])/batch_size

def dice_coef_sample(pred, target):
    pflat = pred.contiguous().view(-1)
    tflat = target.contiguous().view(-1)
    intersection = (pflat * tflat).sum()
    # EPS guards against division by zero on empty masks
    return (2. * intersection) / (pflat.sum() + tflat.sum() + EPS)
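
A quick sanity check on toy tensors (illustrative only, not part of the original run): two 4-pixel masks overlapping on 2 pixels give a Dice score of 2*2/(4+4) = 0.5.

a = torch.zeros(1, 4, 4); a[0, :2, :2] = 1.
b = torch.zeros(1, 4, 4); b[0, 1:3, :2] = 1.
print(compute_dice_coef(a, b))  # tensor(0.5000)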


def vertical_diameter(binary_segmentation):
    '''
    Get the vertical diameter of a binary segmentation,
    i.e. the maximum number of foreground pixels found in any single column.
    '''
    # sum the pixels along the vertical axis (per-column heights)
    vertical_axis_diameter = np.sum(binary_segmentation, axis=1)

    # the diameter is the tallest column, per sample
    return np.max(vertical_axis_diameter, axis=1)



def vertical_cup_to_disc_ratio(od, oc):
    '''
    Compute the vertical cup-to-disc ratio from the given disc (od) and cup (oc) segmentation maps.
    '''
    # compute the cup diameter
    cup_diameter = vertical_diameter(oc)
    # compute the disc diameter
    disc_diameter = vertical_diameter(od)

    return cup_diameter / (disc_diameter + EPS)
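
A toy check of the vCDR computation (illustrative only): a disc 6 pixels tall and a cup 3 pixels tall give a ratio of roughly 0.5.

od = np.zeros((1, 8, 8)); od[0, 1:7, 2:6] = 1.  # disc: 6 px tall
oc = np.zeros((1, 8, 8)); oc[0, 2:5, 3:5] = 1.  # cup: 3 px tall
print(vertical_cup_to_disc_ratio(od, oc))  # ~[0.5]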

def compute_vCDR_error(pred_od, pred_oc, gt_od, gt_oc):
    '''
    Compute vCDR prediction error, along with predicted vCDR and ground truth vCDR.
    '''
    pred_vCDR = vertical_cup_to_disc_ratio(pred_od, pred_oc)
    gt_vCDR = vertical_cup_to_disc_ratio(gt_od, gt_oc)
    vCDR_err = np.mean(np.abs(gt_vCDR - pred_vCDR))
    return vCDR_err, pred_vCDR, gt_vCDR


def classif_eval(classif_preds, classif_gts):
    '''
    Compute AUC classification score.
    '''
    auc = roc_auc_score(classif_gts, classif_preds)
    return auc
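
A one-line check of the AUC wrapper (toy values, illustrative only): predictions that rank every positive above every negative give an AUC of 1.0.

print(classif_eval([0.9, 0.2, 0.7, 0.4], [1, 0, 1, 0]))  # 1.0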

Post Processing

def refine_seg(pred):
    '''
    Only retain the biggest connected component of a segmentation map.
    '''
    np_pred = pred.numpy()
        
    largest_ccs = []
    for i in range(np_pred.shape[0]):
        labeled, ncomponents = label(np_pred[i,:,:])
        bincounts = np.bincount(labeled.flat)[1:]
        if len(bincounts) == 0:
            # no foreground predicted: keep an empty mask
            largest_cc = np.zeros_like(labeled, dtype=bool)
        else:
            largest_cc = labeled == np.argmax(bincounts)+1
        largest_cc = torch.tensor(largest_cc, dtype=torch.float32)
        largest_ccs.append(largest_cc)
    largest_ccs = torch.stack(largest_ccs)
    
    return largest_ccs
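
A small demonstration of the post-processing (illustrative only): of a 4-pixel and a 16-pixel component, only the larger one survives.

toy = torch.zeros(1, 8, 8)
toy[0, :2, :2] = 1.   # 4-pixel component
toy[0, 4:, 4:] = 1.   # 16-pixel component
print(refine_seg(toy).sum().item())  # 16.0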

Network

class UNet(nn.Module):
    def __init__(self, n_channels=3, n_classes=2):
        super(UNet, self).__init__()
        self.n_channels = n_channels
        self.n_classes = n_classes
        self.epoch = 0

        self.inc = DoubleConv(n_channels, 64)
        self.down1 = Down(64, 128)
        self.down2 = Down(128, 256)
        self.down3 = Down(256, 512)
        self.down4 = Down(512, 1024)
        self.down5 = Down(1024, 2048)
        factor = 2  # halve decoder channel counts: bilinear upsampling (unlike transposed conv) keeps channels
        self.down6 = Down(2048, 4096 // factor)
        self.up1 = Up(4096, 2048 // factor)
        self.up2 = Up(2048, 1024 // factor)
        self.up3 = Up(1024, 512 // factor)
        self.up4 = Up(512, 256 // factor)
        self.up5 = Up(256, 128 // factor)
        self.up6 = Up(128, 64)
        self.output_layer = OutConv(64, n_classes)



    def forward(self, x):
        x1 = self.inc(x)
        x2 = self.down1(x1)
        x3 = self.down2(x2)
        x4 = self.down3(x3)
        x5 = self.down4(x4)
        x6 = self.down5(x5)
        x7 = self.down6(x6)
        out = self.up1(x7, x6)
        out = self.up2(out, x5)
        out = self.up3(out, x4)
        out = self.up4(out, x3)
        out = self.up5(out, x2)
        out = self.up6(out, x1)
        out = self.output_layer(out)
        out = torch.sigmoid(out)
        return out

    
class DoubleConv(nn.Module):
    """(convolution => [BN] => ReLU) * 2"""

    def __init__(self, in_channels, out_channels, mid_channels=None):
        super().__init__()
        if not mid_channels:
            mid_channels = out_channels
        self.double_conv = nn.Sequential(
            nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(mid_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True)
            
        )
        

    def forward(self, x):
        return self.double_conv(x)


class Down(nn.Module):
    """Downscaling with maxpool then double conv"""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.maxpool_conv = nn.Sequential(
            nn.MaxPool2d(2),
            DoubleConv(in_channels, out_channels)
            
        )
        

    def forward(self, x):
        return self.maxpool_conv(x)


class Up(nn.Module):
    """Upscaling then double conv"""

    def __init__(self, in_channels, out_channels):
        super().__init__()

        # upsample bilinearly; the following convolutions reduce the channel count
        self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)
        


    def forward(self, x1, x2):
        x1 = self.up(x1)
        # inputs are NCHW: pad x1 so its spatial size matches x2's
        diffY = x2.size()[2] - x1.size()[2]
        diffX = x2.size()[3] - x1.size()[3]

        x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,
                        diffY // 2, diffY - diffY // 2])
        x = torch.cat([x2, x1], dim=1)
        return self.conv(x)
        


class OutConv(nn.Module):
    '''
    Simple convolution.
    '''
    def __init__(self, in_channels, out_channels):
        super(OutConv, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)
        

    def forward(self, x):
        return self.conv(x)
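
A quick shape check for the network (illustrative only): the six pooling stages mean input sides must be divisible by 64, which the 256x256 resize in the dataset satisfies.

net = UNet()
with torch.no_grad():
    out = net(torch.randn(1, 3, 256, 256))
print(out.shape)  # torch.Size([1, 2, 256, 256]); values in (0, 1) after the sigmoid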

Settings

root_dirs = ["../input/glaucoma-datasets/ORIGA", "../input/glaucoma-datasets/G1020"]
val_dir = ["../input/glaucoma-datasets/REFUGE"]
lr = 1e-4
batch_size = 8
num_workers = 8
total_epoch = 40

Load Data

train_set = GlaucomaDataset(root_dirs, 
                          split='train')

val_set = GlaucomaDataset(val_dir, 
                        split='val')

train_loader = DataLoader(train_set, 
                          batch_size=batch_size, 
                          shuffle=True, 
                          num_workers=num_workers,
                          pin_memory=True,
                         )
val_loader = DataLoader(val_set, 
                        batch_size=batch_size, 
                        shuffle=False, 
                        num_workers=num_workers,
                        pin_memory=True,
                        )

Loading train image 13/650...

Loading train segmentation 13/650...

Successfully loaded train dataset.
Successfully loaded val dataset.

/opt/conda/lib/python3.7/site-packages/torch/utils/data/dataloader.py:490: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.
  cpuset_checked))
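
A quick visual spot-check of one training pair (illustrative only; uses the matplotlib import from the top of the notebook):

imgs, segs = train_set[0]
fig, axes = plt.subplots(1, 3, figsize=(9, 3))
axes[0].imshow(imgs.permute(1, 2, 0)); axes[0].set_title('image')
axes[1].imshow(segs[0], cmap='gray'); axes[1].set_title('optic disc')
axes[2].imshow(segs[1], cmap='gray'); axes[2].set_title('optic cup')
plt.show()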

Init Model

# Device
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Network
model = UNet(n_channels=3, n_classes=2).to(device)

# Loss (the network already applies a sigmoid, so plain BCE is the right choice)
seg_loss = torch.nn.BCELoss(reduction='mean')

# Optimizer
optimizer = optim.Adam(model.parameters(), lr=lr)
#optim.SGD(model.parameters(), lr=0.01, momentum=0.9)

Train

# Define parameters
nb_train_batches = len(train_loader)
nb_val_batches = len(val_loader)
nb_iter = 0
best_val_dsc = 0.
val_losses = []
train_losses = []


while model.epoch < total_epoch:
    # Accumulators
    train_vCDRs, val_vCDRs = [], []
    train_loss, val_loss = 0., 0.
    train_dsc_od, val_dsc_od = 0., 0.
    train_dsc_oc, val_dsc_oc = 0., 0.
    train_vCDR_error, val_vCDR_error = 0., 0.
    
    ############
    # TRAINING #
    ############
    model.train()
    train_data = iter(train_loader)
    for k in range(nb_train_batches):
        # Load data (DataLoader iterators no longer expose .next(); use the builtin)
        imgs, seg_gts = next(train_data)
        imgs, seg_gts = imgs.to(device), seg_gts.to(device)

        # Forward pass (the network outputs sigmoid probabilities)
        probs = model(imgs)
        loss = seg_loss(probs, seg_gts)
 
        # Backward pass
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_loss += loss.item() / nb_train_batches
        # record the per-batch loss for the loss curves
        train_losses.append(loss.item())
        
        with torch.no_grad():
            # Compute segmentation metrics
            pred_od = refine_seg((probs[:,0,:,:]>=0.5).type(torch.int8).cpu()).to(device)
            pred_oc = refine_seg((probs[:,1,:,:]>=0.5).type(torch.int8).cpu()).to(device)
            gt_od = seg_gts[:,0,:,:].type(torch.int8)
            gt_oc = seg_gts[:,1,:,:].type(torch.int8)
            dsc_od = compute_dice_coef(pred_od, gt_od)
            dsc_oc = compute_dice_coef(pred_oc, gt_oc)
            train_dsc_od += dsc_od.item()/nb_train_batches
            train_dsc_oc += dsc_oc.item()/nb_train_batches


            # Compute and store vCDRs
            vCDR_error, pred_vCDR, gt_vCDR = compute_vCDR_error(pred_od.cpu().numpy(), pred_oc.cpu().numpy(), gt_od.cpu().numpy(), gt_oc.cpu().numpy())
            train_vCDRs += pred_vCDR.tolist()
            train_vCDR_error += vCDR_error  / nb_train_batches
            
        # Increase iterations
        nb_iter += 1
        
        # Std out
        print('Epoch {}, iter {}/{}, loss {:.6f}'.format(model.epoch+1, k+1, nb_train_batches, loss.item()) + ' '*20, 
              end='\r')
    
    ##############
    # VALIDATION #
    ##############
    model.eval()
    with torch.no_grad():
        val_data = iter(val_loader)
        for k in range(nb_val_batches):
            # Load data
            imgs, seg_gts = next(val_data)
            imgs, seg_gts = imgs.to(device), seg_gts.to(device)
            
            # Forward pass
            probs = model(imgs)
            batch_loss = seg_loss(probs, seg_gts).item()
            val_loss += batch_loss / nb_val_batches
            val_losses.append(batch_loss)

            # Std out
            print('Validation iter {}/{}'.format(k+1, nb_val_batches) + ' '*50, 
                  end='\r')
            
            # Compute segmentation metric
            pred_od = refine_seg((probs[:,0,:,:]>=0.5).type(torch.int8).cpu()).to(device)
            pred_oc = refine_seg((probs[:,1,:,:]>=0.5).type(torch.int8).cpu()).to(device)
            gt_od = seg_gts[:,0,:,:].type(torch.int8)
            gt_oc = seg_gts[:,1,:,:].type(torch.int8)
            dsc_od = compute_dice_coef(pred_od, gt_od)
            dsc_oc = compute_dice_coef(pred_oc, gt_oc)
            val_dsc_od += dsc_od.item()/nb_val_batches
            val_dsc_oc += dsc_oc.item()/nb_val_batches
            
        
            vCDR_error, pred_vCDR, gt_vCDR = compute_vCDR_error(pred_od.cpu().numpy(), pred_oc.cpu().numpy(), gt_od.cpu().numpy(), gt_oc.cpu().numpy())
            val_vCDRs += pred_vCDR.tolist()
            val_vCDR_error += vCDR_error / nb_val_batches
    print('VALIDATION epoch {}'.format(model.epoch+1)+' '*50)
    print('LOSSES: {:.4f} (train), {:.4f} (val)'.format(train_loss, val_loss))
    print('OD segmentation (Dice Score): {:.4f} (train), {:.4f} (val)'.format(train_dsc_od, val_dsc_od))
    print('OC segmentation (Dice Score): {:.4f} (train), {:.4f} (val)'.format(train_dsc_oc, val_dsc_oc))
    print('vCDR error: {:.4f} (train), {:.4f} (val)'.format(train_vCDR_error, val_vCDR_error))
    # Save the model when the best validation Dice (OD + OC) is reached
    if val_dsc_od + val_dsc_oc > best_val_dsc:
        torch.save(model.state_dict(), '/kaggle/working/best_seg.pth')
        best_val_dsc = val_dsc_od + val_dsc_oc
        print('Best validation Dice reached. Saved model weights.')
    print('_'*50)
        
    # End of epoch
    model.epoch += 1

VALIDATION epoch 1                                                  
LOSSES: 0.4995 (train), 0.3994 (val)
OD segmentation (Dice Score): 0.3531 (train), 0.5370 (val)
OC segmentation (Dice Score): 0.1653 (train), 0.1160 (val)
vCDR error: 8.6617 (train), 5.7519 (val)
Best validation Dice reached. Saved model weights.
__________________________________________________
VALIDATION epoch 2                                                  
LOSSES: 0.3392 (train), 0.2836 (val)
OD segmentation (Dice Score): 0.5450 (train), 0.6851 (val)
OC segmentation (Dice Score): 0.3129 (train), 0.6201 (val)
vCDR error: 3.9439 (train), 1.9759 (val)
Best validation Dice reached. Saved model weights.
__________________________________________________
VALIDATION epoch 3                                                  
LOSSES: 0.2276 (train), 0.2009 (val)
OD segmentation (Dice Score): 0.6889 (train), 0.7708 (val)
OC segmentation (Dice Score): 0.5606 (train), 0.7326 (val)
vCDR error: 1.1119 (train), 0.3612 (val)
Best validation Dice reached. Saved model weights.
__________________________________________________
VALIDATION epoch 4                                                  
LOSSES: 0.1568 (train), 0.1576 (val)
OD segmentation (Dice Score): 0.7143 (train), 0.7459 (val)
OC segmentation (Dice Score): 0.5964 (train), 0.7713 (val)
vCDR error: 1.1536 (train), 0.1621 (val)
Best validation Dice reached. Saved model weights.
__________________________________________________
VALIDATION epoch 5                                                  
LOSSES: 0.1106 (train), 0.1067 (val)
OD segmentation (Dice Score): 0.7425 (train), 0.7667 (val)
OC segmentation (Dice Score): 0.6068 (train), 0.7499 (val)
vCDR error: 0.8591 (train), 0.2202 (val)
__________________________________________________
VALIDATION epoch 6                                                  
LOSSES: 0.0792 (train), 0.0680 (val)
OD segmentation (Dice Score): 0.7625 (train), 0.7374 (val)
OC segmentation (Dice Score): 0.6251 (train), 0.7435 (val)
vCDR error: 0.7833 (train), 0.3436 (val)
__________________________________________________
VALIDATION epoch 7                                                  
LOSSES: 0.0577 (train), 0.0519 (val)
OD segmentation (Dice Score): 0.7798 (train), 0.7981 (val)
OC segmentation (Dice Score): 0.6356 (train), 0.8089 (val)
vCDR error: 0.6444 (train), 0.1099 (val)
Best validation Dice reached. Saved model weights.
__________________________________________________
VALIDATION epoch 8                                                  
LOSSES: 0.0445 (train), 0.0424 (val)
OD segmentation (Dice Score): 0.7932 (train), 0.6702 (val)
OC segmentation (Dice Score): 0.6434 (train), 0.7996 (val)
vCDR error: 0.6738 (train), 0.2564 (val)
__________________________________________________
VALIDATION epoch 9                                                  
LOSSES: 0.0356 (train), 0.0324 (val)
OD segmentation (Dice Score): 0.8012 (train), 0.8065 (val)
OC segmentation (Dice Score): 0.6482 (train), 0.8091 (val)
vCDR error: 0.7382 (train), 0.1586 (val)
Best validation Dice reached. Saved model weights.
__________________________________________________
VALIDATION epoch 10                                                  
LOSSES: 0.0289 (train), 0.0310 (val)
OD segmentation (Dice Score): 0.8188 (train), 0.6857 (val)
OC segmentation (Dice Score): 0.6598 (train), 0.7167 (val)
vCDR error: 0.7524 (train), 2.2238 (val)
__________________________________________________
VALIDATION epoch 11                                                  
LOSSES: 0.0241 (train), 0.0263 (val)
OD segmentation (Dice Score): 0.8251 (train), 0.7683 (val)
OC segmentation (Dice Score): 0.6648 (train), 0.6091 (val)
vCDR error: 0.7852 (train), 0.3976 (val)
__________________________________________________
VALIDATION epoch 12                                                  
LOSSES: 0.0205 (train), 0.0204 (val)
OD segmentation (Dice Score): 0.8310 (train), 0.7964 (val)
OC segmentation (Dice Score): 0.6720 (train), 0.8165 (val)
vCDR error: 0.8110 (train), 0.1195 (val)
__________________________________________________
VALIDATION epoch 13                                                  
LOSSES: 0.0174 (train), 0.0178 (val)
OD segmentation (Dice Score): 0.8414 (train), 0.7825 (val)
OC segmentation (Dice Score): 0.6784 (train), 0.8078 (val)
vCDR error: 0.7701 (train), 0.1931 (val)
__________________________________________________
VALIDATION epoch 14                                                  
LOSSES: 0.0151 (train), 0.0148 (val)
OD segmentation (Dice Score): 0.8468 (train), 0.8240 (val)
OC segmentation (Dice Score): 0.6851 (train), 0.8420 (val)
vCDR error: 0.7495 (train), 0.0883 (val)
Best validation Dice reached. Saved model weights.
__________________________________________________
VALIDATION epoch 15                                                  
LOSSES: 0.0133 (train), 0.0146 (val)
OD segmentation (Dice Score): 0.8527 (train), 0.7773 (val)
OC segmentation (Dice Score): 0.6914 (train), 0.8128 (val)
vCDR error: 0.8001 (train), 0.5041 (val)
__________________________________________________
VALIDATION epoch 16                                                  
LOSSES: 0.0117 (train), 0.0138 (val)
OD segmentation (Dice Score): 0.8605 (train), 0.7872 (val)
OC segmentation (Dice Score): 0.7016 (train), 0.8331 (val)
vCDR error: 0.7869 (train), 0.1009 (val)
__________________________________________________
VALIDATION epoch 17                                                  
LOSSES: 0.0107 (train), 0.0122 (val)
OD segmentation (Dice Score): 0.8630 (train), 0.7978 (val)
OC segmentation (Dice Score): 0.7012 (train), 0.8533 (val)
vCDR error: 0.7680 (train), 0.1034 (val)
__________________________________________________
VALIDATION epoch 18                                                  
LOSSES: 0.0097 (train), 0.0116 (val)
OD segmentation (Dice Score): 0.8669 (train), 0.7917 (val)
OC segmentation (Dice Score): 0.7068 (train), 0.8393 (val)
vCDR error: 0.8058 (train), 0.2036 (val)
__________________________________________________
VALIDATION epoch 19                                                  
LOSSES: 0.0088 (train), 0.0113 (val)
OD segmentation (Dice Score): 0.8725 (train), 0.7765 (val)
OC segmentation (Dice Score): 0.7117 (train), 0.8286 (val)
vCDR error: 0.8312 (train), 0.1506 (val)
__________________________________________________
VALIDATION epoch 20                                                  
LOSSES: 0.0083 (train), 0.0097 (val)
OD segmentation (Dice Score): 0.8694 (train), 0.8167 (val)
OC segmentation (Dice Score): 0.7160 (train), 0.8142 (val)
vCDR error: 0.8498 (train), 0.1001 (val)
__________________________________________________
VALIDATION epoch 21                                                  
LOSSES: 0.0080 (train), 0.0103 (val)
OD segmentation (Dice Score): 0.8677 (train), 0.7832 (val)
OC segmentation (Dice Score): 0.7117 (train), 0.8348 (val)
vCDR error: 0.8541 (train), 0.1551 (val)
__________________________________________________
VALIDATION epoch 22                                                  
LOSSES: 0.0070 (train), 0.0085 (val)
OD segmentation (Dice Score): 0.8807 (train), 0.8248 (val)
OC segmentation (Dice Score): 0.7225 (train), 0.8454 (val)
vCDR error: 0.8814 (train), 0.1015 (val)
Best validation Dice reached. Saved model weights.
__________________________________________________
VALIDATION epoch 23                                                  
LOSSES: 0.0066 (train), 0.0078 (val)
OD segmentation (Dice Score): 0.8823 (train), 0.8498 (val)
OC segmentation (Dice Score): 0.7273 (train), 0.8385 (val)
vCDR error: 0.8874 (train), 0.1055 (val)
Best validation Dice reached. Saved model weights.
__________________________________________________
VALIDATION epoch 24                                                  
LOSSES: 0.0061 (train), 0.0090 (val)
OD segmentation (Dice Score): 0.8862 (train), 0.7946 (val)
OC segmentation (Dice Score): 0.7344 (train), 0.8406 (val)
vCDR error: 0.9295 (train), 0.0886 (val)
__________________________________________________
VALIDATION epoch 25                                                  
LOSSES: 0.0057 (train), 0.0114 (val)
OD segmentation (Dice Score): 0.8897 (train), 0.7426 (val)
OC segmentation (Dice Score): 0.7387 (train), 0.7514 (val)
vCDR error: 0.9121 (train), 0.2715 (val)
__________________________________________________
VALIDATION epoch 26                                                  
LOSSES: 0.0055 (train), 0.0085 (val)
OD segmentation (Dice Score): 0.8911 (train), 0.8229 (val)
OC segmentation (Dice Score): 0.7441 (train), 0.8232 (val)
vCDR error: 0.9411 (train), 0.1522 (val)
__________________________________________________
VALIDATION epoch 27                                                  
LOSSES: 0.0055 (train), 0.0097 (val)
OD segmentation (Dice Score): 0.8844 (train), 0.8031 (val)
OC segmentation (Dice Score): 0.7364 (train), 0.7679 (val)
vCDR error: 0.9211 (train), 0.1534 (val)
__________________________________________________
VALIDATION epoch 28                                                  
LOSSES: 0.0050 (train), 0.0075 (val)
OD segmentation (Dice Score): 0.8933 (train), 0.8256 (val)
OC segmentation (Dice Score): 0.7437 (train), 0.8468 (val)
vCDR error: 0.9988 (train), 0.1010 (val)
__________________________________________________
VALIDATION epoch 29                                                  
LOSSES: 0.0048 (train), 0.0086 (val)
OD segmentation (Dice Score): 0.8950 (train), 0.7839 (val)
OC segmentation (Dice Score): 0.7550 (train), 0.8421 (val)
vCDR error: 0.9836 (train), 0.1214 (val)
__________________________________________________
VALIDATION epoch 30                                                  
LOSSES: 0.0044 (train), 0.0083 (val)
OD segmentation (Dice Score): 0.9009 (train), 0.7993 (val)
OC segmentation (Dice Score): 0.7592 (train), 0.8405 (val)
vCDR error: 0.9153 (train), 0.1565 (val)
__________________________________________________
VALIDATION epoch 31                                                  
LOSSES: 0.0042 (train), 0.0074 (val)
OD segmentation (Dice Score): 0.9030 (train), 0.8235 (val)
OC segmentation (Dice Score): 0.7630 (train), 0.8437 (val)
vCDR error: 1.0067 (train), 0.1419 (val)
__________________________________________________
VALIDATION epoch 32                                                  
LOSSES: 0.0038 (train), 0.0070 (val)
OD segmentation (Dice Score): 0.9108 (train), 0.8288 (val)
OC segmentation (Dice Score): 0.7752 (train), 0.8519 (val)
vCDR error: 1.0172 (train), 0.1210 (val)
__________________________________________________
VALIDATION epoch 33                                                  
LOSSES: 0.0036 (train), 0.0083 (val)
OD segmentation (Dice Score): 0.9136 (train), 0.8010 (val)
OC segmentation (Dice Score): 0.7757 (train), 0.8498 (val)
vCDR error: 1.0102 (train), 0.1273 (val)
__________________________________________________
VALIDATION epoch 34                                                  
LOSSES: 0.0035 (train), 0.0084 (val)
OD segmentation (Dice Score): 0.9144 (train), 0.8056 (val)
OC segmentation (Dice Score): 0.7776 (train), 0.8487 (val)
vCDR error: 0.9639 (train), 0.1004 (val)
__________________________________________________
VALIDATION epoch 35                                                  
LOSSES: 0.0034 (train), 0.0083 (val)
OD segmentation (Dice Score): 0.9153 (train), 0.8036 (val)
OC segmentation (Dice Score): 0.7778 (train), 0.8525 (val)
vCDR error: 0.9778 (train), 0.1126 (val)
__________________________________________________
VALIDATION epoch 36                                                  
LOSSES: 0.0033 (train), 0.0067 (val)
OD segmentation (Dice Score): 0.9164 (train), 0.8362 (val)
OC segmentation (Dice Score): 0.7824 (train), 0.8564 (val)
vCDR error: 0.9830 (train), 0.1085 (val)
Best validation Dice reached. Saved model weights.
__________________________________________________
VALIDATION epoch 37                                                  
LOSSES: 0.0031 (train), 0.0081 (val)
OD segmentation (Dice Score): 0.9194 (train), 0.8076 (val)
OC segmentation (Dice Score): 0.7868 (train), 0.8473 (val)
vCDR error: 1.0167 (train), 0.1434 (val)
__________________________________________________
VALIDATION epoch 38                                                  
LOSSES: 0.0028 (train), 0.0070 (val)
OD segmentation (Dice Score): 0.9258 (train), 0.8465 (val)
OC segmentation (Dice Score): 0.7911 (train), 0.8469 (val)
vCDR error: 1.0796 (train), 0.0924 (val)
Best validation Dice reached. Saved model weights.
__________________________________________________
VALIDATION epoch 39                                                  
LOSSES: 0.0030 (train), 0.0082 (val)
OD segmentation (Dice Score): 0.9216 (train), 0.8264 (val)
OC segmentation (Dice Score): 0.7894 (train), 0.8499 (val)
vCDR error: 1.0004 (train), 0.1101 (val)
__________________________________________________
VALIDATION epoch 40                                                  
LOSSES: 0.0030 (train), 0.0093 (val)
OD segmentation (Dice Score): 0.9214 (train), 0.7946 (val)
OC segmentation (Dice Score): 0.7873 (train), 0.8345 (val)
vCDR error: 0.9929 (train), 0.1277 (val)
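
The loss histories collected above are never visualized in this section; a minimal sketch (matplotlib is already imported):

plt.plot(train_losses, label='train (per batch)')
plt.plot(val_losses, label='val (per batch)')
plt.xlabel('batch'); plt.ylabel('BCE loss'); plt.legend(); plt.show()

Likewise, the saved best weights and the dataset's 'test' split are never exercised; a hedged sketch of how inference could look, using only pieces defined above (the test directory is a placeholder, not part of the original run):

test_dirs = ["../input/glaucoma-datasets/REFUGE"]  # placeholder; any folder with an Images_Square/ subdir
test_set = GlaucomaDataset(test_dirs, split='test')
test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False)

model.load_state_dict(torch.load('/kaggle/working/best_seg.pth', map_location=device))
model.eval()
pred_vCDRs = []
with torch.no_grad():
    for imgs in test_loader:
        probs = model(imgs.to(device))
        # Threshold, keep the largest connected component, then compute vCDR
        pred_od = refine_seg((probs[:, 0] >= 0.5).type(torch.int8).cpu())
        pred_oc = refine_seg((probs[:, 1] >= 0.5).type(torch.int8).cpu())
        pred_vCDRs += vertical_cup_to_disc_ratio(pred_od.numpy(), pred_oc.numpy()).tolist()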
 