# Semantic segmentation
# Import the required libraries
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os
import zipfile
import urllib.request
import tensorflow as tf
import tensorflow_datasets as tfds
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
#Downloading the dataset
data_url = "https://storage.googleapis.com/laurencemoroney-blog.appspot.com/fcnn-dataset.zip"
data_zipfile_name = 'semantic_data.zip'
download_dir = 'Semantic Segmentation'
urllib.request.urlretrieve(data_url,data_zipfile_name)
with zipfile.ZipFile(data_zipfile_name, 'r') as zip_ref:
    zip_ref.extractall(download_dir)
class_names = ['sky', 'building', 'column/pole', 'road',
               'sidewalk', 'vegetation', 'traffic light',
               'fence', 'vehicle', 'pedestrian', 'bicyclist', 'void']
num_classes = len(class_names)
# Returns the preprocessed image and its one-hot annotation as tensors.
def preprocess_dataset(image_path, annot_path, height=224, width=224):
raw_image = tf.io.read_file(image_path)
raw_annotation = tf.io.read_file(annot_path)
image = tf.image.decode_jpeg(raw_image)
annotation = tf.image.decode_jpeg(raw_annotation)
    image = tf.image.resize(image,(height,width))
    # Resize the annotation with nearest-neighbour interpolation so that
    # blending does not invent fractional class ids at region boundaries.
    annotation = tf.image.resize(annotation,(height,width),method='nearest')
image = tf.reshape(image,(height,width,3))
annotation = tf.reshape(annotation,(height,width,1))
annotation = tf.cast(annotation,dtype = tf.int32)
    # Build one binary mask per class; stacking them along the channel
    # axis yields a (height, width, num_classes) one-hot annotation.
    final_annotation = []
    for i in range(num_classes):
class_label = tf.ones(shape = (height,width))
class_label = i*class_label
class_label = tf.cast(class_label,tf.int32)
mask = tf.equal(annotation[:,:,0],class_label)
mask = tf.cast(mask,tf.int32)
final_annotation.append(mask)
final_annotation = tf.stack(final_annotation,axis=2)
    # Scale pixel values from [0, 255] to [-1, 1].
    image = image/127.5 - 1
return image,final_annotation
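For reference, the per-class loop above is equivalent to a single tf.one_hot call. A minimal sketch of that alternative (not the code used in this run; the helper name is made up):

# Equivalent one-hot encoding in one call: squeeze the (H, W, 1) label
# map to (H, W), then expand it to (H, W, num_classes) binary channels.
def one_hot_annotation(annotation, num_classes):
    label_map = tf.squeeze(annotation, axis=-1)
    return tf.one_hot(label_map, depth=num_classes, dtype=tf.int32)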
batch_size = 8
def get_training_and_validation_dataset(train_image_dir,train_annot_dir,
valid_image_dir,valid_annot_dir):
train_image_paths = []
train_annot_paths = []
    # Sort both listings so every image lines up with its annotation;
    # os.listdir gives no ordering guarantee.
    train_image_path_list = sorted(os.listdir(train_image_dir))
    train_annot_path_list = sorted(os.listdir(train_annot_dir))
for temp_image_path,temp_annot_path in zip(train_image_path_list,train_annot_path_list):
temp_image_path = os.path.join(train_image_dir,temp_image_path)
temp_annot_path = os.path.join(train_annot_dir,temp_annot_path)
train_image_paths.append(temp_image_path)
train_annot_paths.append(temp_annot_path)
valid_image_paths = []
valid_annot_paths = []
    valid_image_paths_list = sorted(os.listdir(valid_image_dir))
    valid_annot_paths_list = sorted(os.listdir(valid_annot_dir))
for temp_image_path,temp_annot_path in zip(valid_image_paths_list,valid_annot_paths_list):
temp_image_path = os.path.join(valid_image_dir,temp_image_path)
temp_annot_path = os.path.join(valid_annot_dir,temp_annot_path)
valid_image_paths.append(temp_image_path)
valid_annot_paths.append(temp_annot_path)
training_dataset = tf.data.Dataset.from_tensor_slices((train_image_paths,train_annot_paths))
training_dataset = training_dataset.map(preprocess_dataset)
training_dataset = training_dataset.shuffle(100, reshuffle_each_iteration=True)
training_dataset = training_dataset.batch(batch_size)
training_dataset = training_dataset.repeat()
    training_dataset = training_dataset.prefetch(tf.data.AUTOTUNE)
validation_dataset = tf.data.Dataset.from_tensor_slices((valid_image_paths,valid_annot_paths))
validation_dataset = validation_dataset.map(preprocess_dataset)
    # Keep the validation set unshuffled: predictions are later matched
    # to ground truth by position, which requires a deterministic order.
    validation_dataset = validation_dataset.batch(batch_size)
    validation_dataset = validation_dataset.prefetch(tf.data.AUTOTUNE)
return training_dataset,validation_dataset
train_image_dir = "Semantic Segmentation/dataset1/images_prepped_train"
train_annot_dir = "Semantic Segmentation/dataset1/annotations_prepped_train"
valid_image_dir = "Semantic Segmentation/dataset1/images_prepped_test"
valid_annot_dir = "Semantic Segmentation/dataset1/annotations_prepped_test"
training_dataset,validation_dataset = get_training_and_validation_dataset(
train_image_dir,train_annot_dir,valid_image_dir,valid_annot_dir)
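Because the pairing relies purely on matching filename order, a quick sanity check is cheap insurance. This sketch assumes each annotation file shares its image's filename, which is how this dataset is laid out:

# Optional sanity check: every image should have an identically named
# annotation file, so the sorted listings must match element-wise.
image_names = sorted(os.listdir(train_image_dir))
annot_names = sorted(os.listdir(train_annot_dir))
assert image_names == annot_names, "image/annotation filenames diverge"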
for element in training_dataset:
print(tf.shape(element[0]))
print(tf.shape(element[1]))
break
tf.Tensor([  8 224 224   3], shape=(4,), dtype=int32)
tf.Tensor([  8 224 224  12], shape=(4,), dtype=int32)
# Color palette
colors = sns.color_palette(None,len(class_names))
for class_name,color in zip(class_names,colors):
print("CLASS NAME : {} , COLOR: {}".format(class_name,color))
CLASS NAME : sky , COLOR: (0.12156862745098039, 0.4666666666666667, 0.7058823529411765)
CLASS NAME : building , COLOR: (1.0, 0.4980392156862745, 0.054901960784313725)
CLASS NAME : column/pole , COLOR: (0.17254901960784313, 0.6274509803921569, 0.17254901960784313)
CLASS NAME : road , COLOR: (0.8392156862745098, 0.15294117647058825, 0.1568627450980392)
CLASS NAME : sidewalk , COLOR: (0.5803921568627451, 0.403921568627451, 0.7411764705882353)
CLASS NAME : vegetation , COLOR: (0.5490196078431373, 0.33725490196078434, 0.29411764705882354)
CLASS NAME : traffic light , COLOR: (0.8901960784313725, 0.4666666666666667, 0.7607843137254902)
CLASS NAME : fence , COLOR: (0.4980392156862745, 0.4980392156862745, 0.4980392156862745)
CLASS NAME : vehicle , COLOR: (0.7372549019607844, 0.7411764705882353, 0.13333333333333333)
CLASS NAME : pedestrian , COLOR: (0.09019607843137255, 0.7450980392156863, 0.8117647058823529)
CLASS NAME : bicyclist , COLOR: (0.12156862745098039, 0.4666666666666667, 0.7058823529411765)
CLASS NAME : void , COLOR: (1.0, 0.4980392156862745, 0.054901960784313725)
print(colors[1][2])
0.054901960784313725
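Note that bicyclist and void print the same RGB tuples as sky and building: seaborn's default palette cycles after 10 colors. If visually distinct classes matter, one option is an evenly spaced palette (a sketch, not what the run above used):

# "hls" spreads hues evenly around the color wheel, so all 12 classes
# get a distinct color; the rest of the code is unchanged.
colors = sns.color_palette("hls", num_classes)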
#Visualisation utilities
def fuse_tensors(images):
    heights_list = [image.shape[0] for image in images]
    widths_list = [image.shape[1] for image in images]
max_height = max(heights_list)
total_width = sum(widths_list)
fused_image = Image.new('RGB',(total_width,max_height))
x_coordinate = 0
y_coordinate = 0
for image in images:
pil_image = Image.fromarray(np.uint8(image))
fused_image.paste(pil_image,(x_coordinate,y_coordinate))
image_width = image.shape[1]
x_coordinate = x_coordinate + image_width
return fused_image
def give_colored_annotation(annotation):
colored_annotation = np.zeros( (annotation.shape[0],annotation.shape[1],3) ).astype('float')
for c in range(num_classes):
mask = (annotation == c)
colored_annotation[:,:,0] += mask*(255.0 * colors[c][0])
colored_annotation[:,:,1] += mask*(255.0 * colors[c][1])
colored_annotation[:,:,2] += mask*(255.0 * colors[c][2])
return colored_annotation
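The per-class loop above can also be written as a single table lookup. A minimal sketch of the vectorized equivalent (the function name is made up, not part of the original code):

# Vectorized variant: index a (num_classes, 3) color table with the
# integer label map; NumPy broadcasts the lookup to (H, W, 3).
palette = np.array(colors) * 255.0
def give_colored_annotation_fast(annotation):
    return palette[annotation].astype('float')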
def show_truth_and_pred(image,labelmaps,titles,iou_list,dice_score_list):
pred_annotation = labelmaps[0]
true_annotation = labelmaps[1]
pred_annotation = give_colored_annotation(pred_annotation)
true_annotation = give_colored_annotation(true_annotation)
image = image + 1
image = image*127.5
images = np.uint8([image,pred_annotation,true_annotation])
metrics = []
for itr,(iou,dice_score) in enumerate(zip(iou_list,dice_score_list)):
metrics.append((itr,iou,dice_score))
metrics.sort(key=lambda tup: tup[1], reverse=True)
display_string_list = []
for itr,iou,dice_score in metrics:
string = "CLASS NAME: {}, IOU: {}, DS: {}".format(class_names[itr],iou,dice_score)
display_string_list.append(string)
display_string = "\n\n".join(display_string_list)
plt.figure(figsize = (15,4))
for idx, image in enumerate(images):
plt.subplot(1, 3, idx+1)
if idx == 1:
plt.xlabel(display_string)
plt.xticks([])
plt.yticks([])
plt.title(titles[idx], fontsize=12)
plt.imshow(image)
def display_image_and_annotation(image,annotation):
annotation = np.argmax(annotation,axis=2)
annotation = give_colored_annotation(annotation)
image = image + 1
image = 127.5*image
images = [image,annotation]
images = np.uint8(images)
fused_image = fuse_tensors(images)
plt.imshow(fused_image)
def show_image_and_annotation_list(dataset):
ds = dataset.unbatch()
ds = ds.shuffle(buffer_size = 100)
plt.figure(figsize = (25,15))
plt.title("Image and Annotations")
plt.subplots_adjust(bottom=0.1, top=0.9, hspace=0.05)
for index,(image,annotation) in enumerate(ds.take(6)):
plt.subplot(3, 3, index + 1)
plt.yticks([])
plt.xticks([])
display_image_and_annotation(image.numpy(), annotation.numpy())
show_image_and_annotation_list(training_dataset)
show_image_and_annotation_list(validation_dataset)
# One block: num_convs same-padding convolutions followed by a single
# max-pooling layer that halves the spatial resolution.
def block(x,num_convs,kernel_size,filters,pool_size,pool_stride,
          activation,block_name):
for i in range(num_convs):
x = tf.keras.layers.Conv2D(filters = filters, kernel_size = kernel_size,
activation = activation, padding = 'same',
name = "{}_conv{}".format(block_name,i+1))(x)
x = tf.keras.layers.MaxPooling2D(pool_size = pool_size,strides = pool_stride,
name = "{}_pool{}".format(block_name,i+1))(x)
return x
# VGG-style encoder: five convolutional blocks; each block's feature
# map is returned so the decoder can use it as a skip connection.
def encoder(input_image):
x = block(input_image,num_convs=2,filters = 64,kernel_size= (3,3),
pool_size = (2,2),pool_stride = (2,2),activation = 'relu',
block_name = 'block1')
f1 = x
x = block(x,num_convs=2,filters = 128,kernel_size= (3,3),
pool_size = (2,2),pool_stride = (2,2),activation = 'relu',
block_name = 'block2')
f2 = x
x = block(x,num_convs=2,filters = 256,kernel_size= (3,3),
pool_size = (2,2),pool_stride = (2,2),activation = 'relu',
block_name = 'block3')
f3 = x
x = block(x,num_convs=2,filters = 512,kernel_size= (3,3),
pool_size = (2,2),pool_stride = (2,2),activation = 'relu',
block_name = 'block4')
f4 = x
x = block(x,num_convs=2,filters = 512,kernel_size= (3,3),
pool_size = (2,2),pool_stride = (2,2),activation = 'relu',
block_name = 'block5')
f5 = x
return (f1,f2,f3,f4,f5)
# Bottleneck: convolutional replacement for VGG's fully connected layers.
def bottleneck(input_tensor,filters,filter_size,activation):
x = tf.keras.layers.Conv2D(filters = filters,kernel_size = filter_size,padding = 'same',
activation = activation,name = 'conv6')(input_tensor)
x = tf.keras.layers.Conv2D(filters = filters, kernel_size = (1,1),padding = 'same',
activation = activation,name = 'conv7')(x)
return x
# FCN-8-style decoder: progressively upsample the bottleneck output and
# fuse it with the block-4 and block-3 encoder features.
def decoder(encoder_output,bottleneck_output,num_classes):
f1,f2,f3,f4,f5 = encoder_output
x = tf.keras.layers.Conv2DTranspose(num_classes,kernel_size = (4,4),strides = (2,2),
use_bias = False)(bottleneck_output)
x = tf.keras.layers.Cropping2D(cropping = (1,1))(x)
x_temp = tf.keras.layers.Conv2D(num_classes,kernel_size = (1,1),activation = 'relu',
padding = 'same')(f4)
x = tf.keras.layers.Add()([x,x_temp])
x = tf.keras.layers.Conv2DTranspose(num_classes,kernel_size = (4,4),strides = (2,2),
use_bias = False)(x)
x = tf.keras.layers.Cropping2D(cropping = (1,1))(x)
x_temp = tf.keras.layers.Conv2D(num_classes,kernel_size = (1,1),activation = 'relu',
padding = 'same')(f3)
x = tf.keras.layers.Add()([x,x_temp])
x = tf.keras.layers.Conv2DTranspose(num_classes,kernel_size = (8,8),strides = (8,8),
use_bias = False)(x)
    x = tf.keras.layers.Activation('softmax')(x)
return x
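The kernel, stride, and cropping choices are what make the skip-connection shapes line up. With padding='valid', a transposed convolution maps length n to (n - 1) * stride + kernel; a small check against the shapes printed in the summary below:

# Output length of a 'valid' Conv2DTranspose along one spatial axis.
def transposed_conv_size(n, kernel, stride):
    return (n - 1) * stride + kernel

assert transposed_conv_size(7, 4, 2) - 2 == 14    # crop (1,1) trims 2 px -> matches f4
assert transposed_conv_size(14, 4, 2) - 2 == 28   # -> matches f3
assert transposed_conv_size(28, 8, 8) == 224      # back to the input size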
def final_segmentation_model(num_classes):
input_image = tf.keras.Input(shape = (224,224,3,))
encoder_output = encoder(input_image)
f1,f2,f3,f4,f5 = encoder_output
bottleneck_output = bottleneck(f5,filters = 1024,filter_size = (7,7),activation = 'relu')
output = decoder(encoder_output,bottleneck_output,num_classes)
model = tf.keras.Model(inputs = input_image,outputs = output)
return model
model = final_segmentation_model(num_classes)
model.summary()
Model: "model"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) [(None, 224, 224, 3) 0
__________________________________________________________________________________________________
block1_conv1 (Conv2D) (None, 224, 224, 64) 1792 input_1[0][0]
__________________________________________________________________________________________________
block1_conv2 (Conv2D) (None, 224, 224, 64) 36928 block1_conv1[0][0]
__________________________________________________________________________________________________
block1_pool2 (MaxPooling2D) (None, 112, 112, 64) 0 block1_conv2[0][0]
__________________________________________________________________________________________________
block2_conv1 (Conv2D) (None, 112, 112, 128 73856 block1_pool2[0][0]
__________________________________________________________________________________________________
block2_conv2 (Conv2D) (None, 112, 112, 128 147584 block2_conv1[0][0]
__________________________________________________________________________________________________
block2_pool2 (MaxPooling2D) (None, 56, 56, 128) 0 block2_conv2[0][0]
__________________________________________________________________________________________________
block3_conv1 (Conv2D) (None, 56, 56, 256) 295168 block2_pool2[0][0]
__________________________________________________________________________________________________
block3_conv2 (Conv2D) (None, 56, 56, 256) 590080 block3_conv1[0][0]
__________________________________________________________________________________________________
block3_pool2 (MaxPooling2D) (None, 28, 28, 256) 0 block3_conv2[0][0]
__________________________________________________________________________________________________
block4_conv1 (Conv2D) (None, 28, 28, 512) 1180160 block3_pool2[0][0]
__________________________________________________________________________________________________
block4_conv2 (Conv2D) (None, 28, 28, 512) 2359808 block4_conv1[0][0]
__________________________________________________________________________________________________
block4_pool2 (MaxPooling2D) (None, 14, 14, 512) 0 block4_conv2[0][0]
__________________________________________________________________________________________________
block5_conv1 (Conv2D) (None, 14, 14, 512) 2359808 block4_pool2[0][0]
__________________________________________________________________________________________________
block5_conv2 (Conv2D) (None, 14, 14, 512) 2359808 block5_conv1[0][0]
__________________________________________________________________________________________________
block5_pool2 (MaxPooling2D) (None, 7, 7, 512) 0 block5_conv2[0][0]
__________________________________________________________________________________________________
conv6 (Conv2D) (None, 7, 7, 1024) 25691136 block5_pool2[0][0]
__________________________________________________________________________________________________
conv7 (Conv2D) (None, 7, 7, 1024) 1049600 conv6[0][0]
__________________________________________________________________________________________________
conv2d_transpose (Conv2DTranspo (None, 16, 16, 12) 196608 conv7[0][0]
__________________________________________________________________________________________________
cropping2d (Cropping2D) (None, 14, 14, 12) 0 conv2d_transpose[0][0]
__________________________________________________________________________________________________
conv2d (Conv2D) (None, 14, 14, 12) 6156 block4_pool2[0][0]
__________________________________________________________________________________________________
add (Add) (None, 14, 14, 12) 0 cropping2d[0][0]
conv2d[0][0]
__________________________________________________________________________________________________
conv2d_transpose_1 (Conv2DTrans (None, 30, 30, 12) 2304 add[0][0]
__________________________________________________________________________________________________
cropping2d_1 (Cropping2D) (None, 28, 28, 12) 0 conv2d_transpose_1[0][0]
__________________________________________________________________________________________________
conv2d_1 (Conv2D) (None, 28, 28, 12) 3084 block3_pool2[0][0]
__________________________________________________________________________________________________
add_1 (Add) (None, 28, 28, 12) 0 cropping2d_1[0][0]
conv2d_1[0][0]
__________________________________________________________________________________________________
conv2d_transpose_2 (Conv2DTrans (None, 224, 224, 12) 9216 add_1[0][0]
__________________________________________________________________________________________________
activation (Activation) (None, 224, 224, 12) 0 conv2d_transpose_2[0][0]
==================================================================================================
Total params: 36,363,096
Trainable params: 36,363,096
Non-trainable params: 0
__________________________________________________________________________________________________
#Compile the Model
opt = tf.keras.optimizers.SGD(learning_rate=1e-2, momentum=0.9, nesterov=True)
model.compile(loss = 'categorical_crossentropy',
optimizer = opt,
metrics = ['accuracy'])
num_training_images = len(os.listdir(train_image_dir))
num_validation_images = len(os.listdir(valid_image_dir))
epochs = 170
train_steps_per_epoch = num_training_images//batch_size
valid_steps_per_epoch = num_validation_images//batch_size
history = model.fit(training_dataset,steps_per_epoch = train_steps_per_epoch,
validation_data = validation_dataset,
validation_steps = valid_steps_per_epoch,epochs = epochs)
Epoch 1/170
45/45 [==============================] - 77s 1s/step - loss: 2.4849 - accuracy: 0.0924 - val_loss: 2.4847 - val_accuracy: 0.1029
Epoch 2/170
45/45 [==============================] - 57s 1s/step - loss: 2.4846 - accuracy: 0.0958 - val_loss: 2.4843 - val_accuracy: 0.1038
...
Epoch 10/170
45/45 [==============================] - 49s 1s/step - loss: 1.2831 - accuracy: 0.6247 - val_loss: 1.2689 - val_accuracy: 0.6116
...
Epoch 50/170
45/45 [==============================] - 49s 1s/step - loss: 0.6177 - accuracy: 0.8197 - val_loss: 0.7870 - val_accuracy: 0.7700
...
Epoch 61/170
45/45 [==============================] - 49s 1s/step - loss: 0.4665 - accuracy: 0.8699 - val_loss: 0.7019 - val_accuracy: 0.8037
...
Epoch 100/170
45/45 [==============================] - 49s 1s/step - loss: 0.2981 - accuracy: 0.9168 - val_loss: 0.8157 - val_accuracy: 0.8142
...
Epoch 170/170
45/45 [==============================] - 49s 1s/step - loss: 0.2301 - accuracy: 0.9352 - val_loss: 1.0369 - val_accuracy: 0.8024
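In the log, val_loss bottoms out near epoch 61 (about 0.70) and drifts upward afterwards while the training loss keeps falling, so the final weights are not the best ones. A hedged sketch of how checkpointing and early stopping could capture the best epoch instead (not part of the original run; "best_model.h5" is an arbitrary filename):

# Keep the weights from the epoch with the lowest validation loss and
# stop once val_loss has not improved for 15 epochs.
callbacks = [
    tf.keras.callbacks.ModelCheckpoint("best_model.h5", monitor="val_loss",
                                       save_best_only=True),
    tf.keras.callbacks.EarlyStopping(monitor="val_loss", patience=15,
                                     restore_best_weights=True),
]
history = model.fit(training_dataset, steps_per_epoch=train_steps_per_epoch,
                    validation_data=validation_dataset,
                    validation_steps=valid_steps_per_epoch,
                    epochs=epochs, callbacks=callbacks)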
model.save("trained_model.h5")
from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())
print("Num GPUs: ", len(tf.config.list_physical_devices('GPU')))
from tensorflow import keras
model = keras.models.load_model('trained_model.h5')
model.summary()
Model: "model"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) [(None, 224, 224, 3) 0
__________________________________________________________________________________________________
block1_conv1 (Conv2D) (None, 224, 224, 64) 1792 input_1[0][0]
__________________________________________________________________________________________________
block1_conv2 (Conv2D) (None, 224, 224, 64) 36928 block1_conv1[0][0]
__________________________________________________________________________________________________
block1_pool2 (MaxPooling2D) (None, 112, 112, 64) 0 block1_conv2[0][0]
__________________________________________________________________________________________________
block2_conv1 (Conv2D) (None, 112, 112, 128 73856 block1_pool2[0][0]
__________________________________________________________________________________________________
block2_conv2 (Conv2D) (None, 112, 112, 128 147584 block2_conv1[0][0]
__________________________________________________________________________________________________
block2_pool2 (MaxPooling2D) (None, 56, 56, 128) 0 block2_conv2[0][0]
__________________________________________________________________________________________________
block3_conv1 (Conv2D) (None, 56, 56, 256) 295168 block2_pool2[0][0]
__________________________________________________________________________________________________
block3_conv2 (Conv2D) (None, 56, 56, 256) 590080 block3_conv1[0][0]
__________________________________________________________________________________________________
block3_pool2 (MaxPooling2D) (None, 28, 28, 256) 0 block3_conv2[0][0]
__________________________________________________________________________________________________
block4_conv1 (Conv2D) (None, 28, 28, 512) 1180160 block3_pool2[0][0]
__________________________________________________________________________________________________
block4_conv2 (Conv2D) (None, 28, 28, 512) 2359808 block4_conv1[0][0]
__________________________________________________________________________________________________
block4_pool2 (MaxPooling2D) (None, 14, 14, 512) 0 block4_conv2[0][0]
__________________________________________________________________________________________________
block5_conv1 (Conv2D) (None, 14, 14, 512) 2359808 block4_pool2[0][0]
__________________________________________________________________________________________________
block5_conv2 (Conv2D) (None, 14, 14, 512) 2359808 block5_conv1[0][0]
__________________________________________________________________________________________________
block5_pool2 (MaxPooling2D) (None, 7, 7, 512) 0 block5_conv2[0][0]
__________________________________________________________________________________________________
conv6 (Conv2D) (None, 7, 7, 1024) 25691136 block5_pool2[0][0]
__________________________________________________________________________________________________
conv7 (Conv2D) (None, 7, 7, 1024) 1049600 conv6[0][0]
__________________________________________________________________________________________________
conv2d_transpose (Conv2DTranspo (None, 16, 16, 12) 196608 conv7[0][0]
__________________________________________________________________________________________________
cropping2d (Cropping2D) (None, 14, 14, 12) 0 conv2d_transpose[0][0]
__________________________________________________________________________________________________
conv2d (Conv2D) (None, 14, 14, 12) 6156 block4_pool2[0][0]
__________________________________________________________________________________________________
add (Add) (None, 14, 14, 12) 0 cropping2d[0][0]
conv2d[0][0]
__________________________________________________________________________________________________
conv2d_transpose_1 (Conv2DTrans (None, 30, 30, 12) 2304 add[0][0]
__________________________________________________________________________________________________
cropping2d_1 (Cropping2D) (None, 28, 28, 12) 0 conv2d_transpose_1[0][0]
__________________________________________________________________________________________________
conv2d_1 (Conv2D) (None, 28, 28, 12) 3084 block3_pool2[0][0]
__________________________________________________________________________________________________
add_1 (Add) (None, 28, 28, 12) 0 cropping2d_1[0][0]
conv2d_1[0][0]
__________________________________________________________________________________________________
conv2d_transpose_2 (Conv2DTrans (None, 224, 224, 12) 9216 add_1[0][0]
__________________________________________________________________________________________________
activation (Activation) (None, 224, 224, 12) 0 conv2d_transpose_2[0][0]
==================================================================================================
Total params: 36,363,096
Trainable params: 36,363,096
Non-trainable params: 0
__________________________________________________________________________________________________
def get_testing_set(validation_dataset):
    # Pull the first 16 validation images and their integer label maps.
    true_test_annotations = []
true_test_images = []
num_test_count = 16
test_dataset = validation_dataset.unbatch()
test_dataset = test_dataset.batch(50)
for image,annotation in test_dataset.take(1):
true_test_images = image
true_test_annotations = annotation
true_test_images = true_test_images[:num_test_count,:,:,:]
true_test_annotations = true_test_annotations[:num_test_count,:,:,:]
true_test_annotations = np.argmax(true_test_annotations,axis=3)
return true_test_images,true_test_annotations
true_test_images,true_test_annotations = get_testing_set(validation_dataset)
print(true_test_images.shape)
print(true_test_annotations.shape)
(16, 224, 224, 3)
(16, 224, 224)
results = model.predict(validation_dataset,steps = valid_steps_per_epoch)
predicted_annotations = np.argmax(results,axis=3)
print(predicted_annotations.shape)
(96, 224, 224)
def compute_metrics(pred_labelmap,true_labelmap):
    # Per-class IoU and Dice score; eps keeps 0/0 finite for classes
    # absent from both maps.
    class_wise_iou = []
    class_wise_dice_score = []
    eps = 0.00001
for i in range(num_classes):
overlap_area = np.sum((pred_labelmap==i)*(true_labelmap==i))
pred_labelmap_area = np.sum((pred_labelmap==i))
true_labelmap_area = np.sum((true_labelmap==i))
union_area = pred_labelmap_area + true_labelmap_area - overlap_area
combined_area = pred_labelmap_area + true_labelmap_area
iou = (overlap_area+eps)/(union_area+eps)
dice_score = 2*(overlap_area+eps)/(combined_area+eps)
class_wise_iou.append(iou)
class_wise_dice_score.append(dice_score)
return class_wise_iou,class_wise_dice_score
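compute_metrics scores a single image; averaged over the whole test set it yields one mean-IoU number. A sketch, assuming the unshuffled validation ordering established earlier so the first 16 predictions correspond to true_test_annotations:

# Average per-class IoU over the 16 test images, then collapse to mean IoU.
per_image_iou = [compute_metrics(pred, true)[0]
                 for pred, true in zip(predicted_annotations[:16],
                                       true_test_annotations)]
mean_iou_per_class = np.mean(per_image_iou, axis=0)
print("mean IoU over all classes:", np.mean(mean_iou_per_class))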
pred_labelmap = predicted_annotations[10]
true_labelmap = true_test_annotations[10]
class_wise_iou,class_wise_dice_score = compute_metrics(pred_labelmap,true_labelmap)
test_image = true_test_images[10]
show_truth_and_pred(test_image,[pred_labelmap,true_labelmap],
                    ["Image", "Predicted Mask", "True Mask"],
                    class_wise_iou,class_wise_dice_score)
print(pred_labelmap.shape)
(224, 224)
print(true_labelmap.shape)
(224, 224)
accuracy = np.sum((pred_labelmap==true_labelmap))
accuracy = accuracy/(224*224)
print(accuracy)
0.7577726403061225
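The 0.7577 above is pixel accuracy for test image 10 alone. The same metric over the full test set is a one-liner (again assuming the unshuffled ordering, so predictions and ground truth align by index):

# Fraction of pixels where prediction matches ground truth, all 16 images.
overall_accuracy = np.mean(predicted_annotations[:16] == true_test_annotations)
print(overall_accuracy)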