From d75527f7eecc4e2fcdd18ab157412506717c8adb Mon Sep 17 00:00:00 2001
From: navanchauhan
Date: Mon, 7 Nov 2022 23:36:11 -0700
Subject: add blog post

---
 .../2019-12-08-Image-Classifier-Tensorflow.html | 60 ++++++++++++++--------
 1 file changed, 40 insertions(+), 20 deletions(-)
(limited to 'docs/posts/2019-12-08-Image-Classifier-Tensorflow.html')

diff --git a/docs/posts/2019-12-08-Image-Classifier-Tensorflow.html b/docs/posts/2019-12-08-Image-Classifier-Tensorflow.html
index ac305ac..9ecfff0 100644
--- a/docs/posts/2019-12-08-Image-Classifier-Tensorflow.html
+++ b/docs/posts/2019-12-08-Image-Classifier-Tensorflow.html

Imports

%tensorflow_version 2.x # Tell Colab to use TF 2.x; ignore this line on a local machine

from PIL import Image # We use the PIL library to resize images
import numpy as np
import os # Needed below to list the image folders
import cv2 # Needed below to read the cell images
from tensorflow.keras import layers, models # Needed below to build the model
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout

Dataset

Fetching the Data

!wget ftp://lhcftp.nlm.nih.gov/Open-Access-Datasets/Malaria/cell_images.zip
 !unzip cell_images.zip
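
If you want to confirm the dataset extracted correctly before going further, listing the folder is enough (a quick, optional check; it assumes the archive unpacked into ./cell_images, which is the layout the code below expects):

print(os.listdir("./cell_images")) # should show the 'Parasitized' and 'Uninfected' folders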

Processing the Data

We resize all the images to 50x50 pixels and append each image's numpy array, together with its label (infected or not), to common arrays.

data = []
labels = []

# Read every cell image from both folders, resize it to 50x50, and store
# its pixel array together with its label (0 = Parasitized, 1 = Uninfected)
for label, folder in enumerate(["Parasitized", "Uninfected"]):
    for name in os.listdir("./cell_images/" + folder + "/"):
        try:
            image = cv2.imread("./cell_images/" + folder + "/" + name)
            size_image = Image.fromarray(image, 'RGB').resize((50, 50))
            data.append(np.array(size_image))
            labels.append(label)
        except AttributeError: # cv2.imread returns None for non-image files
            print("")

Splitting Data

We use the first 10% of the images as the testing set and train on the remaining 90%, then shuffle the training set so the two classes are mixed.

df = np.array(data)
labels = np.array(labels)
(X_train, X_test) = df[(int)(0.1*len(df)):], df[:(int)(0.1*len(df))]
(y_train, y_test) = labels[(int)(0.1*len(labels)):], labels[:(int)(0.1*len(labels))]

s = np.arange(X_train.shape[0])
np.random.shuffle(s)
X_train = X_train[s] # Apply the shuffled indices so images and labels stay aligned
y_train = y_train[s]

X_train = X_train/255.0 # Scale pixel values to [0, 1]
X_test = X_test/255.0
 
 

Note: The input shape for the first layer is (50, 50, 3), matching the 50x50 RGB images we created when resizing

model = models.Sequential()
 model.add(layers.Conv2D(filters=16, kernel_size=2, padding='same', activation='relu', input_shape=(50,50,3)))
 model.add(layers.MaxPooling2D(pool_size=2))
 model.add(layers.Conv2D(filters=32,kernel_size=2,padding='same',activation='relu'))
@@ -135,25 +144,31 @@ X_train = X_train/255.0
 model.add(layers.Dropout(0.2))
model.add(layers.Dense(2,activation="softmax")) # 2 output neurons, one per class
 model.summary()

Compiling Model

We use the Adam optimiser, an adaptive learning-rate optimisation algorithm designed for training deep neural networks: it adjusts its learning rate automatically during training to get good results

model.compile(optimizer="adam",
              loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])

Training Model

We train the model for 10 epochs on the training data, validating it against the testing data at the end of each epoch

history = model.fit(X_train,y_train, epochs=10, validation_data=(X_test,y_test))
Train on 24803 samples, validate on 2755 samples
 Epoch 1/10
 24803/24803 [==============================] - 57s 2ms/sample - loss: 0.0786 - accuracy: 0.9729 - val_loss: 0.0000e+00 - val_accuracy: 1.0000
 Epoch 2/10
...
Epoch 9/10
 24803/24803 [==============================] - 58s 2ms/sample - loss: 0.0352 - accuracy: 0.9878 - val_loss: 0.0000e+00 - val_accuracy: 1.0000
 Epoch 10/10
 24803/24803 [==============================] - 58s 2ms/sample - loss: 0.0373 - accuracy: 0.9865 - val_loss: 0.0000e+00 - val_accuracy: 1.0000
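
Training for a fixed 10 epochs is a simple choice; if you would rather stop automatically once the validation loss stops improving, Keras callbacks support that (an optional variation, not part of the original run):

from tensorflow.keras.callbacks import EarlyStopping

early_stop = EarlyStopping(monitor="val_loss", patience=2, restore_best_weights=True)
history = model.fit(X_train, y_train, epochs=10,
                    validation_data=(X_test, y_test),
                    callbacks=[early_stop]) # stops early if val_loss plateaus for 2 epochs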

Results

accuracy = history.history['accuracy'][-1]*100
 loss = history.history['loss'][-1]*100
 val_accuracy = history.history['val_accuracy'][-1]*100
 val_loss = history.history['val_loss'][-1]*100
print('Accuracy:', accuracy,
    '\nLoss:', loss,
     '\nValidation Accuracy:', val_accuracy,
     '\nValidation Loss:', val_loss
 )
Accuracy: 98.64532351493835 
 Loss: 3.732407123270176 
 Validation Accuracy: 100.0 
 Validation Loss: 0.0
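
Since matplotlib.pyplot is already imported, the same history object can also be plotted to see how accuracy evolved over the epochs:

plt.plot(history.history['accuracy'], label='train accuracy')
plt.plot(history.history['val_accuracy'], label='validation accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.show()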

We have achieved 98% accuracy!
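
To put the classifier to work on a new image, apply the same preprocessing as the training data. A minimal sketch, where cell.png is a placeholder path and the 0 = Parasitized mapping follows the loading loop above:

img = cv2.imread("cell.png") # placeholder path; same reader as training
x = np.array(Image.fromarray(img, 'RGB').resize((50, 50)))[np.newaxis, ...] / 255.0 # shape (1, 50, 50, 3)
pred = model.predict(x)
print("Parasitized" if pred[0].argmax() == 0 else "Uninfected")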

-- cgit v1.2.3