From dde266a520b03a991e49cac94509b58e7b10e7f2 Mon Sep 17 00:00:00 2001 From: Navan Chauhan Date: Mon, 2 Mar 2020 14:06:59 +0530 Subject: Publish deploy 2020-03-02 14:06 --- Themes/styles 4.css | 398 +++++++++ about/index 4.html | 1 + assets/disqus 4.js | 5 + .../gciTales/01-teachableMachines/01-collect 4.png | Bin 0 -> 3148125 bytes .../gciTales/01-teachableMachines/01-collect 5.png | Bin 0 -> 3148125 bytes .../gciTales/01-teachableMachines/02-train 4.png | Bin 0 -> 1673773 bytes .../gciTales/01-teachableMachines/03-label 4.png | Bin 0 -> 134577 bytes .../gciTales/01-teachableMachines/03-label 5.png | Bin 0 -> 134577 bytes .../gciTales/01-teachableMachines/04-alert 4.png | Bin 0 -> 45710 bytes .../gciTales/01-teachableMachines/04-alert 5.png | Bin 0 -> 45710 bytes assets/gciTales/01-teachableMachines/05-html 4.png | Bin 0 -> 15084 bytes assets/gciTales/01-teachableMachines/05-html 5.png | Bin 0 -> 15084 bytes assets/gciTales/01-teachableMachines/06-js 4.png | Bin 0 -> 117349 bytes assets/gciTales/01-teachableMachines/06-js 5.png | Bin 0 -> 117349 bytes assets/gciTales/01-teachableMachines/07-eg 4.png | Bin 0 -> 787109 bytes assets/gciTales/01-teachableMachines/07-eg 5.png | Bin 0 -> 787109 bytes assets/gciTales/01-teachableMachines/08-eg 4.png | Bin 0 -> 832865 bytes assets/gciTales/01-teachableMachines/08-eg 5.png | Bin 0 -> 832865 bytes assets/gciTales/03-regression/1 4.png | Bin 0 -> 39478 bytes assets/gciTales/03-regression/1 5.png | Bin 0 -> 39478 bytes assets/gciTales/03-regression/2 4.png | Bin 0 -> 67134 bytes assets/gciTales/03-regression/2 5.png | Bin 0 -> 67134 bytes assets/gciTales/03-regression/3 4.png | Bin 0 -> 59514 bytes assets/gciTales/03-regression/3 5.png | Bin 0 -> 59514 bytes assets/gciTales/03-regression/4 4.png | Bin 0 -> 58153 bytes assets/gciTales/03-regression/4 5.png | Bin 0 -> 58153 bytes assets/gciTales/03-regression/5 4.png | Bin 0 -> 58824 bytes assets/gciTales/03-regression/5 5.png | Bin 0 -> 58824 bytes assets/gciTales/03-regression/6 4.png | Bin 0 -> 59554 bytes assets/gciTales/03-regression/6 5.png | Bin 0 -> 59554 bytes assets/manup.min 4.js | 1 + assets/posts/open-peeps/ex-1 2.svg | 1 + assets/posts/open-peeps/ex-1 3.svg | 1 + assets/posts/open-peeps/ex-1.svg | 1 + "assets/r\303\251sum\303\251 4.pdf" | Bin 0 -> 90006 bytes feed 4.rss | 941 +++++++++++++++++++++ feed.rss | 5 +- ...b-d347-476a-232d-9568839851cd.webPlatform 3.png | Bin 0 -> 1711 bytes ...b-d347-476a-232d-9568839851cd.webPlatform 4.png | Bin 0 -> 1711 bytes ...b-d347-476a-232d-9568839851cd.webPlatform 5.png | Bin 0 -> 1711 bytes ...6-4866-93de-8df5-e0e6a3c65da1.webPlatform 3.png | Bin 0 -> 748 bytes ...6-4866-93de-8df5-e0e6a3c65da1.webPlatform 4.png | Bin 0 -> 748 bytes ...6-4866-93de-8df5-e0e6a3c65da1.webPlatform 5.png | Bin 0 -> 748 bytes ...c-6c7c-ffb8-df8d-d2fad23f50b0.webPlatform 4.png | Bin 0 -> 471 bytes ...c-6c7c-ffb8-df8d-d2fad23f50b0.webPlatform 5.png | Bin 0 -> 471 bytes ...0-557f-0e4b-3d76-127534525db9.webPlatform 4.png | Bin 0 -> 600 bytes ...0-557f-0e4b-3d76-127534525db9.webPlatform 5.png | Bin 0 -> 600 bytes ...7-2e71-90d8-67a7-587163282ebf.webPlatform 3.png | Bin 0 -> 3127 bytes ...7-2e71-90d8-67a7-587163282ebf.webPlatform 4.png | Bin 0 -> 3127 bytes ...7-2e71-90d8-67a7-587163282ebf.webPlatform 5.png | Bin 0 -> 3127 bytes ...e-b615-96cd-3e18-ab4307c859a0.webPlatform 4.png | Bin 0 -> 1674 bytes ...e-b615-96cd-3e18-ab4307c859a0.webPlatform 5.png | Bin 0 -> 1674 bytes ...b-2a6c-0abc-136c-8c8faf49c71b.webPlatform 4.png | Bin 0 -> 1061 bytes 
...b-2a6c-0abc-136c-8c8faf49c71b.webPlatform 5.png | Bin 0 -> 1061 bytes ...8-92e3-932f-5388-7731928b5692.webPlatform 3.png | Bin 0 -> 879 bytes ...8-92e3-932f-5388-7731928b5692.webPlatform 4.png | Bin 0 -> 879 bytes ...8-92e3-932f-5388-7731928b5692.webPlatform 5.png | Bin 0 -> 879 bytes ...6-fd1b-b2d3-3627-cef4fa224e25.webPlatform 3.png | Bin 0 -> 789 bytes ...6-fd1b-b2d3-3627-cef4fa224e25.webPlatform 4.png | Bin 0 -> 789 bytes ...6-fd1b-b2d3-3627-cef4fa224e25.webPlatform 5.png | Bin 0 -> 789 bytes ...3-cfd0-b52e-ca49-1db0cc292b7d.webPlatform 3.png | Bin 0 -> 577 bytes ...3-cfd0-b52e-ca49-1db0cc292b7d.webPlatform 4.png | Bin 0 -> 577 bytes ...3-cfd0-b52e-ca49-1db0cc292b7d.webPlatform 5.png | Bin 0 -> 577 bytes ...9-56cb-2a63-7e8b-ac62a038a023.webPlatform 4.png | Bin 0 -> 1033 bytes ...9-56cb-2a63-7e8b-ac62a038a023.webPlatform 5.png | Bin 0 -> 1033 bytes ...6-4612-c284-055f-58850c0730bd.webPlatform 3.png | Bin 0 -> 984 bytes ...6-4612-c284-055f-58850c0730bd.webPlatform 4.png | Bin 0 -> 984 bytes ...6-4612-c284-055f-58850c0730bd.webPlatform 5.png | Bin 0 -> 984 bytes ...3-85f5-62b0-c68f-2faa4aaea42b.webPlatform 4.png | Bin 0 -> 11426 bytes ...3-85f5-62b0-c68f-2faa4aaea42b.webPlatform 5.png | Bin 0 -> 11426 bytes ...d-0299-9db6-3747-c7aeaaaa37d0.webPlatform 3.png | Bin 0 -> 1517 bytes ...d-0299-9db6-3747-c7aeaaaa37d0.webPlatform 4.png | Bin 0 -> 1517 bytes ...d-0299-9db6-3747-c7aeaaaa37d0.webPlatform 5.png | Bin 0 -> 1517 bytes ...8-7e86-1f02-565e-39dfab41fe36.webPlatform 3.png | Bin 0 -> 24811 bytes ...8-7e86-1f02-565e-39dfab41fe36.webPlatform 4.png | Bin 0 -> 24811 bytes ...8-7e86-1f02-565e-39dfab41fe36.webPlatform 5.png | Bin 0 -> 24811 bytes ...1-f17f-ff49-3f97-e942f202bebf.webPlatform 3.png | Bin 0 -> 1293 bytes ...1-f17f-ff49-3f97-e942f202bebf.webPlatform 4.png | Bin 0 -> 1293 bytes ...1-f17f-ff49-3f97-e942f202bebf.webPlatform 5.png | Bin 0 -> 1293 bytes ...f-630b-bafd-7c7d-e1287b98a969.webPlatform 3.png | Bin 0 -> 814 bytes ...f-630b-bafd-7c7d-e1287b98a969.webPlatform 4.png | Bin 0 -> 814 bytes ...f-630b-bafd-7c7d-e1287b98a969.webPlatform 5.png | Bin 0 -> 814 bytes ...a-861c-78c0-0919-07a886e57304.webPlatform 3.png | Bin 0 -> 3966 bytes ...a-861c-78c0-0919-07a886e57304.webPlatform 4.png | Bin 0 -> 3966 bytes ...a-861c-78c0-0919-07a886e57304.webPlatform 5.png | Bin 0 -> 3966 bytes ...5-fff5-aa39-9f7f-fdd3024d4056.webPlatform 3.png | Bin 0 -> 1363 bytes ...5-fff5-aa39-9f7f-fdd3024d4056.webPlatform 4.png | Bin 0 -> 1363 bytes ...5-fff5-aa39-9f7f-fdd3024d4056.webPlatform 5.png | Bin 0 -> 1363 bytes images/favicon 3.png | Bin 0 -> 411 bytes images/favicon 4.png | Bin 0 -> 411 bytes images/favicon 5.png | Bin 0 -> 411 bytes images/logo 3.png | Bin 0 -> 498 bytes images/logo 4.png | Bin 0 -> 498 bytes images/logo 5.png | Bin 0 -> 498 bytes images/me 3.jpeg | Bin 0 -> 105079 bytes images/me 4.jpeg | Bin 0 -> 105079 bytes images/me 5.jpeg | Bin 0 -> 105079 bytes index 4.html | 1 + index.html | 2 +- manifest 4.json | 119 +++ posts/2010-01-24-experiments/index 4.html | 1 + posts/2010-01-24-experiments/index 8.html | 1 + .../index 2.html | 123 +++ .../index 5.html | 123 +++ posts/2019-12-08-Splitting-Zips/index 2.html | 10 + posts/2019-12-08-Splitting-Zips/index 5.html | 10 + .../index 2.html | 23 + .../index 5.html | 23 + .../index 2.html | 369 ++++++++ .../index 5.html | 369 ++++++++ posts/2019-12-22-Fake-News-Detector/index 2.html | 173 ++++ posts/2019-12-22-Fake-News-Detector/index 5.html | 173 ++++ .../index 2.html | 19 + .../index 5.html | 19 + .../index 2.html | 9 + .../index 5.html | 9 + 
.../index 2.html | 213 +++++ .../index 5.html | 213 +++++ .../index 2.html | 1 + .../index 5.html | 1 + posts/2020-03-02-Open-Peeps/index 2.html | 1 + posts/2020-03-02-Open-Peeps/index 4.html | 4 + posts/2020-03-02-Open-Peeps/index.html | 4 + posts/hello-world/index 2.html | 1 + posts/hello-world/index 5.html | 1 + posts/index 4.html | 1 + posts/index.html | 2 +- .../index 4.html | 7 + .../index 8.html | 7 + publications/index 4.html | 1 + pwabuilder-sw 4.js | 83 ++ pwabuilder-sw-register 4.js | 19 + sitemap 4.xml | 1 + sitemap.xml | 2 +- styles 4.css | 398 +++++++++ tags/article/index 4.html | 1 + tags/article/index 8.html | 1 + tags/article/index.html | 2 +- tags/codesnippet/index 11.html | 1 + tags/codesnippet/index 5.html | 1 + tags/colab/index 4.html | 1 + tags/colab/index 8.html | 1 + tags/digitalart/index 2.html | 1 + tags/digitalart/index 4.html | 1 + tags/digitalart/index.html | 1 + tags/experiment/index 11.html | 1 + tags/experiment/index 5.html | 1 + tags/helloworld/index 2.html | 1 + tags/helloworld/index 5.html | 1 + tags/index 4.html | 1 + tags/index.html | 2 +- tags/kaggle/index 4.html | 1 + tags/kaggle/index 8.html | 1 + tags/linux/index 4.html | 1 + tags/linux/index 8.html | 1 + tags/publication/index 4.html | 1 + tags/publication/index 8.html | 1 + tags/raspberrypi/index 4.html | 1 + tags/raspberrypi/index 6.html | 1 + tags/swiftui/index 2.html | 1 + tags/swiftui/index 5.html | 1 + tags/tensorflow/index 4.html | 1 + tags/tensorflow/index 8.html | 1 + tags/turicreate/index 4.html | 1 + tags/turicreate/index 6.html | 1 + tags/tutorial/index 4.html | 1 + tags/tutorial/index 6.html | 1 + 167 files changed, 3918 insertions(+), 6 deletions(-) create mode 100644 Themes/styles 4.css create mode 100644 about/index 4.html create mode 100644 assets/disqus 4.js create mode 100644 assets/gciTales/01-teachableMachines/01-collect 4.png create mode 100644 assets/gciTales/01-teachableMachines/01-collect 5.png create mode 100644 assets/gciTales/01-teachableMachines/02-train 4.png create mode 100644 assets/gciTales/01-teachableMachines/03-label 4.png create mode 100644 assets/gciTales/01-teachableMachines/03-label 5.png create mode 100644 assets/gciTales/01-teachableMachines/04-alert 4.png create mode 100644 assets/gciTales/01-teachableMachines/04-alert 5.png create mode 100644 assets/gciTales/01-teachableMachines/05-html 4.png create mode 100644 assets/gciTales/01-teachableMachines/05-html 5.png create mode 100644 assets/gciTales/01-teachableMachines/06-js 4.png create mode 100644 assets/gciTales/01-teachableMachines/06-js 5.png create mode 100644 assets/gciTales/01-teachableMachines/07-eg 4.png create mode 100644 assets/gciTales/01-teachableMachines/07-eg 5.png create mode 100644 assets/gciTales/01-teachableMachines/08-eg 4.png create mode 100644 assets/gciTales/01-teachableMachines/08-eg 5.png create mode 100644 assets/gciTales/03-regression/1 4.png create mode 100644 assets/gciTales/03-regression/1 5.png create mode 100644 assets/gciTales/03-regression/2 4.png create mode 100644 assets/gciTales/03-regression/2 5.png create mode 100644 assets/gciTales/03-regression/3 4.png create mode 100644 assets/gciTales/03-regression/3 5.png create mode 100644 assets/gciTales/03-regression/4 4.png create mode 100644 assets/gciTales/03-regression/4 5.png create mode 100644 assets/gciTales/03-regression/5 4.png create mode 100644 assets/gciTales/03-regression/5 5.png create mode 100644 assets/gciTales/03-regression/6 4.png create mode 100644 assets/gciTales/03-regression/6 5.png create mode 100644 
assets/manup.min 4.js create mode 100644 assets/posts/open-peeps/ex-1 2.svg create mode 100644 assets/posts/open-peeps/ex-1 3.svg create mode 100644 assets/posts/open-peeps/ex-1.svg create mode 100644 "assets/r\303\251sum\303\251 4.pdf" create mode 100644 feed 4.rss create mode 100644 images/04d0580b-d347-476a-232d-9568839851cd.webPlatform 3.png create mode 100644 images/04d0580b-d347-476a-232d-9568839851cd.webPlatform 4.png create mode 100644 images/04d0580b-d347-476a-232d-9568839851cd.webPlatform 5.png create mode 100644 images/14a6e126-4866-93de-8df5-e0e6a3c65da1.webPlatform 3.png create mode 100644 images/14a6e126-4866-93de-8df5-e0e6a3c65da1.webPlatform 4.png create mode 100644 images/14a6e126-4866-93de-8df5-e0e6a3c65da1.webPlatform 5.png create mode 100644 images/15294abc-6c7c-ffb8-df8d-d2fad23f50b0.webPlatform 4.png create mode 100644 images/15294abc-6c7c-ffb8-df8d-d2fad23f50b0.webPlatform 5.png create mode 100644 images/6b5f7f70-557f-0e4b-3d76-127534525db9.webPlatform 4.png create mode 100644 images/6b5f7f70-557f-0e4b-3d76-127534525db9.webPlatform 5.png create mode 100644 images/82e24f17-2e71-90d8-67a7-587163282ebf.webPlatform 3.png create mode 100644 images/82e24f17-2e71-90d8-67a7-587163282ebf.webPlatform 4.png create mode 100644 images/82e24f17-2e71-90d8-67a7-587163282ebf.webPlatform 5.png create mode 100644 images/8c0ffe9e-b615-96cd-3e18-ab4307c859a0.webPlatform 4.png create mode 100644 images/8c0ffe9e-b615-96cd-3e18-ab4307c859a0.webPlatform 5.png create mode 100644 images/9384518b-2a6c-0abc-136c-8c8faf49c71b.webPlatform 4.png create mode 100644 images/9384518b-2a6c-0abc-136c-8c8faf49c71b.webPlatform 5.png create mode 100644 images/9bf4aee8-92e3-932f-5388-7731928b5692.webPlatform 3.png create mode 100644 images/9bf4aee8-92e3-932f-5388-7731928b5692.webPlatform 4.png create mode 100644 images/9bf4aee8-92e3-932f-5388-7731928b5692.webPlatform 5.png create mode 100644 images/9dc22996-fd1b-b2d3-3627-cef4fa224e25.webPlatform 3.png create mode 100644 images/9dc22996-fd1b-b2d3-3627-cef4fa224e25.webPlatform 4.png create mode 100644 images/9dc22996-fd1b-b2d3-3627-cef4fa224e25.webPlatform 5.png create mode 100644 images/afd91c53-cfd0-b52e-ca49-1db0cc292b7d.webPlatform 3.png create mode 100644 images/afd91c53-cfd0-b52e-ca49-1db0cc292b7d.webPlatform 4.png create mode 100644 images/afd91c53-cfd0-b52e-ca49-1db0cc292b7d.webPlatform 5.png create mode 100644 images/b0cac729-56cb-2a63-7e8b-ac62a038a023.webPlatform 4.png create mode 100644 images/b0cac729-56cb-2a63-7e8b-ac62a038a023.webPlatform 5.png create mode 100644 images/bb0aca46-4612-c284-055f-58850c0730bd.webPlatform 3.png create mode 100644 images/bb0aca46-4612-c284-055f-58850c0730bd.webPlatform 4.png create mode 100644 images/bb0aca46-4612-c284-055f-58850c0730bd.webPlatform 5.png create mode 100644 images/c5840a63-85f5-62b0-c68f-2faa4aaea42b.webPlatform 4.png create mode 100644 images/c5840a63-85f5-62b0-c68f-2faa4aaea42b.webPlatform 5.png create mode 100644 images/cbac5b1d-0299-9db6-3747-c7aeaaaa37d0.webPlatform 3.png create mode 100644 images/cbac5b1d-0299-9db6-3747-c7aeaaaa37d0.webPlatform 4.png create mode 100644 images/cbac5b1d-0299-9db6-3747-c7aeaaaa37d0.webPlatform 5.png create mode 100644 images/e429a798-7e86-1f02-565e-39dfab41fe36.webPlatform 3.png create mode 100644 images/e429a798-7e86-1f02-565e-39dfab41fe36.webPlatform 4.png create mode 100644 images/e429a798-7e86-1f02-565e-39dfab41fe36.webPlatform 5.png create mode 100644 images/f1579c61-f17f-ff49-3f97-e942f202bebf.webPlatform 3.png create mode 100644 
images/f1579c61-f17f-ff49-3f97-e942f202bebf.webPlatform 4.png create mode 100644 images/f1579c61-f17f-ff49-3f97-e942f202bebf.webPlatform 5.png create mode 100644 images/f178697f-630b-bafd-7c7d-e1287b98a969.webPlatform 3.png create mode 100644 images/f178697f-630b-bafd-7c7d-e1287b98a969.webPlatform 4.png create mode 100644 images/f178697f-630b-bafd-7c7d-e1287b98a969.webPlatform 5.png create mode 100644 images/f400aaaa-861c-78c0-0919-07a886e57304.webPlatform 3.png create mode 100644 images/f400aaaa-861c-78c0-0919-07a886e57304.webPlatform 4.png create mode 100644 images/f400aaaa-861c-78c0-0919-07a886e57304.webPlatform 5.png create mode 100644 images/f7842765-fff5-aa39-9f7f-fdd3024d4056.webPlatform 3.png create mode 100644 images/f7842765-fff5-aa39-9f7f-fdd3024d4056.webPlatform 4.png create mode 100644 images/f7842765-fff5-aa39-9f7f-fdd3024d4056.webPlatform 5.png create mode 100644 images/favicon 3.png create mode 100644 images/favicon 4.png create mode 100644 images/favicon 5.png create mode 100644 images/logo 3.png create mode 100644 images/logo 4.png create mode 100644 images/logo 5.png create mode 100644 images/me 3.jpeg create mode 100644 images/me 4.jpeg create mode 100644 images/me 5.jpeg create mode 100644 index 4.html create mode 100644 manifest 4.json create mode 100644 posts/2010-01-24-experiments/index 4.html create mode 100644 posts/2010-01-24-experiments/index 8.html create mode 100644 posts/2019-12-08-Image-Classifier-Tensorflow/index 2.html create mode 100644 posts/2019-12-08-Image-Classifier-Tensorflow/index 5.html create mode 100644 posts/2019-12-08-Splitting-Zips/index 2.html create mode 100644 posts/2019-12-08-Splitting-Zips/index 5.html create mode 100644 posts/2019-12-10-TensorFlow-Model-Prediction/index 2.html create mode 100644 posts/2019-12-10-TensorFlow-Model-Prediction/index 5.html create mode 100644 posts/2019-12-16-TensorFlow-Polynomial-Regression/index 2.html create mode 100644 posts/2019-12-16-TensorFlow-Polynomial-Regression/index 5.html create mode 100644 posts/2019-12-22-Fake-News-Detector/index 2.html create mode 100644 posts/2019-12-22-Fake-News-Detector/index 5.html create mode 100644 posts/2020-01-14-Converting-between-PIL-NumPy/index 2.html create mode 100644 posts/2020-01-14-Converting-between-PIL-NumPy/index 5.html create mode 100644 posts/2020-01-15-Setting-up-Kaggle-to-use-with-Colab/index 2.html create mode 100644 posts/2020-01-15-Setting-up-Kaggle-to-use-with-Colab/index 5.html create mode 100644 posts/2020-01-16-Image-Classifier-Using-Turicreate/index 2.html create mode 100644 posts/2020-01-16-Image-Classifier-Using-Turicreate/index 5.html create mode 100644 posts/2020-01-19-Connect-To-Bluetooth-Devices-Linux-Terminal/index 2.html create mode 100644 posts/2020-01-19-Connect-To-Bluetooth-Devices-Linux-Terminal/index 5.html create mode 100644 posts/2020-03-02-Open-Peeps/index 2.html create mode 100644 posts/2020-03-02-Open-Peeps/index 4.html create mode 100644 posts/2020-03-02-Open-Peeps/index.html create mode 100644 posts/hello-world/index 2.html create mode 100644 posts/hello-world/index 5.html create mode 100644 posts/index 4.html create mode 100644 publications/2019-05-14-Detecting-Driver-Fatigue-Over-Speeding-and-Speeding-up-Post-Accident-Response/index 4.html create mode 100644 publications/2019-05-14-Detecting-Driver-Fatigue-Over-Speeding-and-Speeding-up-Post-Accident-Response/index 8.html create mode 100644 publications/index 4.html create mode 100644 pwabuilder-sw 4.js create mode 100644 pwabuilder-sw-register 4.js create mode 100644 sitemap 
4.xml create mode 100644 styles 4.css create mode 100644 tags/article/index 4.html create mode 100644 tags/article/index 8.html create mode 100644 tags/codesnippet/index 11.html create mode 100644 tags/codesnippet/index 5.html create mode 100644 tags/colab/index 4.html create mode 100644 tags/colab/index 8.html create mode 100644 tags/digitalart/index 2.html create mode 100644 tags/digitalart/index 4.html create mode 100644 tags/digitalart/index.html create mode 100644 tags/experiment/index 11.html create mode 100644 tags/experiment/index 5.html create mode 100644 tags/helloworld/index 2.html create mode 100644 tags/helloworld/index 5.html create mode 100644 tags/index 4.html create mode 100644 tags/kaggle/index 4.html create mode 100644 tags/kaggle/index 8.html create mode 100644 tags/linux/index 4.html create mode 100644 tags/linux/index 8.html create mode 100644 tags/publication/index 4.html create mode 100644 tags/publication/index 8.html create mode 100644 tags/raspberrypi/index 4.html create mode 100644 tags/raspberrypi/index 6.html create mode 100644 tags/swiftui/index 2.html create mode 100644 tags/swiftui/index 5.html create mode 100644 tags/tensorflow/index 4.html create mode 100644 tags/tensorflow/index 8.html create mode 100644 tags/turicreate/index 4.html create mode 100644 tags/turicreate/index 6.html create mode 100644 tags/tutorial/index 4.html create mode 100644 tags/tutorial/index 6.html diff --git a/Themes/styles 4.css b/Themes/styles 4.css new file mode 100644 index 0000000..089337b --- /dev/null +++ b/Themes/styles 4.css @@ -0,0 +1,398 @@ +* { + margin: 0; + padding: 0; + box-sizing: border-box; + font-size: 16px; + -webkit-text-size-adjust: 100%; +} + +body { + background: #fff; + color: #000; + font-family: -apple-system, BlinkMacSystemFont, Helvetica, Arial; + text-align: center; +} + +.wrapper { + max-width: 900px; + margin-left: auto; + margin-right: auto; + padding: 40px; + text-align: left; +} + +header { + + + position: relative; + color: #ededed; + line-height: 1.5em; + padding: 0 20px; +} + +/* + +header { + background: #ededed; + margin-bottom: 0em; + padding-bottom: 2em; + left: 0px; + top: 0px; + height: 8em; + width: 100%; +} +.header-background { + background-image: url(images/logo.png); + background-size: 100% 100%; + background-repeat: no-repeat; + background-size: cover; + background-position: center; + height: 200px; +} +*/ + +header .wrapper { + padding-top: 20px; + padding-bottom: 20px; + text-align: left; +} + +header a { + text-decoration: none; +} + +header .site-name { + color: #000; + margin: 0; + cursor: pointer; + font-weight: 200; + font-size: 2.3em; + letter-spacing: 1px; +} + +nav { + /*margin-top: 0.5em;*/ + text-align: left; /* right */ +} + +nav li { + margin-top: 0.5em; + display: inline-block; + background-color: #000; + color: #ddd; + padding: 4px 6px; + border-radius: 5px; + margin-right: 5px; + +} + +nav li:hover { + color: #000; + background-color: #ddd; +} +h1 { + margin-bottom: 20px; + font-size: 2em; +} + +h2 { + margin: 20px 0; +} + +p { + margin-bottom: 10px; +} + +a { + color: inherit; + +} + +.description { + margin-bottom: 20px; +} + +.item-list > li { + display: block; + padding: 20px; + border-radius: 20px; + background-color: #eee; + margin-bottom: 20px +} + +.item-list > li:last-child { + margin-bottom: 0; +} + +.item-list h1 { + margin-bottom: 0px; /*15px*/ + font-size: 1.3em; +} +.item-list a { + text-decoration: none; +} + +.item-list p { + margin-bottom: 0; +} + +.reading-time { + display: inline-block; + 
border-radius: 5px; + background-color: #ddd; + color: #000; + padding: 4px 4px; + margin-bottom: 5px; + margin-right: 5px; + +} + +.tag-list { + margin-bottom: 5px; /* 15px */ +} + +.tag-list li, +.tag { + display: inline-block; + background-color: #000; + color: #ddd; + padding: 4px 6px; + border-radius: 5px; + margin-right: 5px; + margin-top: 0.5em; +} + +.tag-list a, +.tag a { + text-decoration: none; +} + +.item-page .tag-list { + display: inline-block; +} + +.content { + margin-bottom: 40px; +} + +.browse-all { + display: block; + margin-bottom: 30px; +} + +.all-tags li { + font-size: 1.4em; + margin-right: 10px; + padding: 6px 10px; + margin-top: 1em; +} + +img { + max-width: 100%; + margin-bottom: 1em; + margin-top: 1em; + width: auto\9; + height: auto; + vertical-align: middle; + border: 0; + -ms-interpolation-mode: bicubic; +} + +footer { + color: #000; +} + + + +pre { + overflow-x: auto; + font-family: Monaco,Consolas,"Lucida Console",monospace; + display: block; + background-color: #fdf6e3; + color: #586e75; + margin-bottom: 1em; + margin-top: 1em; + border-radius: 4px; +} + +.highlight { background-color: #fdf6e3; color: #586e75; } +.highlight .c { color: #627272; } +.highlight .err { color: #586e75; } +.highlight .g { color: #586e75; } +.highlight .k { color: #677600; } +.highlight .l { color: #586e75; } +.highlight .n { color: #586e75; } +.highlight .o { color: #677600; } +.highlight .x { color: #c14715; } +.highlight .p { color: #586e75; } +.highlight .cm { color: #627272; } +.highlight .cp { color: #677600; } +.highlight .c1 { color: #627272; } +.highlight .cs { color: #677600; } +.highlight .gd { color: #217d74; } +.highlight .ge { color: #586e75; font-style: italic; } +.highlight .gr { color: #d72825; } +.highlight .gh { color: #c14715; } +.highlight .gi { color: #677600; } +.highlight .go { color: #586e75; } +.highlight .gp { color: #586e75; } +.highlight .gs { color: #586e75; font-weight: bold; } +.highlight .gu { color: #c14715; } +.highlight .gt { color: #586e75; } +.highlight .kc { color: #c14715; } +.highlight .kd { color: #1f76b6; } +.highlight .kn { color: #677600; } +.highlight .kp { color: #677600; } +.highlight .kr { color: #1f76b6; } +.highlight .kt { color: #d72825; } +.highlight .ld { color: #586e75; } +.highlight .m { color: #217d74; } +.highlight .s { color: #217d74; } +.highlight .na { color: #586e75; } +.highlight .nb { color: #8d6900; } +.highlight .nc { color: #1f76b6; } +.highlight .no { color: #c14715; } +.highlight .nd { color: #1f76b6; } +.highlight .ni { color: #c14715; } +.highlight .ne { color: #c14715; } +.highlight .nf { color: #1f76b6; } +.highlight .nl { color: #586e75; } +.highlight .nn { color: #586e75; } +.highlight .nx { color: #586e75; } +.highlight .py { color: #586e75; } +.highlight .nt { color: #1f76b6; } +.highlight .nv { color: #1f76b6; } +.highlight .ow { color: #677600; } +.highlight .w { color: #586e75; } +.highlight .mf { color: #217d74; } +.highlight .mh { color: #217d74; } +.highlight .mi { color: #217d74; } +.highlight .mo { color: #217d74; } +.highlight .sb { color: #627272; } +.highlight .sc { color: #217d74; } +.highlight .sd { color: #586e75; } +.highlight .s2 { color: #217d74; } +.highlight .se { color: #c14715; } +.highlight .sh { color: #586e75; } +.highlight .si { color: #217d74; } +.highlight .sx { color: #217d74; } +.highlight .sr { color: #d72825; } +.highlight .s1 { color: #217d74; } +.highlight .ss { color: #217d74; } +.highlight .bp { color: #1f76b6; } +.highlight .vc { color: #1f76b6; } +.highlight .vg { 
color: #1f76b6; } +.highlight .vi { color: #1f76b6; } +.highlight .il { color: #217d74; } + + +@media (prefers-color-scheme: dark) { + .reading-time { + background-color: #000; + color: #ddd; + } + body { + background-color: #222; + } + + body, + header .site-name { + color: #ddd; + } + nav li { + background-color: #ddd; + color: #000; + + } + nav li:hover { + color: #ddd; + background-color: #000; + } + + .item-list > li { + background-color: #333; + } + + header { + background-color: #000; + } + footer { + color: #ddd; + } + + pre { + background-color: #002b36; + color: #93a1a1; + } + + .highlight { background-color: #002b36; color: #93a1a1; } + .highlight .c { color: #759299; } + .highlight .err { color: #93a1a1; } + .highlight .g { color: #93a1a1; } + .highlight .k { color: #859900; } + .highlight .l { color: #93a1a1; } + .highlight .n { color: #93a1a1; } + .highlight .o { color: #859900; } + .highlight .x { color: #e9662f; } + .highlight .p { color: #93a1a1; } + .highlight .cm { color: #759299; } + .highlight .cp { color: #859900; } + .highlight .c1 { color: #759299; } + .highlight .cs { color: #859900; } + .highlight .gd { color: #2aa198; } + .highlight .ge { color: #93a1a1; font-style: italic; } + .highlight .gr { color: #e8625f; } + .highlight .gh { color: #e9662f; } + .highlight .gi { color: #859900; } + .highlight .go { color: #93a1a1; } + .highlight .gp { color: #93a1a1; } + .highlight .gs { color: #93a1a1; font-weight: bold; } + .highlight .gu { color: #e9662f; } + .highlight .gt { color: #93a1a1; } + .highlight .kc { color: #e9662f; } + .highlight .kd { color: #3294da; } + .highlight .kn { color: #859900; } + .highlight .kp { color: #859900; } + .highlight .kr { color: #3294da; } + .highlight .kt { color: #e8625f; } + .highlight .ld { color: #93a1a1; } + .highlight .m { color: #2aa198; } + .highlight .s { color: #2aa198; } + .highlight .na { color: #93a1a1; } + .highlight .nb { color: #B58900; } + .highlight .nc { color: #3294da; } + .highlight .no { color: #e9662f; } + .highlight .nd { color: #3294da; } + .highlight .ni { color: #e9662f; } + .highlight .ne { color: #e9662f; } + .highlight .nf { color: #3294da; } + .highlight .nl { color: #93a1a1; } + .highlight .nn { color: #93a1a1; } + .highlight .nx { color: #93a1a1; } + .highlight .py { color: #93a1a1; } + .highlight .nt { color: #3294da; } + .highlight .nv { color: #3294da; } + .highlight .ow { color: #859900; } + .highlight .w { color: #93a1a1; } + .highlight .mf { color: #2aa198; } + .highlight .mh { color: #2aa198; } + .highlight .mi { color: #2aa198; } + .highlight .mo { color: #2aa198; } + .highlight .sb { color: #759299; } + .highlight .sc { color: #2aa198; } + .highlight .sd { color: #93a1a1; } + .highlight .s2 { color: #2aa198; } + .highlight .se { color: #e9662f; } + .highlight .sh { color: #93a1a1; } + .highlight .si { color: #2aa198; } + .highlight .sx { color: #2aa198; } + .highlight .sr { color: #e8625f; } + .highlight .s1 { color: #2aa198; } + .highlight .ss { color: #2aa198; } + .highlight .bp { color: #3294da; } + .highlight .vc { color: #3294da; } + .highlight .vg { color: #3294da; } + .highlight .vi { color: #3294da; } + .highlight .il { color: #2aa198; } +} + diff --git a/about/index 4.html b/about/index 4.html new file mode 100644 index 0000000..2d98172 --- /dev/null +++ b/about/index 4.html @@ -0,0 +1 @@ +About Me | Navan Chauhan

About Me

Hi! My name is Navan Chauhan.

What do I like?

\ No newline at end of file diff --git a/assets/disqus 4.js b/assets/disqus 4.js new file mode 100644 index 0000000..0c52381 --- /dev/null +++ b/assets/disqus 4.js @@ -0,0 +1,5 @@ +(function() { + var t = document, + e = t.createElement("script"); + e.src = "https://navan-chauhan.disqus.com/embed.js", e.setAttribute("data-timestamp", +new Date), (t.head || t.body).appendChild(e) +})(); diff --git a/assets/gciTales/01-teachableMachines/01-collect 4.png b/assets/gciTales/01-teachableMachines/01-collect 4.png new file mode 100644 index 0000000..58e0b54 Binary files /dev/null and b/assets/gciTales/01-teachableMachines/01-collect 4.png differ diff --git a/assets/gciTales/01-teachableMachines/01-collect 5.png b/assets/gciTales/01-teachableMachines/01-collect 5.png new file mode 100644 index 0000000..58e0b54 Binary files /dev/null and b/assets/gciTales/01-teachableMachines/01-collect 5.png differ diff --git a/assets/gciTales/01-teachableMachines/02-train 4.png b/assets/gciTales/01-teachableMachines/02-train 4.png new file mode 100644 index 0000000..a69fd63 Binary files /dev/null and b/assets/gciTales/01-teachableMachines/02-train 4.png differ diff --git a/assets/gciTales/01-teachableMachines/03-label 4.png b/assets/gciTales/01-teachableMachines/03-label 4.png new file mode 100644 index 0000000..efe450d Binary files /dev/null and b/assets/gciTales/01-teachableMachines/03-label 4.png differ diff --git a/assets/gciTales/01-teachableMachines/03-label 5.png b/assets/gciTales/01-teachableMachines/03-label 5.png new file mode 100644 index 0000000..efe450d Binary files /dev/null and b/assets/gciTales/01-teachableMachines/03-label 5.png differ diff --git a/assets/gciTales/01-teachableMachines/04-alert 4.png b/assets/gciTales/01-teachableMachines/04-alert 4.png new file mode 100644 index 0000000..f648bad Binary files /dev/null and b/assets/gciTales/01-teachableMachines/04-alert 4.png differ diff --git a/assets/gciTales/01-teachableMachines/04-alert 5.png b/assets/gciTales/01-teachableMachines/04-alert 5.png new file mode 100644 index 0000000..f648bad Binary files /dev/null and b/assets/gciTales/01-teachableMachines/04-alert 5.png differ diff --git a/assets/gciTales/01-teachableMachines/05-html 4.png b/assets/gciTales/01-teachableMachines/05-html 4.png new file mode 100644 index 0000000..f917c07 Binary files /dev/null and b/assets/gciTales/01-teachableMachines/05-html 4.png differ diff --git a/assets/gciTales/01-teachableMachines/05-html 5.png b/assets/gciTales/01-teachableMachines/05-html 5.png new file mode 100644 index 0000000..f917c07 Binary files /dev/null and b/assets/gciTales/01-teachableMachines/05-html 5.png differ diff --git a/assets/gciTales/01-teachableMachines/06-js 4.png b/assets/gciTales/01-teachableMachines/06-js 4.png new file mode 100644 index 0000000..173a8aa Binary files /dev/null and b/assets/gciTales/01-teachableMachines/06-js 4.png differ diff --git a/assets/gciTales/01-teachableMachines/06-js 5.png b/assets/gciTales/01-teachableMachines/06-js 5.png new file mode 100644 index 0000000..173a8aa Binary files /dev/null and b/assets/gciTales/01-teachableMachines/06-js 5.png differ diff --git a/assets/gciTales/01-teachableMachines/07-eg 4.png b/assets/gciTales/01-teachableMachines/07-eg 4.png new file mode 100644 index 0000000..cc8198e Binary files /dev/null and b/assets/gciTales/01-teachableMachines/07-eg 4.png differ diff --git a/assets/gciTales/01-teachableMachines/07-eg 5.png b/assets/gciTales/01-teachableMachines/07-eg 5.png new file mode 100644 index 0000000..cc8198e Binary files 
/dev/null and b/assets/gciTales/01-teachableMachines/07-eg 5.png differ diff --git a/assets/gciTales/01-teachableMachines/08-eg 4.png b/assets/gciTales/01-teachableMachines/08-eg 4.png new file mode 100644 index 0000000..b1261fa Binary files /dev/null and b/assets/gciTales/01-teachableMachines/08-eg 4.png differ diff --git a/assets/gciTales/01-teachableMachines/08-eg 5.png b/assets/gciTales/01-teachableMachines/08-eg 5.png new file mode 100644 index 0000000..b1261fa Binary files /dev/null and b/assets/gciTales/01-teachableMachines/08-eg 5.png differ diff --git a/assets/gciTales/03-regression/1 4.png b/assets/gciTales/03-regression/1 4.png new file mode 100644 index 0000000..b07d172 Binary files /dev/null and b/assets/gciTales/03-regression/1 4.png differ diff --git a/assets/gciTales/03-regression/1 5.png b/assets/gciTales/03-regression/1 5.png new file mode 100644 index 0000000..b07d172 Binary files /dev/null and b/assets/gciTales/03-regression/1 5.png differ diff --git a/assets/gciTales/03-regression/2 4.png b/assets/gciTales/03-regression/2 4.png new file mode 100644 index 0000000..53531ad Binary files /dev/null and b/assets/gciTales/03-regression/2 4.png differ diff --git a/assets/gciTales/03-regression/2 5.png b/assets/gciTales/03-regression/2 5.png new file mode 100644 index 0000000..53531ad Binary files /dev/null and b/assets/gciTales/03-regression/2 5.png differ diff --git a/assets/gciTales/03-regression/3 4.png b/assets/gciTales/03-regression/3 4.png new file mode 100644 index 0000000..542d76e Binary files /dev/null and b/assets/gciTales/03-regression/3 4.png differ diff --git a/assets/gciTales/03-regression/3 5.png b/assets/gciTales/03-regression/3 5.png new file mode 100644 index 0000000..542d76e Binary files /dev/null and b/assets/gciTales/03-regression/3 5.png differ diff --git a/assets/gciTales/03-regression/4 4.png b/assets/gciTales/03-regression/4 4.png new file mode 100644 index 0000000..16101cd Binary files /dev/null and b/assets/gciTales/03-regression/4 4.png differ diff --git a/assets/gciTales/03-regression/4 5.png b/assets/gciTales/03-regression/4 5.png new file mode 100644 index 0000000..16101cd Binary files /dev/null and b/assets/gciTales/03-regression/4 5.png differ diff --git a/assets/gciTales/03-regression/5 4.png b/assets/gciTales/03-regression/5 4.png new file mode 100644 index 0000000..36b9c26 Binary files /dev/null and b/assets/gciTales/03-regression/5 4.png differ diff --git a/assets/gciTales/03-regression/5 5.png b/assets/gciTales/03-regression/5 5.png new file mode 100644 index 0000000..36b9c26 Binary files /dev/null and b/assets/gciTales/03-regression/5 5.png differ diff --git a/assets/gciTales/03-regression/6 4.png b/assets/gciTales/03-regression/6 4.png new file mode 100644 index 0000000..479d0e5 Binary files /dev/null and b/assets/gciTales/03-regression/6 4.png differ diff --git a/assets/gciTales/03-regression/6 5.png b/assets/gciTales/03-regression/6 5.png new file mode 100644 index 0000000..479d0e5 Binary files /dev/null and b/assets/gciTales/03-regression/6 5.png differ diff --git a/assets/manup.min 4.js b/assets/manup.min 4.js new file mode 100644 index 0000000..20bc3a1 --- /dev/null +++ b/assets/manup.min 4.js @@ -0,0 +1 @@ +var 
manUpObject,tagArray=[],linkArray=[],validMetaValues=[{name:"mobile-web-app-capable",manifestName:"display"},{name:"apple-mobile-web-app-capable",manifestName:"display"},{name:"apple-mobile-web-app-title",manifestName:"short_name"},{name:"application-name",manifestName:"short_name"},{name:"msapplication-TileColor",manifestName:"ms_TileColor"},{name:"msapplication-square70x70logo",manifestName:"icons",imageSize:"70x70"},{name:"msapplication-square150x150logo",manifestName:"icons",imageSize:"150x150"},{name:"msapplication-wide310x150logo",manifestName:"icons",imageSize:"310x150"},{name:"msapplication-square310x310logo",manifestName:"icons",imageSize:"310x310"}],validLinkValues=[{name:"apple-touch-icon",manifestName:"icons",imageSize:"152x152"},{name:"apple-touch-icon",manifestName:"icons",imageSize:"120x120"},{name:"apple-touch-icon",manifestName:"icons",imageSize:"76x76"},{name:"apple-touch-icon",manifestName:"icons",imageSize:"60x60"},{name:"apple-touch-icon",manifestName:"icons",imageSize:"57x57"},{name:"apple-touch-icon",manifestName:"icons",imageSize:"72x72"},{name:"apple-touch-icon",manifestName:"icons",imageSize:"114x114"},{name:"icon",manifestName:"icons",imageSize:"128x128"},{name:"icon",manifestName:"icons",imageSize:"192x192"}],generateFullMetaData=function(){for(var e=0;eAsset 1 \ No newline at end of file diff --git a/assets/posts/open-peeps/ex-1 3.svg b/assets/posts/open-peeps/ex-1 3.svg new file mode 100644 index 0000000..7831d9b --- /dev/null +++ b/assets/posts/open-peeps/ex-1 3.svg @@ -0,0 +1 @@ +Asset 1 \ No newline at end of file diff --git a/assets/posts/open-peeps/ex-1.svg b/assets/posts/open-peeps/ex-1.svg new file mode 100644 index 0000000..7831d9b --- /dev/null +++ b/assets/posts/open-peeps/ex-1.svg @@ -0,0 +1 @@ +Asset 1 \ No newline at end of file diff --git "a/assets/r\303\251sum\303\251 4.pdf" "b/assets/r\303\251sum\303\251 4.pdf" new file mode 100644 index 0000000..924ddb0 Binary files /dev/null and "b/assets/r\303\251sum\303\251 4.pdf" differ diff --git a/feed 4.rss b/feed 4.rss new file mode 100644 index 0000000..d81bfd7 --- /dev/null +++ b/feed 4.rss @@ -0,0 +1,941 @@ +Navan ChauhanWelcome to my personal fragment of the internet.https://navanchauhan.github.io/enMon, 2 Mar 2020 14:01:38 +0530Mon, 2 Mar 2020 14:01:38 +0530250https://navanchauhan.github.io/posts/2020-03-02-Open-PeepsOpen PeepsTrying out Open Peeps, a CC0 Libraryhttps://navanchauhan.github.io/posts/2020-03-02-Open-PeepsMon, 2 Mar 2020 13:52:00 +0530Open Peeps

About Open Peeps

Open Peeps is a hand-drawn illustration library to create scenes of people. You can use them in product illustration, marketing, comics, product states, user flows, personas, storyboarding, quinceañera invitations, or whatever you want! - Product Hunt

Some Examples

+ + +]]>
https://navanchauhan.github.io/posts/2020-01-19-Connect-To-Bluetooth-Devices-Linux-TerminalHow to setup Bluetooth on a Raspberry PiConnecting to Bluetooth Devices using terminal, tested on Raspberry Pi Zero Whttps://navanchauhan.github.io/posts/2020-01-19-Connect-To-Bluetooth-Devices-Linux-TerminalSun, 19 Jan 2020 15:27:00 +0530How to setup Bluetooth on a Raspberry Pi

This was tested on a Raspberry Pi Zero W

Enter Bluetooth Mode

pi@raspberrypi:~ $ bluetoothctl

[bluetooth]# agent on

[bluetooth]# default-agent

[bluetooth]# scan on

To Pair

While in Bluetooth mode

[bluetooth]# pair XX:XX:XX:XX:XX:XX

To exit bluetoothctl at any time, just type exit
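If you also want the Pi to remember and reconnect to the device, bluetoothctl provides trust and connect commands as well (shown here with the same placeholder address; exact behaviour can vary by device):

[bluetooth]# trust XX:XX:XX:XX:XX:XX

[bluetooth]# connect XX:XX:XX:XX:XX:XX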

]]>
https://navanchauhan.github.io/posts/2020-01-16-Image-Classifier-Using-TuricreateCreating a Custom Image Classifier using Turicreate to detect Smoke and FireTutorial on creating a custom Image Classifier using Turicreate and a dataset from Kagglehttps://navanchauhan.github.io/posts/2020-01-16-Image-Classifier-Using-TuricreateThu, 16 Jan 2020 10:36:00 +0530Creating a Custom Image Classifier using Turicreate to detect Smoke and Fire

For setting up Kaggle with Google Colab, please refer to my previous post

Dataset

Mounting Google Drive

import os +from google.colab import drive +drive.mount('/content/drive') +
+ +

Downloading Dataset from Kaggle

os.environ['KAGGLE_CONFIG_DIR'] = "/content/drive/My Drive/" +!kaggle datasets download ashutosh69/fire-and-smoke-dataset +!unzip "fire-and-smoke-dataset.zip" +
+ +

Pre-Processing

!mkdir default smoke fire +
+ +


!ls data/data/img_data/train/default/*.jpg +
+ +


img_1002.jpg img_20.jpg img_519.jpg img_604.jpg img_80.jpg +img_1003.jpg img_21.jpg img_51.jpg img_60.jpg img_8.jpg +img_1007.jpg img_22.jpg img_520.jpg img_61.jpg img_900.jpg +img_100.jpg img_23.jpg img_521.jpg 'img_62 (2).jpg' img_920.jpg +img_1014.jpg img_24.jpg 'img_52 (2).jpg' img_62.jpg img_921.jpg +img_1018.jpg img_29.jpg img_522.jpg 'img_63 (2).jpg' img_922.jpg +img_101.jpg img_3000.jpg img_523.jpg img_63.jpg img_923.jpg +img_1027.jpg img_335.jpg img_524.jpg img_66.jpg img_924.jpg +img_102.jpg img_336.jpg img_52.jpg img_67.jpg img_925.jpg +img_1042.jpg img_337.jpg img_530.jpg img_68.jpg img_926.jpg +img_1043.jpg img_338.jpg img_531.jpg img_700.jpg img_927.jpg +img_1046.jpg img_339.jpg 'img_53 (2).jpg' img_701.jpg img_928.jpg +img_1052.jpg img_340.jpg img_532.jpg img_702.jpg img_929.jpg +img_107.jpg img_341.jpg img_533.jpg img_703.jpg img_930.jpg +img_108.jpg img_3.jpg img_537.jpg img_704.jpg img_931.jpg +img_109.jpg img_400.jpg img_538.jpg img_705.jpg img_932.jpg +img_10.jpg img_471.jpg img_539.jpg img_706.jpg img_933.jpg +img_118.jpg img_472.jpg img_53.jpg img_707.jpg img_934.jpg +img_12.jpg img_473.jpg img_540.jpg img_708.jpg img_935.jpg +img_14.jpg img_488.jpg img_541.jpg img_709.jpg img_938.jpg +img_15.jpg img_489.jpg 'img_54 (2).jpg' img_70.jpg img_958.jpg +img_16.jpg img_490.jpg img_542.jpg img_710.jpg img_971.jpg +img_17.jpg img_491.jpg img_543.jpg 'img_71 (2).jpg' img_972.jpg +img_18.jpg img_492.jpg img_54.jpg img_71.jpg img_973.jpg +img_19.jpg img_493.jpg 'img_55 (2).jpg' img_72.jpg img_974.jpg +img_1.jpg img_494.jpg img_55.jpg img_73.jpg img_975.jpg +img_200.jpg img_495.jpg img_56.jpg img_74.jpg img_980.jpg +img_201.jpg img_496.jpg img_57.jpg img_75.jpg img_988.jpg +img_202.jpg img_497.jpg img_58.jpg img_76.jpg img_9.jpg +img_203.jpg img_4.jpg img_59.jpg img_77.jpg +img_204.jpg img_501.jpg img_601.jpg img_78.jpg +img_205.jpg img_502.jpg img_602.jpg img_79.jpg +img_206.jpg img_50.jpg img_603.jpg img_7.jpg +
+ +

The image files are not actually JPEG, thus we first need to save them in the correct format for Turicreate

from PIL import Image +import glob + + +folders = ["default","smoke","fire"] +for folder in folders: + n = 1 + for file in glob.glob("./data/data/img_data/train/" + folder + "/*.jpg"): + im = Image.open(file) + rgb_im = im.convert('RGB') + rgb_im.save((folder + "/" + str(n) + ".jpg"), quality=100) + n +=1 + for file in glob.glob("./data/data/img_data/train/" + folder + "/*.jpg"): + im = Image.open(file) + rgb_im = im.convert('RGB') + rgb_im.save((folder + "/" + str(n) + ".jpg"), quality=100) + n +=1 +
+ +


!mkdir train +!mv default ./train +!mv smoke ./train +!mv fire ./train +
+ +

Making the Image Classifier

Making an SFrame

!pip install turicreate +
+ +


import turicreate as tc +import os + +data = tc.image_analysis.load_images("./train", with_path=True) + +data["label"] = data["path"].apply(lambda path: os.path.basename(os.path.dirname(path))) + +print(data) + +data.save('fire-smoke.sframe') +
+ +


+-------------------------+------------------------+ +| path | image | ++-------------------------+------------------------+ +| ./train/default/1.jpg | Height: 224 Width: 224 | +| ./train/default/10.jpg | Height: 224 Width: 224 | +| ./train/default/100.jpg | Height: 224 Width: 224 | +| ./train/default/101.jpg | Height: 224 Width: 224 | +| ./train/default/102.jpg | Height: 224 Width: 224 | +| ./train/default/103.jpg | Height: 224 Width: 224 | +| ./train/default/104.jpg | Height: 224 Width: 224 | +| ./train/default/105.jpg | Height: 224 Width: 224 | +| ./train/default/106.jpg | Height: 224 Width: 224 | +| ./train/default/107.jpg | Height: 224 Width: 224 | ++-------------------------+------------------------+ +[2028 rows x 2 columns] +Note: Only the head of the SFrame is printed. +You can use print_rows(num_rows=m, num_columns=n) to print more rows and columns. ++-------------------------+------------------------+---------+ +| path | image | label | ++-------------------------+------------------------+---------+ +| ./train/default/1.jpg | Height: 224 Width: 224 | default | +| ./train/default/10.jpg | Height: 224 Width: 224 | default | +| ./train/default/100.jpg | Height: 224 Width: 224 | default | +| ./train/default/101.jpg | Height: 224 Width: 224 | default | +| ./train/default/102.jpg | Height: 224 Width: 224 | default | +| ./train/default/103.jpg | Height: 224 Width: 224 | default | +| ./train/default/104.jpg | Height: 224 Width: 224 | default | +| ./train/default/105.jpg | Height: 224 Width: 224 | default | +| ./train/default/106.jpg | Height: 224 Width: 224 | default | +| ./train/default/107.jpg | Height: 224 Width: 224 | default | ++-------------------------+------------------------+---------+ +[2028 rows x 3 columns] +Note: Only the head of the SFrame is printed. +You can use print_rows(num_rows=m, num_columns=n) to print more rows and columns. +
+ +

Making the Model

import turicreate as tc + +# Load the data +data = tc.SFrame('fire-smoke.sframe') + +# Make a train-test split +train_data, test_data = data.random_split(0.8) + +# Create the model +model = tc.image_classifier.create(train_data, target='label') + +# Save predictions to an SArray +predictions = model.predict(test_data) + +# Evaluate the model and print the results +metrics = model.evaluate(test_data) +print(metrics['accuracy']) + +# Save the model for later use in Turi Create +model.save('fire-smoke.model') + +# Export for use in Core ML +model.export_coreml('fire-smoke.mlmodel') +
+ +


Performing feature extraction on resized images... +Completed 64/1633 +Completed 128/1633 +Completed 192/1633 +Completed 256/1633 +Completed 320/1633 +Completed 384/1633 +Completed 448/1633 +Completed 512/1633 +Completed 576/1633 +Completed 640/1633 +Completed 704/1633 +Completed 768/1633 +Completed 832/1633 +Completed 896/1633 +Completed 960/1633 +Completed 1024/1633 +Completed 1088/1633 +Completed 1152/1633 +Completed 1216/1633 +Completed 1280/1633 +Completed 1344/1633 +Completed 1408/1633 +Completed 1472/1633 +Completed 1536/1633 +Completed 1600/1633 +Completed 1633/1633 +PROGRESS: Creating a validation set from 5 percent of training data. This may take a while. + You can set ``validation_set=None`` to disable validation tracking. + +Logistic regression: +-------------------------------------------------------- +Number of examples : 1551 +Number of classes : 3 +Number of feature columns : 1 +Number of unpacked features : 2048 +Number of coefficients : 4098 +Starting L-BFGS +-------------------------------------------------------- ++-----------+----------+-----------+--------------+-------------------+---------------------+ +| Iteration | Passes | Step size | Elapsed Time | Training Accuracy | Validation Accuracy | ++-----------+----------+-----------+--------------+-------------------+---------------------+ +| 0 | 6 | 0.018611 | 0.891830 | 0.553836 | 0.560976 | +| 1 | 10 | 0.390832 | 1.622383 | 0.744681 | 0.792683 | +| 2 | 11 | 0.488541 | 1.943987 | 0.733075 | 0.804878 | +| 3 | 14 | 2.442703 | 2.512545 | 0.727917 | 0.841463 | +| 4 | 15 | 2.442703 | 2.826964 | 0.861380 | 0.853659 | +| 9 | 28 | 2.340435 | 5.492035 | 0.941328 | 0.975610 | ++-----------+----------+-----------+--------------+-------------------+---------------------+ +Performing feature extraction on resized images... +Completed 64/395 +Completed 128/395 +Completed 192/395 +Completed 256/395 +Completed 320/395 +Completed 384/395 +Completed 395/395 +0.9316455696202531 +
+ +

We just got an accuracy of about 94% on the training data, 97% on the validation data, and roughly 93% on the held-out test data!
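If you want to reuse the saved model later without retraining, a minimal sketch could look like this (the test image path is hypothetical):

import turicreate as tc

# Load the model saved above
model = tc.load_model('fire-smoke.model')

# Classify a single new image (replace the path with one of your own images)
img = tc.Image('./test/img_1.jpg')
predictions = model.predict(tc.SFrame({'image': [img]}))
print(predictions)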

]]>
https://navanchauhan.github.io/posts/2020-01-15-Setting-up-Kaggle-to-use-with-ColabSetting up Kaggle to use with Google ColabTutorial on setting up kaggle, to use with Google Colabhttps://navanchauhan.github.io/posts/2020-01-15-Setting-up-Kaggle-to-use-with-ColabWed, 15 Jan 2020 23:36:00 +0530Setting up Kaggle to use with Google Colab

In order to access Kaggle datasets, you will need to have an account on Kaggle (which is free)

Grabbing Our Tokens

Go to Kaggle

Click on your User Profile and Click on My Account

Scroll down until you see Create New API Token

This will download your token as a JSON file

Copy the File to the root folder of your Google Drive

Setting up Colab

Mounting Google Drive

import os +from google.colab import drive +drive.mount('/content/drive') +
+ +

After this, click on the URL in the output section, log in, and then paste the auth code

Configuring Kaggle

os.environ['KAGGLE_CONFIG_DIR'] = "/content/drive/My Drive/" +
+ +

Voila! You can now download Kaggle datasets
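For example, with the config directory set, a dataset can be downloaded and unzipped straight from the notebook (the fire-and-smoke dataset is just an example):

!kaggle datasets download ashutosh69/fire-and-smoke-dataset
!unzip "fire-and-smoke-dataset.zip"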

]]>
https://navanchauhan.github.io/posts/2020-01-14-Converting-between-PIL-NumPyConverting between image and NumPy arrayShort code snippet for converting between PIL image and NumPy arrays.https://navanchauhan.github.io/posts/2020-01-14-Converting-between-PIL-NumPyTue, 14 Jan 2020 00:10:00 +0530Converting between image and NumPy array
import numpy +import PIL + +# Convert PIL Image to NumPy array +img = PIL.Image.open("foo.jpg") +arr = numpy.array(img) + +# Convert array to Image +img = PIL.Image.fromarray(arr) +
+ +

Saving an Image

try: + img.save(destination, "JPEG", quality=80, optimize=True, progressive=True) +except IOError: + PIL.ImageFile.MAXBLOCK = img.size[0] * img.size[1] + img.save(destination, "JPEG", quality=80, optimize=True, progressive=True) +
+ +
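As a quick example of why you might round-trip through NumPy, here is a minimal sketch (file names are placeholders) that manipulates the pixel values as an array before saving the result:

import numpy
from PIL import Image

img = Image.open("foo.jpg")
arr = numpy.array(img)

# Work on the pixels as a NumPy array, e.g. invert them
inverted = (255 - arr).astype(numpy.uint8)

# Convert back to a PIL Image and save
Image.fromarray(inverted).save("foo-inverted.jpg")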
]]>
https://navanchauhan.github.io/posts/2019-12-22-Fake-News-DetectorBuilding a Fake News Detector with TuricreateIn this tutorial we will build a fake news detecting app from scratch, using Turicreate for the machine learning model and SwiftUI for building the apphttps://navanchauhan.github.io/posts/2019-12-22-Fake-News-DetectorSun, 22 Dec 2019 11:10:00 +0530Building a Fake News Detector with Turicreate

In this tutorial we will build a fake news detecting app from scratch, using Turicreate for the machine learning model and SwiftUI for building the app

Note: These commands are written as if you are running a Jupyter notebook.

Building the Machine Learning Model

Data Gathering

To build a classifier, you need a lot of data. George McIntire (GH: @joolsa) has created a wonderful dataset containing the headline, the body, and whether it is fake or real. Whenever you are looking for a dataset, always try searching on Kaggle and GitHub before you start building your own.

Dependencies

I used a Google Colab instance for training my model. If you also plan on using Google Colab, I recommend choosing a GPU instance (it is free); this allows you to train the model on the GPU. Turicreate is built on top of Apache's MXNet framework, so for us to use the GPU we need to install a CUDA-compatible MXNet package.

!pip install turicreate +!pip uninstall -y mxnet +!pip install mxnet-cu100==1.4.0.post0 +
+ +

If you do not wish to train on the GPU, or are running this on your own computer, you can ignore the last two lines
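In that case, a CPU-only setup only needs the first line:

!pip install turicreate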

Downloading the Dataset

!wget -q "https://github.com/joolsa/fake_real_news_dataset/raw/master/fake_or_real_news.csv.zip" +!unzip fake_or_real_news.csv.zip +
+ +

Model Creation

import turicreate as tc +tc.config.set_num_gpus(-1) # If you do not wish to use GPUs, set it to 0 +
+ +
dataSFrame = tc.SFrame('fake_or_real_news.csv') +
+ +

The dataset contains a column named "X1", which is of no use to us. Therefore, we simply drop it

dataSFrame.remove_column('X1') +
+ +

Splitting Dataset

train, test = dataSFrame.random_split(.9) +
+ +

Training

model = tc.text_classifier.create( + dataset=train, + target='label', + features=['title','text'] +) +
+ +
+-----------+----------+-----------+--------------+-------------------+---------------------+ +| Iteration | Passes | Step size | Elapsed Time | Training Accuracy | Validation Accuracy | ++-----------+----------+-----------+--------------+-------------------+---------------------+ +| 0 | 2 | 1.000000 | 1.156349 | 0.889680 | 0.790036 | +| 1 | 4 | 1.000000 | 1.359196 | 0.985952 | 0.918149 | +| 2 | 6 | 0.820091 | 1.557205 | 0.990260 | 0.914591 | +| 3 | 7 | 1.000000 | 1.684872 | 0.998689 | 0.925267 | +| 4 | 8 | 1.000000 | 1.814194 | 0.999063 | 0.925267 | +| 9 | 14 | 1.000000 | 2.507072 | 1.000000 | 0.911032 | ++-----------+----------+-----------+--------------+-------------------+---------------------+ +
+ +

Testing the Model

test_predictions = model.predict(test) +accuracy = tc.evaluation.accuracy(test['label'], test_predictions) +print(f'Topic classifier model has a testing accuracy of {accuracy*100}% ', flush=True) +
+ +
Topic classifier model has a testing accuracy of 92.3076923076923% +
+ +

We have just created our own Fake News Detection Model which has an accuracy of 92%!

example_text = {"title": ["Middling ‘Rise Of Skywalker’ Review Leaves Fan On Fence About Whether To Threaten To Kill Critic"], "text": ["Expressing ambivalence toward the relatively balanced appraisal of the film, Star Wars fan Miles Ariely admitted Thursday that an online publication’s middling review of The Rise Of Skywalker had left him on the fence about whether he would still threaten to kill the critic who wrote it. “I’m really of two minds about this, because on the one hand, he said the new movie fails to live up to the original trilogy, which makes me at least want to throw a brick through his window with a note telling him to watch his back,” said Ariely, confirming he had already drafted an eight-page-long death threat to Stan Corimer of the website Screen-On Time, but had not yet decided whether to post it to the reviewer’s Facebook page. “On the other hand, though, he commended J.J. Abrams’ skillful pacing and faithfulness to George Lucas’ vision, which makes me wonder if I should just call the whole thing off. Now, I really don’t feel like camping outside his house for hours. Maybe I could go with a response that’s somewhere in between, like, threatening to kill his dog but not everyone in his whole family? I don’t know. This is a tough one.” At press time, sources reported that Ariely had resolved to wear his Ewok costume while he murdered the critic in his sleep."]} +example_prediction = model.classify(tc.SFrame(example_text)) +print(example_prediction, flush=True) +
+ +
+-------+--------------------+ +| class | probability | ++-------+--------------------+ +| FAKE | 0.9245648658345308 | ++-------+--------------------+ +[1 rows x 2 columns] +
+ +

Exporting the Model

model_name = 'FakeNews' +coreml_model_name = model_name + '.mlmodel' +exportedModel = model.export_coreml(coreml_model_name) +
+ +

Note: To download files from Google Colab, simply click on the Files section in the sidebar, right-click on the filename, and then click on Download
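Alternatively, you can trigger the download from code using Colab's files helper (assuming the export above produced FakeNews.mlmodel in the current working directory):

from google.colab import files
files.download('FakeNews.mlmodel')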

Link to Colab Notebook

Building the App using SwiftUI

Initial Setup

First, we create a single view app (make sure you check the Use SwiftUI option)

Then we copy our .mlmodel file to our project (just drag and drop the file into the Xcode file navigator)

Our ML model does not take a string directly as an input; rather, it takes a bag of words as an input. The bag-of-words model is a simplifying representation used in NLP, in which a text is represented as the bag (multiset) of its words, disregarding grammar and word order but keeping multiplicity.
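To make the idea concrete, here is a minimal Python sketch of the same representation (just for illustration; the app itself computes it in Swift below):

from collections import Counter

text = "the quick brown fox jumps over the lazy dog the fox"
bag_of_words = Counter(text.lower().split())
print(bag_of_words)  # Counter({'the': 3, 'fox': 2, 'quick': 1, ...})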

We define our bag of words function

func bow(text: String) -> [String: Double] { + var bagOfWords = [String: Double]() + + let tagger = NSLinguisticTagger(tagSchemes: [.tokenType], options: 0) + let range = NSRange(location: 0, length: text.utf16.count) + let options: NSLinguisticTagger.Options = [.omitPunctuation, .omitWhitespace] + tagger.string = text + + tagger.enumerateTags(in: range, unit: .word, scheme: .tokenType, options: options) { _, tokenRange, _ in + let word = (text as NSString).substring(with: tokenRange) + if bagOfWords[word] != nil { + bagOfWords[word]! += 1 + } else { + bagOfWords[word] = 1 + } + } + + return bagOfWords + } +
+ +

We also declare our variables

@State private var title: String = "" +@State private var headline: String = "" +@State private var alertTitle = "" +@State private var alertText = "" +@State private var showingAlert = false +
+ +

Finally, we implement a simple function which reads the two text fields, creates their bag of words representation and displays an alert with the appropriate result

Complete Code

import SwiftUI

struct ContentView: View {
    @State private var title: String = ""
    @State private var headline: String = ""

    @State private var alertTitle = ""
    @State private var alertText = ""
    @State private var showingAlert = false

    var body: some View {
        NavigationView {
            VStack(alignment: .leading) {
                Text("Headline").font(.headline)
                TextField("Please Enter Headline", text: $title)
                    .lineLimit(nil)
                Text("Body").font(.headline)
                TextField("Please Enter the content", text: $headline)
                    .lineLimit(nil)
            }
            .navigationBarTitle("Fake News Checker")
            .navigationBarItems(trailing:
                Button(action: classifyFakeNews) {
                    Text("Check")
                })
            .padding()
            .alert(isPresented: $showingAlert){
                Alert(title: Text(alertTitle), message: Text(alertText), dismissButton: .default(Text("OK")))
            }
        }

    }

    func classifyFakeNews(){
        let model = FakeNews()
        let myTitle = bow(text: title)
        let myText = bow(text: headline)
        do {
            let prediction = try model.prediction(title: myTitle, text: myText)
            alertTitle = prediction.label
            alertText = "It is likely that this piece of news is \(prediction.label.lowercased())."
            print(alertText)
        } catch {
            alertTitle = "Error"
            alertText = "Sorry, could not classify if the input news was fake or not."
        }

        showingAlert = true
    }

    func bow(text: String) -> [String: Double] {
        var bagOfWords = [String: Double]()

        let tagger = NSLinguisticTagger(tagSchemes: [.tokenType], options: 0)
        let range = NSRange(location: 0, length: text.utf16.count)
        let options: NSLinguisticTagger.Options = [.omitPunctuation, .omitWhitespace]
        tagger.string = text

        tagger.enumerateTags(in: range, unit: .word, scheme: .tokenType, options: options) { _, tokenRange, _ in
            let word = (text as NSString).substring(with: tokenRange)
            if bagOfWords[word] != nil {
                bagOfWords[word]! += 1
            } else {
                bagOfWords[word] = 1
            }
        }

        return bagOfWords
    }
}

struct ContentView_Previews: PreviewProvider {
    static var previews: some View {
        ContentView()
    }
}
]]>
https://navanchauhan.github.io/posts/2019-12-16-TensorFlow-Polynomial-RegressionPolynomial Regression Using TensorFlowPolynomial regression using TensorFlowhttps://navanchauhan.github.io/posts/2019-12-16-TensorFlow-Polynomial-RegressionMon, 16 Dec 2019 14:16:00 +0530Polynomial Regression Using TensorFlow

In this tutorial you will learn about polynomial regression and how you can implement it in TensorFlow.

In this tutorial, we will perform polynomial regression using five types of equations:

  • Linear
  • Quadratic
  • Cubic
  • Quartic
  • Quintic

Regression

What is Regression?

Regression is a statistical method used to determine the relationship between a dependent variable (often denoted by Y) and a series of varying variables (called independent variables, often denoted by X).

What is Polynomial Regression

This is a form of regression analysis in which the relationship between Y and X is modelled as an nth-degree polynomial in X. For example, a quadratic (degree-2) model has the form Y = aX² + bX + c. Polynomial regression can even fit a non-linear relationship (e.g. when the points don't form a straight line).

Imports

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

Dataset

Creating Random Data

Even though in this tutorial we will use a Position vs Salary dataset, it is important to know how to create synthetic data.

To create 50 values spaced evenly between 0 and 50, we use NumPy's linspace function

linspace(lower_limit, upper_limit, no_of_observations)
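For instance, np.linspace(0, 50, 50) (as used below) returns 50 evenly spaced values from 0 to 50, with a step of 50/49 ≈ 1.02; a quick way to inspect them:

print(np.linspace(0, 50, 50)[:3])   # [0.         1.02040816 2.04081633]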

x = np.linspace(0, 50, 50)
y = np.linspace(0, 50, 50)

We use the following function to add noise to the data, so that our values do not lie perfectly on a straight line.

x += np.random.uniform(-4, 4, 50)
y += np.random.uniform(-4, 4, 50)

Position vs Salary Dataset

We will be using https://drive.google.com/file/d/1tNL4jxZEfpaP4oflfSn6pIHJX7Pachm9/view (Salary vs Position Dataset)

!wget --no-check-certificate 'https://docs.google.com/uc?export=download&id=1tNL4jxZEfpaP4oflfSn6pIHJX7Pachm9' -O data.csv
df = pd.read_csv("data.csv")
df # this gives us a preview of the dataset we are working with
| Position          | Level | Salary  |
|-------------------|-------|---------|
| Business Analyst  | 1     | 45000   |
| Junior Consultant | 2     | 50000   |
| Senior Consultant | 3     | 60000   |
| Manager           | 4     | 80000   |
| Country Manager   | 5     | 110000  |
| Region Manager    | 6     | 150000  |
| Partner           | 7     | 200000  |
| Senior Partner    | 8     | 300000  |
| C-level           | 9     | 500000  |
| CEO               | 10    | 1000000 |

We use the Salary column as the ordinate (y-coordinate) and the Level column as the abscissa (x-coordinate).

abscissa = df["Level"].to_list() # abscissa = [1,2,3,4,5,6,7,8,9,10]
ordinate = df["Salary"].to_list() # ordinate = [45000,50000,60000,80000,110000,150000,200000,300000,500000,1000000]
n = len(abscissa) # no of observations
plt.scatter(abscissa, ordinate)
plt.ylabel('Salary')
plt.xlabel('Position')
plt.title("Salary vs Position")
plt.show()

Defining Stuff

X = tf.placeholder("float")
Y = tf.placeholder("float")

Defining Variables

We first define all the coefficients and the constant as TensorFlow variables, each having a random initial value.

a = tf.Variable(np.random.randn(), name = "a")
b = tf.Variable(np.random.randn(), name = "b")
c = tf.Variable(np.random.randn(), name = "c")
d = tf.Variable(np.random.randn(), name = "d")
e = tf.Variable(np.random.randn(), name = "e")
f = tf.Variable(np.random.randn(), name = "f")

Model Configuration

learning_rate = 0.2
no_of_epochs = 25000

Equations

deg1 = a*X + b
deg2 = a*tf.pow(X,2) + b*X + c
deg3 = a*tf.pow(X,3) + b*tf.pow(X,2) + c*X + d
deg4 = a*tf.pow(X,4) + b*tf.pow(X,3) + c*tf.pow(X,2) + d*X + e
deg5 = a*tf.pow(X,5) + b*tf.pow(X,4) + c*tf.pow(X,3) + d*tf.pow(X,2) + e*X + f

Cost Function

We use the mean squared error as the cost function: cost = Σ(prediction − Y)² / (2n).

mse1 = tf.reduce_sum(tf.pow(deg1-Y,2))/(2*n)
mse2 = tf.reduce_sum(tf.pow(deg2-Y,2))/(2*n)
mse3 = tf.reduce_sum(tf.pow(deg3-Y,2))/(2*n)
mse4 = tf.reduce_sum(tf.pow(deg4-Y,2))/(2*n)
mse5 = tf.reduce_sum(tf.pow(deg5-Y,2))/(2*n)

Optimizer

We use the AdamOptimizer for the polynomial functions and GradientDescentOptimizer for the linear function

optimizer1 = tf.train.GradientDescentOptimizer(learning_rate).minimize(mse1)
optimizer2 = tf.train.AdamOptimizer(learning_rate).minimize(mse2)
optimizer3 = tf.train.AdamOptimizer(learning_rate).minimize(mse3)
optimizer4 = tf.train.AdamOptimizer(learning_rate).minimize(mse4)
optimizer5 = tf.train.AdamOptimizer(learning_rate).minimize(mse5)
init = tf.global_variables_initializer()

Model Predictions

For each type of equation, we first train the model to estimate the values of the coefficient(s) and constant; once we have these values, we use them to predict the Y values from the X values. We then plot the result to compare the actual data with the fitted line.

Linear Equation

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(no_of_epochs):
        for (x,y) in zip(abscissa, ordinate):
            sess.run(optimizer1, feed_dict={X:x, Y:y})
        if (epoch+1)%1000==0:
            cost = sess.run(mse1,feed_dict={X:abscissa,Y:ordinate})
            print("Epoch",(epoch+1), ": Training Cost:", cost," a,b:",sess.run(a),sess.run(b))

    training_cost = sess.run(mse1,feed_dict={X:abscissa,Y:ordinate})
    coefficient1 = sess.run(a)
    constant = sess.run(b)

print(training_cost, coefficient1, constant)
Epoch 1000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 2000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 3000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 4000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 5000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 6000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 7000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 8000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 9000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 10000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 11000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 12000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 13000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 14000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 15000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 16000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 17000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 18000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 19000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 20000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 21000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 22000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 23000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 24000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 25000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +88999125000.0 180396.42 -478869.12 +
+ +
predictions = []
for x in abscissa:
    predictions.append((coefficient1*x + constant))
plt.plot(abscissa , ordinate, 'ro', label ='Original data')
plt.plot(abscissa, predictions, label ='Fitted line')
plt.title('Linear Regression Result')
plt.legend()
plt.show()
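As a quick sanity check, the final training cost printed above can be recomputed directly with NumPy (a sketch, reusing coefficient1, constant, abscissa, ordinate, and n from the cells above):

fitted = coefficient1 * np.array(abscissa) + constant          # fitted Y values for the linear model
cost = np.sum((fitted - np.array(ordinate)) ** 2) / (2 * n)    # same formula as mse1
print(cost)  # should roughly match the training cost reported for epoch 25000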

Quadratic Equation

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(no_of_epochs):
        for (x,y) in zip(abscissa, ordinate):
            sess.run(optimizer2, feed_dict={X:x, Y:y})
        if (epoch+1)%1000==0:
            cost = sess.run(mse2,feed_dict={X:abscissa,Y:ordinate})
            print("Epoch",(epoch+1), ": Training Cost:", cost," a,b,c:",sess.run(a),sess.run(b),sess.run(c))

    training_cost = sess.run(mse2,feed_dict={X:abscissa,Y:ordinate})
    coefficient1 = sess.run(a)
    coefficient2 = sess.run(b)
    constant = sess.run(c)

print(training_cost, coefficient1, coefficient2, constant)
Epoch 1000 : Training Cost: 52571360000.0 a,b,c: 1002.4456 1097.0197 1276.6921 +Epoch 2000 : Training Cost: 37798890000.0 a,b,c: 1952.4263 2130.2825 2469.7756 +Epoch 3000 : Training Cost: 26751185000.0 a,b,c: 2839.5825 3081.6118 3554.351 +Epoch 4000 : Training Cost: 19020106000.0 a,b,c: 3644.56 3922.9563 4486.3135 +Epoch 5000 : Training Cost: 14060446000.0 a,b,c: 4345.042 4621.4233 5212.693 +Epoch 6000 : Training Cost: 11201084000.0 a,b,c: 4921.1855 5148.1504 5689.0713 +Epoch 7000 : Training Cost: 9732740000.0 a,b,c: 5364.764 5493.0156 5906.754 +Epoch 8000 : Training Cost: 9050918000.0 a,b,c: 5685.4067 5673.182 5902.0728 +Epoch 9000 : Training Cost: 8750394000.0 a,b,c: 5906.9814 5724.8906 5734.746 +Epoch 10000 : Training Cost: 8613128000.0 a,b,c: 6057.3677 5687.3364 5461.167 +Epoch 11000 : Training Cost: 8540034600.0 a,b,c: 6160.547 5592.3022 5122.8633 +Epoch 12000 : Training Cost: 8490983000.0 a,b,c: 6233.9175 5462.025 4747.111 +Epoch 13000 : Training Cost: 8450816500.0 a,b,c: 6289.048 5310.7583 4350.6997 +Epoch 14000 : Training Cost: 8414082000.0 a,b,c: 6333.199 5147.394 3943.9294 +Epoch 15000 : Training Cost: 8378841600.0 a,b,c: 6370.7944 4977.1704 3532.476 +Epoch 16000 : Training Cost: 8344471000.0 a,b,c: 6404.468 4803.542 3120.2087 +Epoch 17000 : Training Cost: 8310785500.0 a,b,c: 6435.365 4628.1523 2709.1445 +Epoch 18000 : Training Cost: 8277482000.0 a,b,c: 6465.5493 4451.833 2300.2783 +Epoch 19000 : Training Cost: 8244650000.0 a,b,c: 6494.609 4274.826 1894.3738 +Epoch 20000 : Training Cost: 8212349000.0 a,b,c: 6522.8247 4098.1733 1491.9915 +Epoch 21000 : Training Cost: 8180598300.0 a,b,c: 6550.6567 3922.7405 1093.3868 +Epoch 22000 : Training Cost: 8149257700.0 a,b,c: 6578.489 3747.8362 698.53357 +Epoch 23000 : Training Cost: 8118325000.0 a,b,c: 6606.1973 3573.2742 307.3541 +Epoch 24000 : Training Cost: 8088001000.0 a,b,c: 6632.96 3399.878 -79.89219 +Epoch 25000 : Training Cost: 8058094600.0 a,b,c: 6659.793 3227.2517 -463.03156 +8058094600.0 6659.793 3227.2517 -463.03156 +
+ +
predictions = []
for x in abscissa:
    predictions.append((coefficient1*pow(x,2) + coefficient2*x + constant))
plt.plot(abscissa , ordinate, 'ro', label ='Original data')
plt.plot(abscissa, predictions, label ='Fitted line')
plt.title('Quadratic Regression Result')
plt.legend()
plt.show()

Cubic

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(no_of_epochs):
        for (x,y) in zip(abscissa, ordinate):
            sess.run(optimizer3, feed_dict={X:x, Y:y})
        if (epoch+1)%1000==0:
            cost = sess.run(mse3,feed_dict={X:abscissa,Y:ordinate})
            print("Epoch",(epoch+1), ": Training Cost:", cost," a,b,c,d:",sess.run(a),sess.run(b),sess.run(c),sess.run(d))

    training_cost = sess.run(mse3,feed_dict={X:abscissa,Y:ordinate})
    coefficient1 = sess.run(a)
    coefficient2 = sess.run(b)
    coefficient3 = sess.run(c)
    constant = sess.run(d)

print(training_cost, coefficient1, coefficient2, coefficient3, constant)
Epoch 1000 : Training Cost: 4279814000.0 a,b,c,d: 670.1527 694.4212 751.4653 903.9527 +Epoch 2000 : Training Cost: 3770950400.0 a,b,c,d: 742.6414 666.3489 636.94525 859.2088 +Epoch 3000 : Training Cost: 3717708300.0 a,b,c,d: 756.2582 569.3339 448.105 748.23956 +Epoch 4000 : Training Cost: 3667464000.0 a,b,c,d: 769.4476 474.0318 265.5761 654.75525 +Epoch 5000 : Training Cost: 3620040700.0 a,b,c,d: 782.32324 380.54272 89.39888 578.5136 +Epoch 6000 : Training Cost: 3575265800.0 a,b,c,d: 794.8898 288.83356 -80.5215 519.13654 +Epoch 7000 : Training Cost: 3532972000.0 a,b,c,d: 807.1608 198.87044 -244.31102 476.2061 +Epoch 8000 : Training Cost: 3493009200.0 a,b,c,d: 819.13513 110.64169 -402.0677 449.3291 +Epoch 9000 : Training Cost: 3455228400.0 a,b,c,d: 830.80255 24.0964 -553.92804 438.0652 +Epoch 10000 : Training Cost: 3419475500.0 a,b,c,d: 842.21594 -60.797424 -700.0123 441.983 +Epoch 11000 : Training Cost: 3385625300.0 a,b,c,d: 853.3363 -144.08699 -840.467 460.6356 +Epoch 12000 : Training Cost: 3353544700.0 a,b,c,d: 864.19135 -225.8125 -975.4196 493.57703 +Epoch 13000 : Training Cost: 3323125000.0 a,b,c,d: 874.778 -305.98932 -1104.9867 540.39465 +Epoch 14000 : Training Cost: 3294257000.0 a,b,c,d: 885.1007 -384.63474 -1229.277 600.65607 +Epoch 15000 : Training Cost: 3266820000.0 a,b,c,d: 895.18823 -461.819 -1348.4417 673.9051 +Epoch 16000 : Training Cost: 3240736000.0 a,b,c,d: 905.0128 -537.541 -1462.6171 759.7118 +Epoch 17000 : Training Cost: 3215895000.0 a,b,c,d: 914.60065 -611.8676 -1571.9058 857.6638 +Epoch 18000 : Training Cost: 3192216800.0 a,b,c,d: 923.9603 -684.8093 -1676.4642 967.30475 +Epoch 19000 : Training Cost: 3169632300.0 a,b,c,d: 933.08594 -756.3582 -1776.4275 1088.2198 +Epoch 20000 : Training Cost: 3148046300.0 a,b,c,d: 941.9928 -826.6257 -1871.9355 1219.9702 +Epoch 21000 : Training Cost: 3127394800.0 a,b,c,d: 950.67896 -895.6205 -1963.0989 1362.1665 +Epoch 22000 : Training Cost: 3107608600.0 a,b,c,d: 959.1487 -963.38116 -2050.0586 1514.4026 +Epoch 23000 : Training Cost: 3088618200.0 a,b,c,d: 967.4355 -1029.9625 -2132.961 1676.2717 +Epoch 24000 : Training Cost: 3070361300.0 a,b,c,d: 975.52875 -1095.4292 -2211.854 1847.4485 +Epoch 25000 : Training Cost: 3052791300.0 a,b,c,d: 983.4346 -1159.7922 -2286.9412 2027.4857 +3052791300.0 983.4346 -1159.7922 -2286.9412 2027.4857 +
+ +
predictions = []
for x in abscissa:
    predictions.append((coefficient1*pow(x,3) + coefficient2*pow(x,2) + coefficient3*x + constant))
plt.plot(abscissa , ordinate, 'ro', label ='Original data')
plt.plot(abscissa, predictions, label ='Fitted line')
plt.title('Cubic Regression Result')
plt.legend()
plt.show()

Quartic

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(no_of_epochs):
        for (x,y) in zip(abscissa, ordinate):
            sess.run(optimizer4, feed_dict={X:x, Y:y})
        if (epoch+1)%1000==0:
            cost = sess.run(mse4,feed_dict={X:abscissa,Y:ordinate})
            print("Epoch",(epoch+1), ": Training Cost:", cost," a,b,c,d:",sess.run(a),sess.run(b),sess.run(c),sess.run(d),sess.run(e))

    training_cost = sess.run(mse4,feed_dict={X:abscissa,Y:ordinate})
    coefficient1 = sess.run(a)
    coefficient2 = sess.run(b)
    coefficient3 = sess.run(c)
    coefficient4 = sess.run(d)
    constant = sess.run(e)

print(training_cost, coefficient1, coefficient2, coefficient3, coefficient4, constant)
Epoch 1000 : Training Cost: 1902632600.0 a,b,c,d: 84.48304 52.210594 54.791424 142.51952 512.0343 +Epoch 2000 : Training Cost: 1854316200.0 a,b,c,d: 88.998955 13.073557 14.276088 223.55667 1056.4655 +Epoch 3000 : Training Cost: 1812812400.0 a,b,c,d: 92.9462 -22.331177 -15.262934 327.41858 1634.9054 +Epoch 4000 : Training Cost: 1775716000.0 a,b,c,d: 96.42522 -54.64535 -35.829437 449.5028 2239.1392 +Epoch 5000 : Training Cost: 1741494100.0 a,b,c,d: 99.524734 -84.43976 -49.181057 585.85876 2862.4915 +Epoch 6000 : Training Cost: 1709199600.0 a,b,c,d: 102.31984 -112.19895 -56.808075 733.1876 3499.6199 +Epoch 7000 : Training Cost: 1678261800.0 a,b,c,d: 104.87324 -138.32709 -59.9442 888.79626 4146.2944 +Epoch 8000 : Training Cost: 1648340600.0 a,b,c,d: 107.23536 -163.15173 -59.58964 1050.524 4798.979 +Epoch 9000 : Training Cost: 1619243400.0 a,b,c,d: 109.44742 -186.9409 -56.53944 1216.6432 5454.9463 +Epoch 10000 : Training Cost: 1590821900.0 a,b,c,d: 111.54233 -209.91287 -51.423084 1385.8513 6113.5137 +Epoch 11000 : Training Cost: 1563042200.0 a,b,c,d: 113.54405 -232.21953 -44.73371 1557.1084 6771.7046 +Epoch 12000 : Training Cost: 1535855600.0 a,b,c,d: 115.471565 -253.9838 -36.851135 1729.535 7429.069 +Epoch 13000 : Training Cost: 1509255300.0 a,b,c,d: 117.33939 -275.29697 -28.0714 1902.5308 8083.9634 +Epoch 14000 : Training Cost: 1483227000.0 a,b,c,d: 119.1605 -296.2472 -18.618649 2075.6094 8735.381 +Epoch 15000 : Training Cost: 1457726700.0 a,b,c,d: 120.94584 -316.915 -8.650095 2248.3247 9384.197 +Epoch 16000 : Training Cost: 1432777300.0 a,b,c,d: 122.69806 -337.30704 1.7027153 2420.5771 10028.871 +Epoch 17000 : Training Cost: 1408365000.0 a,b,c,d: 124.42179 -357.45245 12.33499 2592.2983 10669.157 +Epoch 18000 : Training Cost: 1384480000.0 a,b,c,d: 126.12332 -377.39734 23.168756 2763.0933 11305.027 +Epoch 19000 : Training Cost: 1361116800.0 a,b,c,d: 127.80568 -397.16415 34.160156 2933.0452 11935.669 +Epoch 20000 : Training Cost: 1338288100.0 a,b,c,d: 129.4674 -416.72803 45.259155 3101.7727 12561.179 +Epoch 21000 : Training Cost: 1315959700.0 a,b,c,d: 131.11403 -436.14285 56.4436 3269.3142 13182.058 +Epoch 22000 : Training Cost: 1294164700.0 a,b,c,d: 132.74377 -455.3779 67.6757 3435.3833 13796.807 +Epoch 23000 : Training Cost: 1272863600.0 a,b,c,d: 134.35779 -474.45316 78.96117 3600.264 14406.58 +Epoch 24000 : Training Cost: 1252052600.0 a,b,c,d: 135.9583 -493.38254 90.268616 3764.0078 15010.481 +Epoch 25000 : Training Cost: 1231713700.0 a,b,c,d: 137.54753 -512.1876 101.59372 3926.4897 15609.368 +1231713700.0 137.54753 -512.1876 101.59372 3926.4897 15609.368 +
+ +
predictions = []
for x in abscissa:
    predictions.append((coefficient1*pow(x,4) + coefficient2*pow(x,3) + coefficient3*pow(x,2) + coefficient4*x + constant))
plt.plot(abscissa , ordinate, 'ro', label ='Original data')
plt.plot(abscissa, predictions, label ='Fitted line')
plt.title('Quartic Regression Result')
plt.legend()
plt.show()

Quintic

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(no_of_epochs):
        for (x,y) in zip(abscissa, ordinate):
            sess.run(optimizer5, feed_dict={X:x, Y:y})
        if (epoch+1)%1000==0:
            cost = sess.run(mse5,feed_dict={X:abscissa,Y:ordinate})
            print("Epoch",(epoch+1), ": Training Cost:", cost," a,b,c,d,e,f:",sess.run(a),sess.run(b),sess.run(c),sess.run(d),sess.run(e),sess.run(f))

    training_cost = sess.run(mse5,feed_dict={X:abscissa,Y:ordinate})
    coefficient1 = sess.run(a)
    coefficient2 = sess.run(b)
    coefficient3 = sess.run(c)
    coefficient4 = sess.run(d)
    coefficient5 = sess.run(e)
    constant = sess.run(f)
Epoch 1000 : Training Cost: 1409200100.0 a,b,c,d,e,f: 7.949472 7.46219 55.626034 184.29028 484.00223 1024.0083 +Epoch 2000 : Training Cost: 1306882400.0 a,b,c,d,e,f: 8.732181 -4.0085897 73.25298 315.90103 904.08887 2004.9749 +Epoch 3000 : Training Cost: 1212606000.0 a,b,c,d,e,f: 9.732249 -16.90125 86.28379 437.06552 1305.055 2966.2188 +Epoch 4000 : Training Cost: 1123640400.0 a,b,c,d,e,f: 10.74851 -29.82692 98.59997 555.331 1698.4631 3917.9155 +Epoch 5000 : Training Cost: 1039694300.0 a,b,c,d,e,f: 11.75426 -42.598194 110.698326 671.64355 2085.5513 4860.8535 +Epoch 6000 : Training Cost: 960663550.0 a,b,c,d,e,f: 12.745439 -55.18337 122.644936 786.00214 2466.1638 5794.3735 +Epoch 7000 : Training Cost: 886438340.0 a,b,c,d,e,f: 13.721028 -67.57168 134.43822 898.3691 2839.9958 6717.659 +Epoch 8000 : Training Cost: 816913100.0 a,b,c,d,e,f: 14.679965 -79.75113 146.07385 1008.66895 3206.6692 7629.812 +Epoch 9000 : Training Cost: 751971500.0 a,b,c,d,e,f: 15.62181 -91.71608 157.55713 1116.7715 3565.8323 8529.976 +Epoch 10000 : Training Cost: 691508740.0 a,b,c,d,e,f: 16.545347 -103.4531 168.88321 1222.6348 3916.9785 9416.236 +Epoch 11000 : Training Cost: 635382000.0 a,b,c,d,e,f: 17.450052 -114.954254 180.03932 1326.1565 4259.842 10287.99 +Epoch 12000 : Training Cost: 583477250.0 a,b,c,d,e,f: 18.334944 -126.20821 191.02948 1427.2095 4593.8 11143.449 +Epoch 13000 : Training Cost: 535640400.0 a,b,c,d,e,f: 19.198917 -137.20206 201.84718 1525.6926 4918.5327 11981.633 +Epoch 14000 : Training Cost: 491722240.0 a,b,c,d,e,f: 20.041153 -147.92719 212.49709 1621.5496 5233.627 12800.468 +Epoch 15000 : Training Cost: 451559520.0 a,b,c,d,e,f: 20.860966 -158.37456 222.97133 1714.7141 5538.676 13598.337 +Epoch 16000 : Training Cost: 414988960.0 a,b,c,d,e,f: 21.657421 -168.53406 233.27422 1805.0874 5833.1978 14373.658 +Epoch 17000 : Training Cost: 381837920.0 a,b,c,d,e,f: 22.429693 -178.39536 243.39914 1892.5883 6116.847 15124.394 +Epoch 18000 : Training Cost: 351931300.0 a,b,c,d,e,f: 23.176882 -187.94789 253.3445 1977.137 6389.117 15848.417 +Epoch 19000 : Training Cost: 325074400.0 a,b,c,d,e,f: 23.898485 -197.18741 263.12512 2058.6716 6649.8037 16543.95 +Epoch 20000 : Training Cost: 301073570.0 a,b,c,d,e,f: 24.593851 -206.10497 272.72385 2137.1797 6898.544 17209.367 +Epoch 21000 : Training Cost: 279727000.0 a,b,c,d,e,f: 25.262104 -214.69217 282.14642 2212.6372 7135.217 17842.854 +Epoch 22000 : Training Cost: 260845550.0 a,b,c,d,e,f: 25.903376 -222.94969 291.4003 2284.9844 7359.4644 18442.408 +Epoch 23000 : Training Cost: 244218030.0 a,b,c,d,e,f: 26.517094 -230.8697 300.45532 2354.3003 7571.261 19007.49 +Epoch 24000 : Training Cost: 229660080.0 a,b,c,d,e,f: 27.102589 -238.44817 309.35342 2420.4185 7770.5728 19536.19 +Epoch 25000 : Training Cost: 216972400.0 a,b,c,d,e,f: 27.660324 -245.69016 318.10062 2483.3608 7957.354 20027.707 +216972400.0 27.660324 -245.69016 318.10062 2483.3608 7957.354 20027.707 +
+ +
predictions = []
for x in abscissa:
    predictions.append((coefficient1*pow(x,5) + coefficient2*pow(x,4) + coefficient3*pow(x,3) + coefficient4*pow(x,2) + coefficient5*x + constant))
plt.plot(abscissa , ordinate, 'ro', label ='Original data')
plt.plot(abscissa, predictions, label ='Fitted line')
plt.title('Quintic Regression Result')
plt.legend()
plt.show()

Results and Conclusion

You just learnt Polynomial Regression using TensorFlow!

Notes

Overfitting

> Overfitting refers to a model that models the training data too well. Overfitting happens when a model learns the detail and noise in the training data to the extent that it negatively impacts the performance of the model on new data. This means that the noise or random fluctuations in the training data is picked up and learned as concepts by the model. The problem is that these concepts do not apply to new data and negatively impact the model's ability to generalize.

Source: Machine Learning Mastery

Basically, if you train your machine learning model on a small dataset for a really large number of epochs, the model will learn all the deformities/noise in the data and treat them as a normal part of it. Then, when it sees new data, it will effectively dismiss whatever doesn't match that noise, which hurts the model's accuracy in a negative manner.

]]>
https://navanchauhan.github.io/posts/2019-12-10-TensorFlow-Model-PredictionMaking Predictions using Image Classifier (TensorFlow)Making predictions for image classification models built using TensorFlowhttps://navanchauhan.github.io/posts/2019-12-10-TensorFlow-Model-PredictionTue, 10 Dec 2019 11:10:00 +0530Making Predictions using Image Classifier (TensorFlow)

This was tested on TF 2.x and works as of 2019-12-10

If you want to understand how to make your own custom image classifier, please refer to my previous post.

If you followed my last post, then you created a model which took an image of dimensions 50x50 as an input.

First we import the following if we have not imported these before

import cv2
import os

Then we read the file using OpenCV.

image=cv2.imread(imagePath)

The cv2.imread() function returns a NumPy array representing the image. Therefore, we need to convert it before we can use it.

image_from_array = Image.fromarray(image, 'RGB')

Then we resize the image

size_image = image_from_array.resize((50,50))

After this we create a batch consisting of only one image

p = np.expand_dims(size_image, 0)

We then convert this uint8 datatype to a float32 datatype

img = tf.cast(p, tf.float32)

Finally we make the prediction

print(['Infected','Uninfected'][np.argmax(model.predict(img))])

Infected
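Putting the steps above together, a small helper for classifying a single image might look like this (a sketch, not part of the original post; it assumes the trained model and the 50x50 input size from the previous tutorial):

import cv2
import numpy as np
import tensorflow as tf
from PIL import Image

def predict_image(image_path, model, labels=('Infected', 'Uninfected')):
    image = cv2.imread(image_path)                    # read the image as a NumPy array
    image_from_array = Image.fromarray(image, 'RGB')  # convert to a PIL image
    size_image = image_from_array.resize((50, 50))    # match the model's input size
    batch = np.expand_dims(size_image, 0)             # batch consisting of one image
    batch = tf.cast(batch, tf.float32)                # uint8 -> float32
    return labels[np.argmax(model.predict(batch))]

# usage: print(predict_image("cell.png", model))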

]]>
https://navanchauhan.github.io/posts/2019-12-08-Image-Classifier-TensorflowCreating a Custom Image Classifier using Tensorflow 2.x and Keras for Detecting MalariaTutorial on creating an image classifier model using TensorFlow which detects malariahttps://navanchauhan.github.io/posts/2019-12-08-Image-Classifier-TensorflowSun, 8 Dec 2019 14:16:00 +0530Creating a Custom Image Classifier using Tensorflow 2.x and Keras for Detecting Malaria

Done during Google Code-In. Org: Tensorflow.

Imports

%tensorflow_version 2.x #This is for telling Colab that you want to use TF 2.0, ignore if running on local machine

from PIL import Image # We use the PIL Library to resize images
import numpy as np
import os
import cv2
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
import pandas as pd
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Conv2D,MaxPooling2D,Dense,Flatten,Dropout

Dataset

Fetching the Data

!wget ftp://lhcftp.nlm.nih.gov/Open-Access-Datasets/Malaria/cell_images.zip
!unzip cell_images.zip

Processing the Data

We resize all the images to 50x50 and append each image's NumPy array, along with its label (infected or not), to common arrays.

data = []
labels = []

Parasitized = os.listdir("./cell_images/Parasitized/")
for parasite in Parasitized:
    try:
        image=cv2.imread("./cell_images/Parasitized/"+parasite)
        image_from_array = Image.fromarray(image, 'RGB')
        size_image = image_from_array.resize((50, 50))
        data.append(np.array(size_image))
        labels.append(0)
    except AttributeError:
        print("")

Uninfected = os.listdir("./cell_images/Uninfected/")
for uninfect in Uninfected:
    try:
        image=cv2.imread("./cell_images/Uninfected/"+uninfect)
        image_from_array = Image.fromarray(image, 'RGB')
        size_image = image_from_array.resize((50, 50))
        data.append(np.array(size_image))
        labels.append(1)
    except AttributeError:
        print("")

Splitting Data

df = np.array(data)
labels = np.array(labels)
(X_train, X_test) = df[(int)(0.1*len(df)):],df[:(int)(0.1*len(df))]
(y_train, y_test) = labels[(int)(0.1*len(labels)):],labels[:(int)(0.1*len(labels))]
s=np.arange(X_train.shape[0])
np.random.shuffle(s)
X_train=X_train[s]
y_train=y_train[s]
X_train = X_train/255.0

Model

Creating Model

By creating a sequential model, we create a linear stack of layers.

Note: The input shape for the first layer is (50, 50, 3), which corresponds to the size of the resized images (50x50 pixels with 3 colour channels).

model = models.Sequential()
model.add(layers.Conv2D(filters=16, kernel_size=2, padding='same', activation='relu', input_shape=(50,50,3)))
model.add(layers.MaxPooling2D(pool_size=2))
model.add(layers.Conv2D(filters=32,kernel_size=2,padding='same',activation='relu'))
model.add(layers.MaxPooling2D(pool_size=2))
model.add(layers.Conv2D(filters=64,kernel_size=2,padding="same",activation="relu"))
model.add(layers.MaxPooling2D(pool_size=2))
model.add(layers.Dropout(0.2))
model.add(layers.Flatten())
model.add(layers.Dense(500,activation="relu"))
model.add(layers.Dropout(0.2))
model.add(layers.Dense(2,activation="softmax")) #2 represent output layer neurons
model.summary()

Compiling Model

We use the adam optimiser because it is an adaptive learning rate optimisation algorithm designed specifically for training deep neural networks, which means it adjusts its learning rate automatically to get the best results.

model.compile(optimizer="adam",
              loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])

Training Model

We train the model for 10 epochs on the training data and then validate it using the testing data

history = model.fit(X_train,y_train, epochs=10, validation_data=(X_test,y_test))
Train on 24803 samples, validate on 2755 samples +Epoch 1/10 +24803/24803 [==============================] - 57s 2ms/sample - loss: 0.0786 - accuracy: 0.9729 - val_loss: 0.0000e+00 - val_accuracy: 1.0000 +Epoch 2/10 +24803/24803 [==============================] - 58s 2ms/sample - loss: 0.0746 - accuracy: 0.9731 - val_loss: 0.0290 - val_accuracy: 0.9996 +Epoch 3/10 +24803/24803 [==============================] - 58s 2ms/sample - loss: 0.0672 - accuracy: 0.9764 - val_loss: 0.0000e+00 - val_accuracy: 1.0000 +Epoch 4/10 +24803/24803 [==============================] - 58s 2ms/sample - loss: 0.0601 - accuracy: 0.9789 - val_loss: 0.0000e+00 - val_accuracy: 1.0000 +Epoch 5/10 +24803/24803 [==============================] - 58s 2ms/sample - loss: 0.0558 - accuracy: 0.9804 - val_loss: 0.0000e+00 - val_accuracy: 1.0000 +Epoch 6/10 +24803/24803 [==============================] - 57s 2ms/sample - loss: 0.0513 - accuracy: 0.9819 - val_loss: 0.0000e+00 - val_accuracy: 1.0000 +Epoch 7/10 +24803/24803 [==============================] - 58s 2ms/sample - loss: 0.0452 - accuracy: 0.9849 - val_loss: 0.3190 - val_accuracy: 0.9985 +Epoch 8/10 +24803/24803 [==============================] - 58s 2ms/sample - loss: 0.0404 - accuracy: 0.9858 - val_loss: 0.0000e+00 - val_accuracy: 1.0000 +Epoch 9/10 +24803/24803 [==============================] - 58s 2ms/sample - loss: 0.0352 - accuracy: 0.9878 - val_loss: 0.0000e+00 - val_accuracy: 1.0000 +Epoch 10/10 +24803/24803 [==============================] - 58s 2ms/sample - loss: 0.0373 - accuracy: 0.9865 - val_loss: 0.0000e+00 - val_accuracy: 1.0000 +
+ +

Results

accuracy = history.history['accuracy'][-1]*100
loss = history.history['loss'][-1]*100
val_accuracy = history.history['val_accuracy'][-1]*100
val_loss = history.history['val_loss'][-1]*100

print(
    'Accuracy:', accuracy,
    '\nLoss:', loss,
    '\nValidation Accuracy:', val_accuracy,
    '\nValidation Loss:', val_loss
)
Accuracy: 98.64532351493835
Loss: 3.732407123270176
Validation Accuracy: 100.0
Validation Loss: 0.0

We have achieved 98% Accuracy!
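To reuse the trained network later without retraining, it can be saved with the standard Keras API (this step is an addition and not part of the original notebook):

model.save("malaria_classifier.h5")  # saves architecture + weights to a single file
# later: model = tf.keras.models.load_model("malaria_classifier.h5")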

Link to Colab Notebook

]]>
https://navanchauhan.github.io/posts/2019-12-08-Splitting-ZipsSplitting ZIPs into Multiple PartsShort code snippet for splitting zips.https://navanchauhan.github.io/posts/2019-12-08-Splitting-ZipsSun, 8 Dec 2019 13:27:00 +0530Splitting ZIPs into Multiple Parts

Tested on macOS

Creating the archive:

zip -r -s 5 oodlesofnoodles.zip website/

Here, 5 stands for the size of each split file (in MB by default; KB and GB can also be specified).
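For example, the unit suffix can be given explicitly (an illustration; the command above uses the default of megabytes):

zip -r -s 500m oodlesofnoodles.zip website/   # 500 MB parts
zip -r -s 1g oodlesofnoodles.zip website/     # 1 GB parts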

For encrypting the zip:

zip -er -s 5 oodlesofnoodles.zip website

Extracting Files

First we need to collect all parts, then

zip -F oodlesofnoodles.zip --out merged.zip
]]>
https://navanchauhan.github.io/publications/2019-05-14-Detecting-Driver-Fatigue-Over-Speeding-and-Speeding-up-Post-Accident-ResponseDetecting Driver Fatigue, Over-Speeding, and Speeding up Post-Accident ResponseThis paper is about Detecting Driver Fatigue, Over-Speeding, and Speeding up Post-Accident Response.https://navanchauhan.github.io/publications/2019-05-14-Detecting-Driver-Fatigue-Over-Speeding-and-Speeding-up-Post-Accident-ResponseTue, 14 May 2019 02:42:00 +0530Detecting Driver Fatigue, Over-Speeding, and Speeding up Post-Accident Response

Based on the project showcased at Toyota Hackathon, IITD - 17/18th December 2018

Download paper here

Recommended citation:

APA

Chauhan, N. (2019). "Detecting Driver Fatigue, Over-Speeding, and Speeding up Post-Accident Response." International Research Journal of Engineering and Technology (IRJET), 6(5).

BibTeX

@article{chauhan_2019,
  title={Detecting Driver Fatigue, Over-Speeding, and Speeding up Post-Accident Response},
  volume={6},
  url={https://www.irjet.net/archives/V6/i5/IRJET-V6I5318.pdf},
  number={5},
  journal={International Research Journal of Engineering and Technology (IRJET)},
  author={Chauhan, Navan},
  year={2019}
}
]]>
https://navanchauhan.github.io/posts/hello-worldHello WorldMy first post.https://navanchauhan.github.io/posts/hello-worldTue, 16 Apr 2019 17:39:00 +0530Hello World

Why a Hello World post?

Just re-did the entire website using Publish (Publish by John Sundell). So, a new hello world post :)

]]>
https://navanchauhan.github.io/posts/2010-01-24-experimentsExperimentsJust a markdown file for all experiments related to the websitehttps://navanchauhan.github.io/posts/2010-01-24-experimentsSun, 24 Jan 2010 23:43:00 +0530Experiments

https://s3-us-west-2.amazonaws.com/s.cdpn.io/148866/img-original.jpg

]]>
\ No newline at end of file diff --git a/feed.rss b/feed.rss index 07b398c..12ab6d2 100644 --- a/feed.rss +++ b/feed.rss @@ -1,4 +1,7 @@ -Navan ChauhanWelcome to my personal fragment of the internet.https://navanchauhan.github.io/enTue, 4 Feb 2020 14:04:18 +0530Tue, 4 Feb 2020 14:04:18 +0530250https://navanchauhan.github.io/posts/2020-01-19-Connect-To-Bluetooth-Devices-Linux-TerminalHow to setup Bluetooth on a Raspberry PiConnecting to Bluetooth Devices using terminal, tested on Raspberry Pi Zero Whttps://navanchauhan.github.io/posts/2020-01-19-Connect-To-Bluetooth-Devices-Linux-TerminalSun, 19 Jan 2020 15:27:00 +0530How to setup Bluetooth on a Raspberry Pi

This was tested on a Raspberry Pi Zero W

Enter in the Bluetooth Mode

pi@raspberrypi:~ $ bluetoothctl

[bluetooth]# agent on

[bluetooth]# default-agent

[bluetooth]# scan on

To Pair

While being in bluetooth mode

[bluetooth]# pair XX:XX:XX:XX:XX:XX

To Exit out of bluetoothctl anytime, just type exit

]]>
https://navanchauhan.github.io/posts/2020-01-16-Image-Classifier-Using-TuricreateCreating a Custom Image Classifier using Turicreate to detect Smoke and FireTutorial on creating a custom Image Classifier using Turicreate and a dataset from Kagglehttps://navanchauhan.github.io/posts/2020-01-16-Image-Classifier-Using-TuricreateThu, 16 Jan 2020 10:36:00 +0530Creating a Custom Image Classifier using Turicreate to detect Smoke and Fire

For setting up Kaggle with Google Colab, please refer to my previous post

Dataset

Mounting Google Drive

import os +Navan ChauhanWelcome to my personal fragment of the internet.https://navanchauhan.github.io/enMon, 2 Mar 2020 14:04:46 +0530Mon, 2 Mar 2020 14:04:46 +0530250https://navanchauhan.github.io/posts/2020-03-02-Open-PeepsOpen PeepsTrying out Open Peeps, a CC0 Libraryhttps://navanchauhan.github.io/posts/2020-03-02-Open-PeepsMon, 2 Mar 2020 13:52:00 +0530Open Peeps

About Open Peeps

Open Peeps is a hand-drawn illustration library to create scenes of people. You can use them in product illustration, marketing, comics, product states, user flows, personas, storyboarding, quinceañera invitations, or whatever you want! - Product Hunt

Some Examples

Standing

+ + +]]>
https://navanchauhan.github.io/posts/2020-01-19-Connect-To-Bluetooth-Devices-Linux-TerminalHow to setup Bluetooth on a Raspberry PiConnecting to Bluetooth Devices using terminal, tested on Raspberry Pi Zero Whttps://navanchauhan.github.io/posts/2020-01-19-Connect-To-Bluetooth-Devices-Linux-TerminalSun, 19 Jan 2020 15:27:00 +0530How to setup Bluetooth on a Raspberry Pi

This was tested on a Raspberry Pi Zero W

Enter in the Bluetooth Mode

pi@raspberrypi:~ $ bluetoothctl

[bluetooth]# agent on

[bluetooth]# default-agent

[bluetooth]# scan on

To Pair

While being in bluetooth mode

[bluetooth]# pair XX:XX:XX:XX:XX:XX
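After pairing succeeds, the device can be connected and, optionally, trusted so that it reconnects automatically (these two commands are an addition and are not part of the original notes):

[bluetooth]# trust XX:XX:XX:XX:XX:XX
[bluetooth]# connect XX:XX:XX:XX:XX:XX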

To Exit out of bluetoothctl anytime, just type exit

]]>
https://navanchauhan.github.io/posts/2020-01-16-Image-Classifier-Using-TuricreateCreating a Custom Image Classifier using Turicreate to detect Smoke and FireTutorial on creating a custom Image Classifier using Turicreate and a dataset from Kagglehttps://navanchauhan.github.io/posts/2020-01-16-Image-Classifier-Using-TuricreateThu, 16 Jan 2020 10:36:00 +0530Creating a Custom Image Classifier using Turicreate to detect Smoke and Fire

For setting up Kaggle with Google Colab, please refer to my previous post

Dataset

Mounting Google Drive

import os from google.colab import drive drive.mount('/content/drive')
diff --git a/images/04d0580b-d347-476a-232d-9568839851cd.webPlatform 3.png b/images/04d0580b-d347-476a-232d-9568839851cd.webPlatform 3.png new file mode 100644 index 0000000..c277bbc Binary files /dev/null and b/images/04d0580b-d347-476a-232d-9568839851cd.webPlatform 3.png differ diff --git a/images/04d0580b-d347-476a-232d-9568839851cd.webPlatform 4.png b/images/04d0580b-d347-476a-232d-9568839851cd.webPlatform 4.png new file mode 100644 index 0000000..c277bbc Binary files /dev/null and b/images/04d0580b-d347-476a-232d-9568839851cd.webPlatform 4.png differ diff --git a/images/04d0580b-d347-476a-232d-9568839851cd.webPlatform 5.png b/images/04d0580b-d347-476a-232d-9568839851cd.webPlatform 5.png new file mode 100644 index 0000000..c277bbc Binary files /dev/null and b/images/04d0580b-d347-476a-232d-9568839851cd.webPlatform 5.png differ diff --git a/images/14a6e126-4866-93de-8df5-e0e6a3c65da1.webPlatform 3.png b/images/14a6e126-4866-93de-8df5-e0e6a3c65da1.webPlatform 3.png new file mode 100644 index 0000000..643a2bd Binary files /dev/null and b/images/14a6e126-4866-93de-8df5-e0e6a3c65da1.webPlatform 3.png differ diff --git a/images/14a6e126-4866-93de-8df5-e0e6a3c65da1.webPlatform 4.png b/images/14a6e126-4866-93de-8df5-e0e6a3c65da1.webPlatform 4.png new file mode 100644 index 0000000..643a2bd Binary files /dev/null and b/images/14a6e126-4866-93de-8df5-e0e6a3c65da1.webPlatform 4.png differ diff --git a/images/14a6e126-4866-93de-8df5-e0e6a3c65da1.webPlatform 5.png b/images/14a6e126-4866-93de-8df5-e0e6a3c65da1.webPlatform 5.png new file mode 100644 index 0000000..643a2bd Binary files /dev/null and b/images/14a6e126-4866-93de-8df5-e0e6a3c65da1.webPlatform 5.png differ diff --git a/images/15294abc-6c7c-ffb8-df8d-d2fad23f50b0.webPlatform 4.png b/images/15294abc-6c7c-ffb8-df8d-d2fad23f50b0.webPlatform 4.png new file mode 100644 index 0000000..1dcb03d Binary files /dev/null and b/images/15294abc-6c7c-ffb8-df8d-d2fad23f50b0.webPlatform 4.png differ diff --git a/images/15294abc-6c7c-ffb8-df8d-d2fad23f50b0.webPlatform 5.png b/images/15294abc-6c7c-ffb8-df8d-d2fad23f50b0.webPlatform 5.png new file mode 100644 index 0000000..1dcb03d Binary files /dev/null and b/images/15294abc-6c7c-ffb8-df8d-d2fad23f50b0.webPlatform 5.png differ diff --git a/images/6b5f7f70-557f-0e4b-3d76-127534525db9.webPlatform 4.png b/images/6b5f7f70-557f-0e4b-3d76-127534525db9.webPlatform 4.png new file mode 100644 index 0000000..02e58c4 Binary files /dev/null and b/images/6b5f7f70-557f-0e4b-3d76-127534525db9.webPlatform 4.png differ diff --git a/images/6b5f7f70-557f-0e4b-3d76-127534525db9.webPlatform 5.png b/images/6b5f7f70-557f-0e4b-3d76-127534525db9.webPlatform 5.png new file mode 100644 index 0000000..02e58c4 Binary files /dev/null and b/images/6b5f7f70-557f-0e4b-3d76-127534525db9.webPlatform 5.png differ diff --git a/images/82e24f17-2e71-90d8-67a7-587163282ebf.webPlatform 3.png b/images/82e24f17-2e71-90d8-67a7-587163282ebf.webPlatform 3.png new file mode 100644 index 0000000..cc36571 Binary files /dev/null and b/images/82e24f17-2e71-90d8-67a7-587163282ebf.webPlatform 3.png differ diff --git a/images/82e24f17-2e71-90d8-67a7-587163282ebf.webPlatform 4.png b/images/82e24f17-2e71-90d8-67a7-587163282ebf.webPlatform 4.png new file mode 100644 index 0000000..cc36571 Binary files /dev/null and b/images/82e24f17-2e71-90d8-67a7-587163282ebf.webPlatform 4.png differ diff --git a/images/82e24f17-2e71-90d8-67a7-587163282ebf.webPlatform 5.png b/images/82e24f17-2e71-90d8-67a7-587163282ebf.webPlatform 5.png new file mode 100644 index 0000000..cc36571 
Binary files /dev/null and b/images/82e24f17-2e71-90d8-67a7-587163282ebf.webPlatform 5.png differ diff --git a/images/8c0ffe9e-b615-96cd-3e18-ab4307c859a0.webPlatform 4.png b/images/8c0ffe9e-b615-96cd-3e18-ab4307c859a0.webPlatform 4.png new file mode 100644 index 0000000..76e34bc Binary files /dev/null and b/images/8c0ffe9e-b615-96cd-3e18-ab4307c859a0.webPlatform 4.png differ diff --git a/images/8c0ffe9e-b615-96cd-3e18-ab4307c859a0.webPlatform 5.png b/images/8c0ffe9e-b615-96cd-3e18-ab4307c859a0.webPlatform 5.png new file mode 100644 index 0000000..76e34bc Binary files /dev/null and b/images/8c0ffe9e-b615-96cd-3e18-ab4307c859a0.webPlatform 5.png differ diff --git a/images/9384518b-2a6c-0abc-136c-8c8faf49c71b.webPlatform 4.png b/images/9384518b-2a6c-0abc-136c-8c8faf49c71b.webPlatform 4.png new file mode 100644 index 0000000..f659b0b Binary files /dev/null and b/images/9384518b-2a6c-0abc-136c-8c8faf49c71b.webPlatform 4.png differ diff --git a/images/9384518b-2a6c-0abc-136c-8c8faf49c71b.webPlatform 5.png b/images/9384518b-2a6c-0abc-136c-8c8faf49c71b.webPlatform 5.png new file mode 100644 index 0000000..f659b0b Binary files /dev/null and b/images/9384518b-2a6c-0abc-136c-8c8faf49c71b.webPlatform 5.png differ diff --git a/images/9bf4aee8-92e3-932f-5388-7731928b5692.webPlatform 3.png b/images/9bf4aee8-92e3-932f-5388-7731928b5692.webPlatform 3.png new file mode 100644 index 0000000..31c4507 Binary files /dev/null and b/images/9bf4aee8-92e3-932f-5388-7731928b5692.webPlatform 3.png differ diff --git a/images/9bf4aee8-92e3-932f-5388-7731928b5692.webPlatform 4.png b/images/9bf4aee8-92e3-932f-5388-7731928b5692.webPlatform 4.png new file mode 100644 index 0000000..31c4507 Binary files /dev/null and b/images/9bf4aee8-92e3-932f-5388-7731928b5692.webPlatform 4.png differ diff --git a/images/9bf4aee8-92e3-932f-5388-7731928b5692.webPlatform 5.png b/images/9bf4aee8-92e3-932f-5388-7731928b5692.webPlatform 5.png new file mode 100644 index 0000000..31c4507 Binary files /dev/null and b/images/9bf4aee8-92e3-932f-5388-7731928b5692.webPlatform 5.png differ diff --git a/images/9dc22996-fd1b-b2d3-3627-cef4fa224e25.webPlatform 3.png b/images/9dc22996-fd1b-b2d3-3627-cef4fa224e25.webPlatform 3.png new file mode 100644 index 0000000..baf1814 Binary files /dev/null and b/images/9dc22996-fd1b-b2d3-3627-cef4fa224e25.webPlatform 3.png differ diff --git a/images/9dc22996-fd1b-b2d3-3627-cef4fa224e25.webPlatform 4.png b/images/9dc22996-fd1b-b2d3-3627-cef4fa224e25.webPlatform 4.png new file mode 100644 index 0000000..baf1814 Binary files /dev/null and b/images/9dc22996-fd1b-b2d3-3627-cef4fa224e25.webPlatform 4.png differ diff --git a/images/9dc22996-fd1b-b2d3-3627-cef4fa224e25.webPlatform 5.png b/images/9dc22996-fd1b-b2d3-3627-cef4fa224e25.webPlatform 5.png new file mode 100644 index 0000000..baf1814 Binary files /dev/null and b/images/9dc22996-fd1b-b2d3-3627-cef4fa224e25.webPlatform 5.png differ diff --git a/images/afd91c53-cfd0-b52e-ca49-1db0cc292b7d.webPlatform 3.png b/images/afd91c53-cfd0-b52e-ca49-1db0cc292b7d.webPlatform 3.png new file mode 100644 index 0000000..adbf1a3 Binary files /dev/null and b/images/afd91c53-cfd0-b52e-ca49-1db0cc292b7d.webPlatform 3.png differ diff --git a/images/afd91c53-cfd0-b52e-ca49-1db0cc292b7d.webPlatform 4.png b/images/afd91c53-cfd0-b52e-ca49-1db0cc292b7d.webPlatform 4.png new file mode 100644 index 0000000..adbf1a3 Binary files /dev/null and b/images/afd91c53-cfd0-b52e-ca49-1db0cc292b7d.webPlatform 4.png differ diff --git a/images/afd91c53-cfd0-b52e-ca49-1db0cc292b7d.webPlatform 5.png 
b/images/afd91c53-cfd0-b52e-ca49-1db0cc292b7d.webPlatform 5.png new file mode 100644 index 0000000..adbf1a3 Binary files /dev/null and b/images/afd91c53-cfd0-b52e-ca49-1db0cc292b7d.webPlatform 5.png differ diff --git a/images/b0cac729-56cb-2a63-7e8b-ac62a038a023.webPlatform 4.png b/images/b0cac729-56cb-2a63-7e8b-ac62a038a023.webPlatform 4.png new file mode 100644 index 0000000..7cfe2a7 Binary files /dev/null and b/images/b0cac729-56cb-2a63-7e8b-ac62a038a023.webPlatform 4.png differ diff --git a/images/b0cac729-56cb-2a63-7e8b-ac62a038a023.webPlatform 5.png b/images/b0cac729-56cb-2a63-7e8b-ac62a038a023.webPlatform 5.png new file mode 100644 index 0000000..7cfe2a7 Binary files /dev/null and b/images/b0cac729-56cb-2a63-7e8b-ac62a038a023.webPlatform 5.png differ diff --git a/images/bb0aca46-4612-c284-055f-58850c0730bd.webPlatform 3.png b/images/bb0aca46-4612-c284-055f-58850c0730bd.webPlatform 3.png new file mode 100644 index 0000000..a5be51a Binary files /dev/null and b/images/bb0aca46-4612-c284-055f-58850c0730bd.webPlatform 3.png differ diff --git a/images/bb0aca46-4612-c284-055f-58850c0730bd.webPlatform 4.png b/images/bb0aca46-4612-c284-055f-58850c0730bd.webPlatform 4.png new file mode 100644 index 0000000..a5be51a Binary files /dev/null and b/images/bb0aca46-4612-c284-055f-58850c0730bd.webPlatform 4.png differ diff --git a/images/bb0aca46-4612-c284-055f-58850c0730bd.webPlatform 5.png b/images/bb0aca46-4612-c284-055f-58850c0730bd.webPlatform 5.png new file mode 100644 index 0000000..a5be51a Binary files /dev/null and b/images/bb0aca46-4612-c284-055f-58850c0730bd.webPlatform 5.png differ diff --git a/images/c5840a63-85f5-62b0-c68f-2faa4aaea42b.webPlatform 4.png b/images/c5840a63-85f5-62b0-c68f-2faa4aaea42b.webPlatform 4.png new file mode 100644 index 0000000..bbc54fc Binary files /dev/null and b/images/c5840a63-85f5-62b0-c68f-2faa4aaea42b.webPlatform 4.png differ diff --git a/images/c5840a63-85f5-62b0-c68f-2faa4aaea42b.webPlatform 5.png b/images/c5840a63-85f5-62b0-c68f-2faa4aaea42b.webPlatform 5.png new file mode 100644 index 0000000..bbc54fc Binary files /dev/null and b/images/c5840a63-85f5-62b0-c68f-2faa4aaea42b.webPlatform 5.png differ diff --git a/images/cbac5b1d-0299-9db6-3747-c7aeaaaa37d0.webPlatform 3.png b/images/cbac5b1d-0299-9db6-3747-c7aeaaaa37d0.webPlatform 3.png new file mode 100644 index 0000000..80c3c4f Binary files /dev/null and b/images/cbac5b1d-0299-9db6-3747-c7aeaaaa37d0.webPlatform 3.png differ diff --git a/images/cbac5b1d-0299-9db6-3747-c7aeaaaa37d0.webPlatform 4.png b/images/cbac5b1d-0299-9db6-3747-c7aeaaaa37d0.webPlatform 4.png new file mode 100644 index 0000000..80c3c4f Binary files /dev/null and b/images/cbac5b1d-0299-9db6-3747-c7aeaaaa37d0.webPlatform 4.png differ diff --git a/images/cbac5b1d-0299-9db6-3747-c7aeaaaa37d0.webPlatform 5.png b/images/cbac5b1d-0299-9db6-3747-c7aeaaaa37d0.webPlatform 5.png new file mode 100644 index 0000000..80c3c4f Binary files /dev/null and b/images/cbac5b1d-0299-9db6-3747-c7aeaaaa37d0.webPlatform 5.png differ diff --git a/images/e429a798-7e86-1f02-565e-39dfab41fe36.webPlatform 3.png b/images/e429a798-7e86-1f02-565e-39dfab41fe36.webPlatform 3.png new file mode 100644 index 0000000..f1bc144 Binary files /dev/null and b/images/e429a798-7e86-1f02-565e-39dfab41fe36.webPlatform 3.png differ diff --git a/images/e429a798-7e86-1f02-565e-39dfab41fe36.webPlatform 4.png b/images/e429a798-7e86-1f02-565e-39dfab41fe36.webPlatform 4.png new file mode 100644 index 0000000..f1bc144 Binary files /dev/null and 
b/images/e429a798-7e86-1f02-565e-39dfab41fe36.webPlatform 4.png differ diff --git a/images/e429a798-7e86-1f02-565e-39dfab41fe36.webPlatform 5.png b/images/e429a798-7e86-1f02-565e-39dfab41fe36.webPlatform 5.png new file mode 100644 index 0000000..f1bc144 Binary files /dev/null and b/images/e429a798-7e86-1f02-565e-39dfab41fe36.webPlatform 5.png differ diff --git a/images/f1579c61-f17f-ff49-3f97-e942f202bebf.webPlatform 3.png b/images/f1579c61-f17f-ff49-3f97-e942f202bebf.webPlatform 3.png new file mode 100644 index 0000000..aad0184 Binary files /dev/null and b/images/f1579c61-f17f-ff49-3f97-e942f202bebf.webPlatform 3.png differ diff --git a/images/f1579c61-f17f-ff49-3f97-e942f202bebf.webPlatform 4.png b/images/f1579c61-f17f-ff49-3f97-e942f202bebf.webPlatform 4.png new file mode 100644 index 0000000..aad0184 Binary files /dev/null and b/images/f1579c61-f17f-ff49-3f97-e942f202bebf.webPlatform 4.png differ diff --git a/images/f1579c61-f17f-ff49-3f97-e942f202bebf.webPlatform 5.png b/images/f1579c61-f17f-ff49-3f97-e942f202bebf.webPlatform 5.png new file mode 100644 index 0000000..aad0184 Binary files /dev/null and b/images/f1579c61-f17f-ff49-3f97-e942f202bebf.webPlatform 5.png differ diff --git a/images/f178697f-630b-bafd-7c7d-e1287b98a969.webPlatform 3.png b/images/f178697f-630b-bafd-7c7d-e1287b98a969.webPlatform 3.png new file mode 100644 index 0000000..d4c320d Binary files /dev/null and b/images/f178697f-630b-bafd-7c7d-e1287b98a969.webPlatform 3.png differ diff --git a/images/f178697f-630b-bafd-7c7d-e1287b98a969.webPlatform 4.png b/images/f178697f-630b-bafd-7c7d-e1287b98a969.webPlatform 4.png new file mode 100644 index 0000000..d4c320d Binary files /dev/null and b/images/f178697f-630b-bafd-7c7d-e1287b98a969.webPlatform 4.png differ diff --git a/images/f178697f-630b-bafd-7c7d-e1287b98a969.webPlatform 5.png b/images/f178697f-630b-bafd-7c7d-e1287b98a969.webPlatform 5.png new file mode 100644 index 0000000..d4c320d Binary files /dev/null and b/images/f178697f-630b-bafd-7c7d-e1287b98a969.webPlatform 5.png differ diff --git a/images/f400aaaa-861c-78c0-0919-07a886e57304.webPlatform 3.png b/images/f400aaaa-861c-78c0-0919-07a886e57304.webPlatform 3.png new file mode 100644 index 0000000..bb5762e Binary files /dev/null and b/images/f400aaaa-861c-78c0-0919-07a886e57304.webPlatform 3.png differ diff --git a/images/f400aaaa-861c-78c0-0919-07a886e57304.webPlatform 4.png b/images/f400aaaa-861c-78c0-0919-07a886e57304.webPlatform 4.png new file mode 100644 index 0000000..bb5762e Binary files /dev/null and b/images/f400aaaa-861c-78c0-0919-07a886e57304.webPlatform 4.png differ diff --git a/images/f400aaaa-861c-78c0-0919-07a886e57304.webPlatform 5.png b/images/f400aaaa-861c-78c0-0919-07a886e57304.webPlatform 5.png new file mode 100644 index 0000000..bb5762e Binary files /dev/null and b/images/f400aaaa-861c-78c0-0919-07a886e57304.webPlatform 5.png differ diff --git a/images/f7842765-fff5-aa39-9f7f-fdd3024d4056.webPlatform 3.png b/images/f7842765-fff5-aa39-9f7f-fdd3024d4056.webPlatform 3.png new file mode 100644 index 0000000..ea79c57 Binary files /dev/null and b/images/f7842765-fff5-aa39-9f7f-fdd3024d4056.webPlatform 3.png differ diff --git a/images/f7842765-fff5-aa39-9f7f-fdd3024d4056.webPlatform 4.png b/images/f7842765-fff5-aa39-9f7f-fdd3024d4056.webPlatform 4.png new file mode 100644 index 0000000..ea79c57 Binary files /dev/null and b/images/f7842765-fff5-aa39-9f7f-fdd3024d4056.webPlatform 4.png differ diff --git a/images/f7842765-fff5-aa39-9f7f-fdd3024d4056.webPlatform 5.png 
b/images/f7842765-fff5-aa39-9f7f-fdd3024d4056.webPlatform 5.png new file mode 100644 index 0000000..ea79c57 Binary files /dev/null and b/images/f7842765-fff5-aa39-9f7f-fdd3024d4056.webPlatform 5.png differ diff --git a/images/favicon 3.png b/images/favicon 3.png new file mode 100644 index 0000000..ce3a263 Binary files /dev/null and b/images/favicon 3.png differ diff --git a/images/favicon 4.png b/images/favicon 4.png new file mode 100644 index 0000000..ce3a263 Binary files /dev/null and b/images/favicon 4.png differ diff --git a/images/favicon 5.png b/images/favicon 5.png new file mode 100644 index 0000000..ce3a263 Binary files /dev/null and b/images/favicon 5.png differ diff --git a/images/logo 3.png b/images/logo 3.png new file mode 100644 index 0000000..caaf43c Binary files /dev/null and b/images/logo 3.png differ diff --git a/images/logo 4.png b/images/logo 4.png new file mode 100644 index 0000000..caaf43c Binary files /dev/null and b/images/logo 4.png differ diff --git a/images/logo 5.png b/images/logo 5.png new file mode 100644 index 0000000..caaf43c Binary files /dev/null and b/images/logo 5.png differ diff --git a/images/me 3.jpeg b/images/me 3.jpeg new file mode 100644 index 0000000..cf70e23 Binary files /dev/null and b/images/me 3.jpeg differ diff --git a/images/me 4.jpeg b/images/me 4.jpeg new file mode 100644 index 0000000..cf70e23 Binary files /dev/null and b/images/me 4.jpeg differ diff --git a/images/me 5.jpeg b/images/me 5.jpeg new file mode 100644 index 0000000..cf70e23 Binary files /dev/null and b/images/me 5.jpeg differ diff --git a/index 4.html b/index 4.html new file mode 100644 index 0000000..8ea2c4a --- /dev/null +++ b/index 4.html @@ -0,0 +1 @@ +Hi! | Navan Chauhan

Hi!

Welcome to my personal fragment of the internet.

Latest content

\ No newline at end of file diff --git a/index.html b/index.html index d748327..8ea2c4a 100644 --- a/index.html +++ b/index.html @@ -1 +1 @@ -Hi! | Navan Chauhan

Hi!

Welcome to my personal fragment of the internet.

Latest content

\ No newline at end of file +Hi! | Navan Chauhan

Hi!

Welcome to my personal fragment of the internet.

Latest content

\ No newline at end of file diff --git a/manifest 4.json b/manifest 4.json new file mode 100644 index 0000000..bb4ec5d --- /dev/null +++ b/manifest 4.json @@ -0,0 +1,119 @@ +{ + "dir": "ltr", + "lang": "en", + "name": "Hi! | Navan Chauhan", + "scope": "/", + "display": "fullscreen", + "start_url": "https://navanchauhan.github.io/", + "short_name": "Navan Chauhan", + "theme_color": "black", + "description": "Welcome to my personal fragment of the internet.", + "orientation": "any", + "background_color": "transparent", + "related_applications": [], + "prefer_related_applications": false, + "icons": [ + { + "src": "/images/favicon.png", + "type": "image/png", + "sizes": "32x32" + }, + { + "src": "/images/14a6e126-4866-93de-8df5-e0e6a3c65da1.webPlatform.png", + "sizes": "44x44", + "type": "image/png" + }, + { + "src": "/images/6b5f7f70-557f-0e4b-3d76-127534525db9.webPlatform.png", + "sizes": "48x48", + "type": "image/png" + }, + { + "src": "/images/c5840a63-85f5-62b0-c68f-2faa4aaea42b.webPlatform.png", + "sizes": "1240x600", + "type": "image/png" + }, + { + "src": "/images/82e24f17-2e71-90d8-67a7-587163282ebf.webPlatform.png", + "sizes": "300x300", + "type": "image/png" + }, + { + "src": "/images/f7842765-fff5-aa39-9f7f-fdd3024d4056.webPlatform.png", + "sizes": "150x150", + "type": "image/png" + }, + { + "src": "/images/9384518b-2a6c-0abc-136c-8c8faf49c71b.webPlatform.png", + "sizes": "88x88", + "type": "image/png" + }, + { + "src": "/images/15294abc-6c7c-ffb8-df8d-d2fad23f50b0.webPlatform.png", + "sizes": "24x24", + "type": "image/png" + }, + { + "src": "/images/f178697f-630b-bafd-7c7d-e1287b98a969.webPlatform.png", + "sizes": "50x50", + "type": "image/png" + }, + { + "src": "/images/f400aaaa-861c-78c0-0919-07a886e57304.webPlatform.png", + "sizes": "620x300", + "type": "image/png" + }, + { + "src": "/images/8c0ffe9e-b615-96cd-3e18-ab4307c859a0.webPlatform.png", + "sizes": "192x192", + "type": "image/png" + }, + { + "src": "/images/f1579c61-f17f-ff49-3f97-e942f202bebf.webPlatform.png", + "sizes": "144x144", + "type": "image/png" + }, + { + "src": "/images/9bf4aee8-92e3-932f-5388-7731928b5692.webPlatform.png", + "sizes": "96x96", + "type": "image/png" + }, + { + "src": "/images/9dc22996-fd1b-b2d3-3627-cef4fa224e25.webPlatform.png", + "sizes": "72x72", + "type": "image/png" + }, + { + "src": "/images/afd91c53-cfd0-b52e-ca49-1db0cc292b7d.webPlatform.png", + "sizes": "36x36", + "type": "image/png" + }, + { + "src": "/images/e429a798-7e86-1f02-565e-39dfab41fe36.webPlatform.png", + "sizes": "1024x1024", + "type": "image/png" + }, + { + "src": "/images/04d0580b-d347-476a-232d-9568839851cd.webPlatform.png", + "sizes": "180x180", + "type": "image/png" + }, + { + "src": "/images/cbac5b1d-0299-9db6-3747-c7aeaaaa37d0.webPlatform.png", + "sizes": "152x152", + "type": "image/png" + }, + { + "src": "/images/b0cac729-56cb-2a63-7e8b-ac62a038a023.webPlatform.png", + "sizes": "120x120", + "type": "image/png" + }, + { + "src": "/images/bb0aca46-4612-c284-055f-58850c0730bd.webPlatform.png", + "sizes": "76x76", + "type": "image/png" + } + ], + "url": "https://navanchauhan.github.io", + "screenshots": [] +} diff --git a/posts/2010-01-24-experiments/index 4.html b/posts/2010-01-24-experiments/index 4.html new file mode 100644 index 0000000..1afb81b --- /dev/null +++ b/posts/2010-01-24-experiments/index 4.html @@ -0,0 +1 @@ +Experiments | Navan Chauhan
0 minute readCreated on January 24, 2010Last modified on February 4, 2020

Experiments

https://s3-us-west-2.amazonaws.com/s.cdpn.io/148866/img-original.jpg

Tagged with:
\ No newline at end of file diff --git a/posts/2010-01-24-experiments/index 8.html b/posts/2010-01-24-experiments/index 8.html new file mode 100644 index 0000000..1afb81b --- /dev/null +++ b/posts/2010-01-24-experiments/index 8.html @@ -0,0 +1 @@ +Experiments | Navan Chauhan
0 minute readCreated on January 24, 2010Last modified on February 4, 2020

Experiments

https://s3-us-west-2.amazonaws.com/s.cdpn.io/148866/img-original.jpg

Tagged with:
\ No newline at end of file diff --git a/posts/2019-12-08-Image-Classifier-Tensorflow/index 2.html b/posts/2019-12-08-Image-Classifier-Tensorflow/index 2.html new file mode 100644 index 0000000..0507f4f --- /dev/null +++ b/posts/2019-12-08-Image-Classifier-Tensorflow/index 2.html @@ -0,0 +1,123 @@ +Creating a Custom Image Classifier using Tensorflow 2.x and Keras for Detecting Malaria | Navan Chauhan
4 minute readCreated on December 8, 2019Last modified on January 18, 2020

Creating a Custom Image Classifier using Tensorflow 2.x and Keras for Detecting Malaria

Done during Google Code-In. Org: Tensorflow.

Imports

%tensorflow_version 2.x #This is for telling Colab that you want to use TF 2.0, ignore if running on local machine + +from PIL import Image # We use the PIL Library to resize images +import numpy as np +import os +import cv2 +import tensorflow as tf +from tensorflow.keras import datasets, layers, models +import pandas as pd +import matplotlib.pyplot as plt +from keras.models import Sequential +from keras.layers import Conv2D,MaxPooling2D,Dense,Flatten,Dropout +
+ +

Dataset

Fetching the Data

!wget ftp://lhcftp.nlm.nih.gov/Open-Access-Datasets/Malaria/cell_images.zip +!unzip cell_images.zip +
+ +

Processing the Data

We resize all the images to 50x50 and append the NumPy array of each image, together with its label (0 for parasitized, 1 for uninfected), to common arrays.

data = [] +labels = [] + +Parasitized = os.listdir("./cell_images/Parasitized/") +for parasite in Parasitized: + try: + image=cv2.imread("./cell_images/Parasitized/"+parasite) + image_from_array = Image.fromarray(image, 'RGB') + size_image = image_from_array.resize((50, 50)) + data.append(np.array(size_image)) + labels.append(0) + except AttributeError: + print("") + +Uninfected = os.listdir("./cell_images/Uninfected/") +for uninfect in Uninfected: + try: + image=cv2.imread("./cell_images/Uninfected/"+uninfect) + image_from_array = Image.fromarray(image, 'RGB') + size_image = image_from_array.resize((50, 50)) + data.append(np.array(size_image)) + labels.append(1) + except AttributeError: + print("") +
+ +

Splitting Data

df = np.array(data) +labels = np.array(labels) +(X_train, X_test) = df[(int)(0.1*len(df)):],df[:(int)(0.1*len(df))] +(y_train, y_test) = labels[(int)(0.1*len(labels)):],labels[:(int)(0.1*len(labels))] +
+ +
s=np.arange(X_train.shape[0]) +np.random.shuffle(s) +X_train=X_train[s] +y_train=y_train[s] +X_train = X_train/255.0 +
+ +

Model

Creating Model

By creating a sequential model, we create a linear stack of layers.

Note: The input shape for the first layer is (50, 50, 3), which corresponds to the size of the resized images (50x50 pixels with 3 colour channels)

model = models.Sequential() +model.add(layers.Conv2D(filters=16, kernel_size=2, padding='same', activation='relu', input_shape=(50,50,3))) +model.add(layers.MaxPooling2D(pool_size=2)) +model.add(layers.Conv2D(filters=32,kernel_size=2,padding='same',activation='relu')) +model.add(layers.MaxPooling2D(pool_size=2)) +model.add(layers.Conv2D(filters=64,kernel_size=2,padding="same",activation="relu")) +model.add(layers.MaxPooling2D(pool_size=2)) +model.add(layers.Dropout(0.2)) +model.add(layers.Flatten()) +model.add(layers.Dense(500,activation="relu")) +model.add(layers.Dropout(0.2)) +model.add(layers.Dense(2,activation="softmax"))#2 represent output layer neurons +model.summary() +
+ +

Compiling Model

We use the Adam optimiser because it is an adaptive learning-rate optimization algorithm designed specifically for training deep neural networks, which means it adjusts its learning rate automatically to get the best results

model.compile(optimizer="adam", + loss="sparse_categorical_crossentropy", + metrics=["accuracy"]) +
+ +

Training Model

We train the model for 10 epochs on the training data and then validate it using the testing data

history = model.fit(X_train,y_train, epochs=10, validation_data=(X_test,y_test)) +
+ +
Train on 24803 samples, validate on 2755 samples +Epoch 1/10 +24803/24803 [==============================] - 57s 2ms/sample - loss: 0.0786 - accuracy: 0.9729 - val_loss: 0.0000e+00 - val_accuracy: 1.0000 +Epoch 2/10 +24803/24803 [==============================] - 58s 2ms/sample - loss: 0.0746 - accuracy: 0.9731 - val_loss: 0.0290 - val_accuracy: 0.9996 +Epoch 3/10 +24803/24803 [==============================] - 58s 2ms/sample - loss: 0.0672 - accuracy: 0.9764 - val_loss: 0.0000e+00 - val_accuracy: 1.0000 +Epoch 4/10 +24803/24803 [==============================] - 58s 2ms/sample - loss: 0.0601 - accuracy: 0.9789 - val_loss: 0.0000e+00 - val_accuracy: 1.0000 +Epoch 5/10 +24803/24803 [==============================] - 58s 2ms/sample - loss: 0.0558 - accuracy: 0.9804 - val_loss: 0.0000e+00 - val_accuracy: 1.0000 +Epoch 6/10 +24803/24803 [==============================] - 57s 2ms/sample - loss: 0.0513 - accuracy: 0.9819 - val_loss: 0.0000e+00 - val_accuracy: 1.0000 +Epoch 7/10 +24803/24803 [==============================] - 58s 2ms/sample - loss: 0.0452 - accuracy: 0.9849 - val_loss: 0.3190 - val_accuracy: 0.9985 +Epoch 8/10 +24803/24803 [==============================] - 58s 2ms/sample - loss: 0.0404 - accuracy: 0.9858 - val_loss: 0.0000e+00 - val_accuracy: 1.0000 +Epoch 9/10 +24803/24803 [==============================] - 58s 2ms/sample - loss: 0.0352 - accuracy: 0.9878 - val_loss: 0.0000e+00 - val_accuracy: 1.0000 +Epoch 10/10 +24803/24803 [==============================] - 58s 2ms/sample - loss: 0.0373 - accuracy: 0.9865 - val_loss: 0.0000e+00 - val_accuracy: 1.0000 +
+ +

Results

accuracy = history.history['accuracy'][-1]*100 +loss = history.history['loss'][-1]*100 +val_accuracy = history.history['val_accuracy'][-1]*100 +val_loss = history.history['val_loss'][-1]*100 + +print( + 'Accuracy:', accuracy, + '\nLoss:', loss, + '\nValidation Accuracy:', val_accuracy, + '\nValidation Loss:', val_loss +) +
+ +
Accuracy: 98.64532351493835 +Loss: 3.732407123270176 +Validation Accuracy: 100.0 +Validation Loss: 0.0 +
+ +

We have achieved 98% Accuracy!
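To see how the accuracy evolved over the 10 epochs, we can plot the history object returned by model.fit. This is a small sketch using the matplotlib import from earlier and was not part of the original notebook:

# Plot per-epoch training and validation accuracy from the fit history
plt.plot(history.history['accuracy'], label='Training accuracy')
plt.plot(history.history['val_accuracy'], label='Validation accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()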

Link to Colab Notebook

Tagged with:
\ No newline at end of file diff --git a/posts/2019-12-08-Image-Classifier-Tensorflow/index 5.html b/posts/2019-12-08-Image-Classifier-Tensorflow/index 5.html new file mode 100644 index 0000000..0507f4f --- /dev/null +++ b/posts/2019-12-08-Image-Classifier-Tensorflow/index 5.html @@ -0,0 +1,123 @@ +Creating a Custom Image Classifier using Tensorflow 2.x and Keras for Detecting Malaria | Navan Chauhan
4 minute readCreated on December 8, 2019Last modified on January 18, 2020

Creating a Custom Image Classifier using Tensorflow 2.x and Keras for Detecting Malaria

Done during Google Code-In. Org: Tensorflow.

Imports

%tensorflow_version 2.x #This is for telling Colab that you want to use TF 2.0, ignore if running on local machine + +from PIL import Image # We use the PIL Library to resize images +import numpy as np +import os +import cv2 +import tensorflow as tf +from tensorflow.keras import datasets, layers, models +import pandas as pd +import matplotlib.pyplot as plt +from keras.models import Sequential +from keras.layers import Conv2D,MaxPooling2D,Dense,Flatten,Dropout +
+ +

Dataset

Fetching the Data

!wget ftp://lhcftp.nlm.nih.gov/Open-Access-Datasets/Malaria/cell_images.zip +!unzip cell_images.zip +
+ +

Processing the Data

We resize all the images to 50x50 and append the NumPy array of each image, together with its label (0 for parasitized, 1 for uninfected), to common arrays.

data = [] +labels = [] + +Parasitized = os.listdir("./cell_images/Parasitized/") +for parasite in Parasitized: + try: + image=cv2.imread("./cell_images/Parasitized/"+parasite) + image_from_array = Image.fromarray(image, 'RGB') + size_image = image_from_array.resize((50, 50)) + data.append(np.array(size_image)) + labels.append(0) + except AttributeError: + print("") + +Uninfected = os.listdir("./cell_images/Uninfected/") +for uninfect in Uninfected: + try: + image=cv2.imread("./cell_images/Uninfected/"+uninfect) + image_from_array = Image.fromarray(image, 'RGB') + size_image = image_from_array.resize((50, 50)) + data.append(np.array(size_image)) + labels.append(1) + except AttributeError: + print("") +
+ +

Splitting Data

df = np.array(data) +labels = np.array(labels) +(X_train, X_test) = df[(int)(0.1*len(df)):],df[:(int)(0.1*len(df))] +(y_train, y_test) = labels[(int)(0.1*len(labels)):],labels[:(int)(0.1*len(labels))] +
+ +
s=np.arange(X_train.shape[0]) +np.random.shuffle(s) +X_train=X_train[s] +y_train=y_train[s] +X_train = X_train/255.0 +
+ +

Model

Creating Model

By creating a sequential model, we create a linear stack of layers.

Note: The input shape for the first layer is (50, 50, 3), which corresponds to the size of the resized images (50x50 pixels with 3 colour channels)

model = models.Sequential() +model.add(layers.Conv2D(filters=16, kernel_size=2, padding='same', activation='relu', input_shape=(50,50,3))) +model.add(layers.MaxPooling2D(pool_size=2)) +model.add(layers.Conv2D(filters=32,kernel_size=2,padding='same',activation='relu')) +model.add(layers.MaxPooling2D(pool_size=2)) +model.add(layers.Conv2D(filters=64,kernel_size=2,padding="same",activation="relu")) +model.add(layers.MaxPooling2D(pool_size=2)) +model.add(layers.Dropout(0.2)) +model.add(layers.Flatten()) +model.add(layers.Dense(500,activation="relu")) +model.add(layers.Dropout(0.2)) +model.add(layers.Dense(2,activation="softmax"))#2 represent output layer neurons +model.summary() +
+ +

Compiling Model

We use the Adam optimiser because it is an adaptive learning-rate optimization algorithm designed specifically for training deep neural networks, which means it adjusts its learning rate automatically to get the best results

model.compile(optimizer="adam", + loss="sparse_categorical_crossentropy", + metrics=["accuracy"]) +
+ +

Training Model

We train the model for 10 epochs on the training data and then validate it using the testing data

history = model.fit(X_train,y_train, epochs=10, validation_data=(X_test,y_test)) +
+ +
Train on 24803 samples, validate on 2755 samples +Epoch 1/10 +24803/24803 [==============================] - 57s 2ms/sample - loss: 0.0786 - accuracy: 0.9729 - val_loss: 0.0000e+00 - val_accuracy: 1.0000 +Epoch 2/10 +24803/24803 [==============================] - 58s 2ms/sample - loss: 0.0746 - accuracy: 0.9731 - val_loss: 0.0290 - val_accuracy: 0.9996 +Epoch 3/10 +24803/24803 [==============================] - 58s 2ms/sample - loss: 0.0672 - accuracy: 0.9764 - val_loss: 0.0000e+00 - val_accuracy: 1.0000 +Epoch 4/10 +24803/24803 [==============================] - 58s 2ms/sample - loss: 0.0601 - accuracy: 0.9789 - val_loss: 0.0000e+00 - val_accuracy: 1.0000 +Epoch 5/10 +24803/24803 [==============================] - 58s 2ms/sample - loss: 0.0558 - accuracy: 0.9804 - val_loss: 0.0000e+00 - val_accuracy: 1.0000 +Epoch 6/10 +24803/24803 [==============================] - 57s 2ms/sample - loss: 0.0513 - accuracy: 0.9819 - val_loss: 0.0000e+00 - val_accuracy: 1.0000 +Epoch 7/10 +24803/24803 [==============================] - 58s 2ms/sample - loss: 0.0452 - accuracy: 0.9849 - val_loss: 0.3190 - val_accuracy: 0.9985 +Epoch 8/10 +24803/24803 [==============================] - 58s 2ms/sample - loss: 0.0404 - accuracy: 0.9858 - val_loss: 0.0000e+00 - val_accuracy: 1.0000 +Epoch 9/10 +24803/24803 [==============================] - 58s 2ms/sample - loss: 0.0352 - accuracy: 0.9878 - val_loss: 0.0000e+00 - val_accuracy: 1.0000 +Epoch 10/10 +24803/24803 [==============================] - 58s 2ms/sample - loss: 0.0373 - accuracy: 0.9865 - val_loss: 0.0000e+00 - val_accuracy: 1.0000 +
+ +

Results

accuracy = history.history['accuracy'][-1]*100 +loss = history.history['loss'][-1]*100 +val_accuracy = history.history['val_accuracy'][-1]*100 +val_loss = history.history['val_loss'][-1]*100 + +print( + 'Accuracy:', accuracy, + '\nLoss:', loss, + '\nValidation Accuracy:', val_accuracy, + '\nValidation Loss:', val_loss +) +
+ +
Accuracy: 98.64532351493835 +Loss: 3.732407123270176 +Validation Accuracy: 100.0 +Validation Loss: 0.0 +
+ +

We have achieved 98% Accuracy!
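To see how the accuracy evolved over the 10 epochs, we can plot the history object returned by model.fit. This is a small sketch using the matplotlib import from earlier and was not part of the original notebook:

# Plot per-epoch training and validation accuracy from the fit history
plt.plot(history.history['accuracy'], label='Training accuracy')
plt.plot(history.history['val_accuracy'], label='Validation accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()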

Link to Colab Notebook

Tagged with:
\ No newline at end of file diff --git a/posts/2019-12-08-Splitting-Zips/index 2.html b/posts/2019-12-08-Splitting-Zips/index 2.html new file mode 100644 index 0000000..516cf6b --- /dev/null +++ b/posts/2019-12-08-Splitting-Zips/index 2.html @@ -0,0 +1,10 @@ +Splitting ZIPs into Multiple Parts | Navan Chauhan
0 minute readCreated on December 8, 2019Last modified on January 18, 2020

Splitting ZIPs into Multiple Parts

Tested on macOS

Creating the archive:

zip -r -s 5 oodlesofnoodles.zip website/ +
+ +

Here, 5 specifies the size of each split part in MB (KB and GB can also be specified)

For encrypting the zip:

zip -er -s 5 oodlesofnoodles.zip website +
+ +

Extracting Files

First we need to collect all the parts in one place, then:

zip -F oodlesofnoodles.zip --out merged.zip +
+ +
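Once all the parts have been merged into a single archive, it can be extracted as usual (assuming unzip is installed):

unzip merged.zip  # extract the merged archive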
Tagged with:
\ No newline at end of file diff --git a/posts/2019-12-08-Splitting-Zips/index 5.html b/posts/2019-12-08-Splitting-Zips/index 5.html new file mode 100644 index 0000000..516cf6b --- /dev/null +++ b/posts/2019-12-08-Splitting-Zips/index 5.html @@ -0,0 +1,10 @@ +Splitting ZIPs into Multiple Parts | Navan Chauhan
0 minute readCreated on December 8, 2019Last modified on January 18, 2020

Splitting ZIPs into Multiple Parts

Tested on macOS

Creating the archive:

zip -r -s 5 oodlesofnoodles.zip website/ +
+ +

Here, 5 specifies the size of each split part in MB (KB and GB can also be specified)

For encrypting the zip:

zip -er -s 5 oodlesofnoodles.zip website +
+ +

Extracting Files

First we need to collect all the parts in one place, then:

zip -F oodlesofnoodles.zip --out merged.zip +
+ +
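Once all the parts have been merged into a single archive, it can be extracted as usual (assuming unzip is installed):

unzip merged.zip  # extract the merged archive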
Tagged with:
\ No newline at end of file diff --git a/posts/2019-12-10-TensorFlow-Model-Prediction/index 2.html b/posts/2019-12-10-TensorFlow-Model-Prediction/index 2.html new file mode 100644 index 0000000..ebd6f4a --- /dev/null +++ b/posts/2019-12-10-TensorFlow-Model-Prediction/index 2.html @@ -0,0 +1,23 @@ +Making Predictions using Image Classifier (TensorFlow) | Navan Chauhan
1 minute readCreated on December 10, 2019Last modified on January 18, 2020

Making Predictions using Image Classifier (TensorFlow)

This was tested on TF 2.x and works as of 2019-12-10

If you want to understand how to make your own custom image classifier, please refer to my previous post.

If you followed my last post, then you created a model which took an image of dimensions 50x50 as an input.

First, we import the following (if we have not already imported them in the previous post)

import cv2 +import os +
+ +

Then we read the file using OpenCV.

image=cv2.imread(imagePath) +
+ +

The cv2.imread() function returns a NumPy array representing the image. Therefore, we need to convert it to a PIL image before we can resize it.

image_from_array = Image.fromarray(image, 'RGB') +
+ +

Then we resize the image

size_image = image_from_array.resize((50,50)) +
+ +

After this we create a batch consisting of only one image

p = np.expand_dims(size_image, 0) +
+ +

We then convert this uint8 datatype to a float32 datatype

img = tf.cast(p, tf.float32) +
+ +

Finally we make the prediction

print(['Infected','Uninfected'][np.argmax(model.predict(img))]) +
+ +

Infected
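Putting the steps above together, they can be wrapped in a small helper function. This is just a sketch; the name predict_cell is hypothetical, and it assumes the model and the imports from the previous post are already in scope:

def predict_cell(imagePath):
    # Read the image and convert it to a PIL image resized to the model's 50x50 input size
    image = cv2.imread(imagePath)
    image_from_array = Image.fromarray(image, 'RGB')
    size_image = image_from_array.resize((50, 50))
    # Build a single-image batch and cast it to float32 before predicting
    p = np.expand_dims(size_image, 0)
    img = tf.cast(p, tf.float32)
    return ['Infected', 'Uninfected'][np.argmax(model.predict(img))]

print(predict_cell(imagePath))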

Tagged with:
\ No newline at end of file diff --git a/posts/2019-12-10-TensorFlow-Model-Prediction/index 5.html b/posts/2019-12-10-TensorFlow-Model-Prediction/index 5.html new file mode 100644 index 0000000..ebd6f4a --- /dev/null +++ b/posts/2019-12-10-TensorFlow-Model-Prediction/index 5.html @@ -0,0 +1,23 @@ +Making Predictions using Image Classifier (TensorFlow) | Navan Chauhan
1 minute readCreated on December 10, 2019Last modified on January 18, 2020

Making Predictions using Image Classifier (TensorFlow)

This was tested on TF 2.x and works as of 2019-12-10

If you want to understand how to make your own custom image classifier, please refer to my previous post.

If you followed my last post, then you created a model which took an image of dimensions 50x50 as an input.

First, we import the following (if we have not already imported them in the previous post)

import cv2 +import os +
+ +

Then we read the file using OpenCV.

image=cv2.imread(imagePath) +
+ +

The cv2.imread() function returns a NumPy array representing the image. Therefore, we need to convert it to a PIL image before we can resize it.

image_from_array = Image.fromarray(image, 'RGB') +
+ +

Then we resize the image

size_image = image_from_array.resize((50,50)) +
+ +

After this we create a batch consisting of only one image

p = np.expand_dims(size_image, 0) +
+ +

We then convert this uint8 datatype to a float32 datatype

img = tf.cast(p, tf.float32) +
+ +

Finally we make the prediction

print(['Infected','Uninfected'][np.argmax(model.predict(img))]) +
+ +

Infected
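Putting the steps above together, they can be wrapped in a small helper function. This is just a sketch; the name predict_cell is hypothetical, and it assumes the model and the imports from the previous post are already in scope:

def predict_cell(imagePath):
    # Read the image and convert it to a PIL image resized to the model's 50x50 input size
    image = cv2.imread(imagePath)
    image_from_array = Image.fromarray(image, 'RGB')
    size_image = image_from_array.resize((50, 50))
    # Build a single-image batch and cast it to float32 before predicting
    p = np.expand_dims(size_image, 0)
    img = tf.cast(p, tf.float32)
    return ['Infected', 'Uninfected'][np.argmax(model.predict(img))]

print(predict_cell(imagePath))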

Tagged with:
\ No newline at end of file diff --git a/posts/2019-12-16-TensorFlow-Polynomial-Regression/index 2.html b/posts/2019-12-16-TensorFlow-Polynomial-Regression/index 2.html new file mode 100644 index 0000000..07fa95a --- /dev/null +++ b/posts/2019-12-16-TensorFlow-Polynomial-Regression/index 2.html @@ -0,0 +1,369 @@ +Polynomial Regression Using TensorFlow | Navan Chauhan
16 minute readCreated on December 16, 2019Last modified on January 18, 2020

Polynomial Regression Using TensorFlow

In this tutorial you will learn about polynomial regression and how you can implement it in TensorFlow.

In this post, we will perform polynomial regression using five types of equations:

  • Linear
  • Quadratic
  • Cubic
  • Quartic
  • Quintic

Regression

What is Regression?

Regression is a statistical method used to estimate the relationship between a dependent variable (often denoted by Y) and a series of varying variables (called independent variables, often denoted by X).

What is Polynomial Regression

This is a form of regression analysis where the relationship between Y and X is modelled as an nth-degree polynomial in X. Polynomial regression can even fit a non-linear relationship (e.g. when the points don't form a straight line).

Imports

import tensorflow.compat.v1 as tf +tf.disable_v2_behavior() +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +
+ +

Dataset

Creating Random Data

Even though in this tutorial we will use a Position vs Salary dataset, it is important to know how to create synthetic data.

To create 50 values spaced evenly between 0 and 50, we use NumPy's linspace function:

linspace(lower_limit, upper_limit, no_of_observations)

x = np.linspace(0, 50, 50) +y = np.linspace(0, 50, 50) +
+ +

We then add uniform random noise to the data, so that our values do not all lie on a perfectly straight line:

x += np.random.uniform(-4, 4, 50) +y += np.random.uniform(-4, 4, 50) +
+ +

Position vs Salary Dataset

We will be using https://drive.google.com/file/d/1tNL4jxZEfpaP4oflfSn6pIHJX7Pachm9/view (Salary vs Position Dataset)

!wget --no-check-certificate 'https://docs.google.com/uc?export=download&id=1tNL4jxZEfpaP4oflfSn6pIHJX7Pachm9' -O data.csv +
+ +
df = pd.read_csv("data.csv") +
+ +
df # this gives us a preview of the dataset we are working with +
+ +
| Position | Level | Salary | +|-------------------|-------|---------| +| Business Analyst | 1 | 45000 | +| Junior Consultant | 2 | 50000 | +| Senior Consultant | 3 | 60000 | +| Manager | 4 | 80000 | +| Country Manager | 5 | 110000 | +| Region Manager | 6 | 150000 | +| Partner | 7 | 200000 | +| Senior Partner | 8 | 300000 | +| C-level | 9 | 500000 | +| CEO | 10 | 1000000 | +
+ +

We use the Salary column as the ordinate (y-coordinate) and the Level column as the abscissa (x-coordinate)

abscissa = df["Level"].to_list() # abscissa = [1,2,3,4,5,6,7,8,9,10] +ordinate = df["Salary"].to_list() # ordinate = [45000,50000,60000,80000,110000,150000,200000,300000,500000,1000000] +
+ +
n = len(abscissa) # no of observations +plt.scatter(abscissa, ordinate) +plt.ylabel('Salary') +plt.xlabel('Position') +plt.title("Salary vs Position") +plt.show() +
+ +

Defining Stuff

X = tf.placeholder("float") +Y = tf.placeholder("float") +
+ +

Defining Variables

We first define all the coefficients and the constant as TensorFlow variables, each having a random initial value

a = tf.Variable(np.random.randn(), name = "a") +b = tf.Variable(np.random.randn(), name = "b") +c = tf.Variable(np.random.randn(), name = "c") +d = tf.Variable(np.random.randn(), name = "d") +e = tf.Variable(np.random.randn(), name = "e") +f = tf.Variable(np.random.randn(), name = "f") +
+ +

Model Configuration

learning_rate = 0.2 +no_of_epochs = 25000 +
+ +

Equations

deg1 = a*X + b +deg2 = a*tf.pow(X,2) + b*X + c +deg3 = a*tf.pow(X,3) + b*tf.pow(X,2) + c*X + d +deg4 = a*tf.pow(X,4) + b*tf.pow(X,3) + c*tf.pow(X,2) + d*X + e +deg5 = a*tf.pow(X,5) + b*tf.pow(X,4) + c*tf.pow(X,3) + d*tf.pow(X,2) + e*X + f +
+ +

Cost Function

We use the Mean Squared Error function. The expressions below compute the sum of squared errors divided by 2n, i.e. half the mean squared error; the extra factor of 2 only scales the gradients and does not change the optimum.

mse1 = tf.reduce_sum(tf.pow(deg1-Y,2))/(2*n) +mse2 = tf.reduce_sum(tf.pow(deg2-Y,2))/(2*n) +mse3 = tf.reduce_sum(tf.pow(deg3-Y,2))/(2*n) +mse4 = tf.reduce_sum(tf.pow(deg4-Y,2))/(2*n) +mse5 = tf.reduce_sum(tf.pow(deg5-Y,2))/(2*n) +
+ +

Optimizer

We use the AdamOptimizer for the polynomial functions and GradientDescentOptimizer for the linear function

optimizer1 = tf.train.GradientDescentOptimizer(learning_rate).minimize(mse1) +optimizer2 = tf.train.AdamOptimizer(learning_rate).minimize(mse2) +optimizer3 = tf.train.AdamOptimizer(learning_rate).minimize(mse3) +optimizer4 = tf.train.AdamOptimizer(learning_rate).minimize(mse4) +optimizer5 = tf.train.AdamOptimizer(learning_rate).minimize(mse5) +
+ +
init=tf.global_variables_initializer() +
+ +

Model Predictions

For each type of equation, we first train the model to estimate the value(s) of the coefficient(s) and the constant; once we have these values, we use them to predict Y values from the X values. We then plot the predictions to compare the fitted line against the actual data.

Linear Equation

with tf.Session() as sess: + sess.run(init) + for epoch in range(no_of_epochs): + for (x,y) in zip(abscissa, ordinate): + sess.run(optimizer1, feed_dict={X:x, Y:y}) + if (epoch+1)%1000==0: + cost = sess.run(mse1,feed_dict={X:abscissa,Y:ordinate}) + print("Epoch",(epoch+1), ": Training Cost:", cost," a,b:",sess.run(a),sess.run(b)) + + training_cost = sess.run(mse1,feed_dict={X:abscissa,Y:ordinate}) + coefficient1 = sess.run(a) + constant = sess.run(b) + +print(training_cost, coefficient1, constant) +
+ +
Epoch 1000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 2000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 3000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 4000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 5000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 6000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 7000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 8000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 9000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 10000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 11000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 12000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 13000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 14000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 15000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 16000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 17000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 18000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 19000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 20000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 21000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 22000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 23000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 24000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 25000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +88999125000.0 180396.42 -478869.12 +
+ +
predictions = [] +for x in abscissa: + predictions.append((coefficient1*x + constant)) +plt.plot(abscissa , ordinate, 'ro', label ='Original data') +plt.plot(abscissa, predictions, label ='Fitted line') +plt.title('Linear Regression Result') +plt.legend() +plt.show() +
+ +

Quadratic Equation

with tf.Session() as sess: + sess.run(init) + for epoch in range(no_of_epochs): + for (x,y) in zip(abscissa, ordinate): + sess.run(optimizer2, feed_dict={X:x, Y:y}) + if (epoch+1)%1000==0: + cost = sess.run(mse2,feed_dict={X:abscissa,Y:ordinate}) + print("Epoch",(epoch+1), ": Training Cost:", cost," a,b,c:",sess.run(a),sess.run(b),sess.run(c)) + + training_cost = sess.run(mse2,feed_dict={X:abscissa,Y:ordinate}) + coefficient1 = sess.run(a) + coefficient2 = sess.run(b) + constant = sess.run(c) + +print(training_cost, coefficient1, coefficient2, constant) +
+ +
Epoch 1000 : Training Cost: 52571360000.0 a,b,c: 1002.4456 1097.0197 1276.6921 +Epoch 2000 : Training Cost: 37798890000.0 a,b,c: 1952.4263 2130.2825 2469.7756 +Epoch 3000 : Training Cost: 26751185000.0 a,b,c: 2839.5825 3081.6118 3554.351 +Epoch 4000 : Training Cost: 19020106000.0 a,b,c: 3644.56 3922.9563 4486.3135 +Epoch 5000 : Training Cost: 14060446000.0 a,b,c: 4345.042 4621.4233 5212.693 +Epoch 6000 : Training Cost: 11201084000.0 a,b,c: 4921.1855 5148.1504 5689.0713 +Epoch 7000 : Training Cost: 9732740000.0 a,b,c: 5364.764 5493.0156 5906.754 +Epoch 8000 : Training Cost: 9050918000.0 a,b,c: 5685.4067 5673.182 5902.0728 +Epoch 9000 : Training Cost: 8750394000.0 a,b,c: 5906.9814 5724.8906 5734.746 +Epoch 10000 : Training Cost: 8613128000.0 a,b,c: 6057.3677 5687.3364 5461.167 +Epoch 11000 : Training Cost: 8540034600.0 a,b,c: 6160.547 5592.3022 5122.8633 +Epoch 12000 : Training Cost: 8490983000.0 a,b,c: 6233.9175 5462.025 4747.111 +Epoch 13000 : Training Cost: 8450816500.0 a,b,c: 6289.048 5310.7583 4350.6997 +Epoch 14000 : Training Cost: 8414082000.0 a,b,c: 6333.199 5147.394 3943.9294 +Epoch 15000 : Training Cost: 8378841600.0 a,b,c: 6370.7944 4977.1704 3532.476 +Epoch 16000 : Training Cost: 8344471000.0 a,b,c: 6404.468 4803.542 3120.2087 +Epoch 17000 : Training Cost: 8310785500.0 a,b,c: 6435.365 4628.1523 2709.1445 +Epoch 18000 : Training Cost: 8277482000.0 a,b,c: 6465.5493 4451.833 2300.2783 +Epoch 19000 : Training Cost: 8244650000.0 a,b,c: 6494.609 4274.826 1894.3738 +Epoch 20000 : Training Cost: 8212349000.0 a,b,c: 6522.8247 4098.1733 1491.9915 +Epoch 21000 : Training Cost: 8180598300.0 a,b,c: 6550.6567 3922.7405 1093.3868 +Epoch 22000 : Training Cost: 8149257700.0 a,b,c: 6578.489 3747.8362 698.53357 +Epoch 23000 : Training Cost: 8118325000.0 a,b,c: 6606.1973 3573.2742 307.3541 +Epoch 24000 : Training Cost: 8088001000.0 a,b,c: 6632.96 3399.878 -79.89219 +Epoch 25000 : Training Cost: 8058094600.0 a,b,c: 6659.793 3227.2517 -463.03156 +8058094600.0 6659.793 3227.2517 -463.03156 +
+ +
predictions = [] +for x in abscissa: + predictions.append((coefficient1*pow(x,2) + coefficient2*x + constant)) +plt.plot(abscissa , ordinate, 'ro', label ='Original data') +plt.plot(abscissa, predictions, label ='Fitted line') +plt.title('Quadratic Regression Result') +plt.legend() +plt.show() +
+ +

Cubic

with tf.Session() as sess: + sess.run(init) + for epoch in range(no_of_epochs): + for (x,y) in zip(abscissa, ordinate): + sess.run(optimizer3, feed_dict={X:x, Y:y}) + if (epoch+1)%1000==0: + cost = sess.run(mse3,feed_dict={X:abscissa,Y:ordinate}) + print("Epoch",(epoch+1), ": Training Cost:", cost," a,b,c,d:",sess.run(a),sess.run(b),sess.run(c),sess.run(d)) + + training_cost = sess.run(mse3,feed_dict={X:abscissa,Y:ordinate}) + coefficient1 = sess.run(a) + coefficient2 = sess.run(b) + coefficient3 = sess.run(c) + constant = sess.run(d) + +print(training_cost, coefficient1, coefficient2, coefficient3, constant) +
+ +
Epoch 1000 : Training Cost: 4279814000.0 a,b,c,d: 670.1527 694.4212 751.4653 903.9527 +Epoch 2000 : Training Cost: 3770950400.0 a,b,c,d: 742.6414 666.3489 636.94525 859.2088 +Epoch 3000 : Training Cost: 3717708300.0 a,b,c,d: 756.2582 569.3339 448.105 748.23956 +Epoch 4000 : Training Cost: 3667464000.0 a,b,c,d: 769.4476 474.0318 265.5761 654.75525 +Epoch 5000 : Training Cost: 3620040700.0 a,b,c,d: 782.32324 380.54272 89.39888 578.5136 +Epoch 6000 : Training Cost: 3575265800.0 a,b,c,d: 794.8898 288.83356 -80.5215 519.13654 +Epoch 7000 : Training Cost: 3532972000.0 a,b,c,d: 807.1608 198.87044 -244.31102 476.2061 +Epoch 8000 : Training Cost: 3493009200.0 a,b,c,d: 819.13513 110.64169 -402.0677 449.3291 +Epoch 9000 : Training Cost: 3455228400.0 a,b,c,d: 830.80255 24.0964 -553.92804 438.0652 +Epoch 10000 : Training Cost: 3419475500.0 a,b,c,d: 842.21594 -60.797424 -700.0123 441.983 +Epoch 11000 : Training Cost: 3385625300.0 a,b,c,d: 853.3363 -144.08699 -840.467 460.6356 +Epoch 12000 : Training Cost: 3353544700.0 a,b,c,d: 864.19135 -225.8125 -975.4196 493.57703 +Epoch 13000 : Training Cost: 3323125000.0 a,b,c,d: 874.778 -305.98932 -1104.9867 540.39465 +Epoch 14000 : Training Cost: 3294257000.0 a,b,c,d: 885.1007 -384.63474 -1229.277 600.65607 +Epoch 15000 : Training Cost: 3266820000.0 a,b,c,d: 895.18823 -461.819 -1348.4417 673.9051 +Epoch 16000 : Training Cost: 3240736000.0 a,b,c,d: 905.0128 -537.541 -1462.6171 759.7118 +Epoch 17000 : Training Cost: 3215895000.0 a,b,c,d: 914.60065 -611.8676 -1571.9058 857.6638 +Epoch 18000 : Training Cost: 3192216800.0 a,b,c,d: 923.9603 -684.8093 -1676.4642 967.30475 +Epoch 19000 : Training Cost: 3169632300.0 a,b,c,d: 933.08594 -756.3582 -1776.4275 1088.2198 +Epoch 20000 : Training Cost: 3148046300.0 a,b,c,d: 941.9928 -826.6257 -1871.9355 1219.9702 +Epoch 21000 : Training Cost: 3127394800.0 a,b,c,d: 950.67896 -895.6205 -1963.0989 1362.1665 +Epoch 22000 : Training Cost: 3107608600.0 a,b,c,d: 959.1487 -963.38116 -2050.0586 1514.4026 +Epoch 23000 : Training Cost: 3088618200.0 a,b,c,d: 967.4355 -1029.9625 -2132.961 1676.2717 +Epoch 24000 : Training Cost: 3070361300.0 a,b,c,d: 975.52875 -1095.4292 -2211.854 1847.4485 +Epoch 25000 : Training Cost: 3052791300.0 a,b,c,d: 983.4346 -1159.7922 -2286.9412 2027.4857 +3052791300.0 983.4346 -1159.7922 -2286.9412 2027.4857 +
+ +
predictions = [] +for x in abscissa: + predictions.append((coefficient1*pow(x,3) + coefficient2*pow(x,2) + coefficient3*x + constant)) +plt.plot(abscissa , ordinate, 'ro', label ='Original data') +plt.plot(abscissa, predictions, label ='Fitted line') +plt.title('Cubic Regression Result') +plt.legend() +plt.show() +
+ +

Quartic

with tf.Session() as sess: + sess.run(init) + for epoch in range(no_of_epochs): + for (x,y) in zip(abscissa, ordinate): + sess.run(optimizer4, feed_dict={X:x, Y:y}) + if (epoch+1)%1000==0: + cost = sess.run(mse4,feed_dict={X:abscissa,Y:ordinate}) + print("Epoch",(epoch+1), ": Training Cost:", cost," a,b,c,d:",sess.run(a),sess.run(b),sess.run(c),sess.run(d),sess.run(e)) + + training_cost = sess.run(mse4,feed_dict={X:abscissa,Y:ordinate}) + coefficient1 = sess.run(a) + coefficient2 = sess.run(b) + coefficient3 = sess.run(c) + coefficient4 = sess.run(d) + constant = sess.run(e) + +print(training_cost, coefficient1, coefficient2, coefficient3, coefficient4, constant) +
+ +
Epoch 1000 : Training Cost: 1902632600.0 a,b,c,d: 84.48304 52.210594 54.791424 142.51952 512.0343 +Epoch 2000 : Training Cost: 1854316200.0 a,b,c,d: 88.998955 13.073557 14.276088 223.55667 1056.4655 +Epoch 3000 : Training Cost: 1812812400.0 a,b,c,d: 92.9462 -22.331177 -15.262934 327.41858 1634.9054 +Epoch 4000 : Training Cost: 1775716000.0 a,b,c,d: 96.42522 -54.64535 -35.829437 449.5028 2239.1392 +Epoch 5000 : Training Cost: 1741494100.0 a,b,c,d: 99.524734 -84.43976 -49.181057 585.85876 2862.4915 +Epoch 6000 : Training Cost: 1709199600.0 a,b,c,d: 102.31984 -112.19895 -56.808075 733.1876 3499.6199 +Epoch 7000 : Training Cost: 1678261800.0 a,b,c,d: 104.87324 -138.32709 -59.9442 888.79626 4146.2944 +Epoch 8000 : Training Cost: 1648340600.0 a,b,c,d: 107.23536 -163.15173 -59.58964 1050.524 4798.979 +Epoch 9000 : Training Cost: 1619243400.0 a,b,c,d: 109.44742 -186.9409 -56.53944 1216.6432 5454.9463 +Epoch 10000 : Training Cost: 1590821900.0 a,b,c,d: 111.54233 -209.91287 -51.423084 1385.8513 6113.5137 +Epoch 11000 : Training Cost: 1563042200.0 a,b,c,d: 113.54405 -232.21953 -44.73371 1557.1084 6771.7046 +Epoch 12000 : Training Cost: 1535855600.0 a,b,c,d: 115.471565 -253.9838 -36.851135 1729.535 7429.069 +Epoch 13000 : Training Cost: 1509255300.0 a,b,c,d: 117.33939 -275.29697 -28.0714 1902.5308 8083.9634 +Epoch 14000 : Training Cost: 1483227000.0 a,b,c,d: 119.1605 -296.2472 -18.618649 2075.6094 8735.381 +Epoch 15000 : Training Cost: 1457726700.0 a,b,c,d: 120.94584 -316.915 -8.650095 2248.3247 9384.197 +Epoch 16000 : Training Cost: 1432777300.0 a,b,c,d: 122.69806 -337.30704 1.7027153 2420.5771 10028.871 +Epoch 17000 : Training Cost: 1408365000.0 a,b,c,d: 124.42179 -357.45245 12.33499 2592.2983 10669.157 +Epoch 18000 : Training Cost: 1384480000.0 a,b,c,d: 126.12332 -377.39734 23.168756 2763.0933 11305.027 +Epoch 19000 : Training Cost: 1361116800.0 a,b,c,d: 127.80568 -397.16415 34.160156 2933.0452 11935.669 +Epoch 20000 : Training Cost: 1338288100.0 a,b,c,d: 129.4674 -416.72803 45.259155 3101.7727 12561.179 +Epoch 21000 : Training Cost: 1315959700.0 a,b,c,d: 131.11403 -436.14285 56.4436 3269.3142 13182.058 +Epoch 22000 : Training Cost: 1294164700.0 a,b,c,d: 132.74377 -455.3779 67.6757 3435.3833 13796.807 +Epoch 23000 : Training Cost: 1272863600.0 a,b,c,d: 134.35779 -474.45316 78.96117 3600.264 14406.58 +Epoch 24000 : Training Cost: 1252052600.0 a,b,c,d: 135.9583 -493.38254 90.268616 3764.0078 15010.481 +Epoch 25000 : Training Cost: 1231713700.0 a,b,c,d: 137.54753 -512.1876 101.59372 3926.4897 15609.368 +1231713700.0 137.54753 -512.1876 101.59372 3926.4897 15609.368 +
+ +
predictions = [] +for x in abscissa: + predictions.append((coefficient1*pow(x,4) + coefficient2*pow(x,3) + coefficient3*pow(x,2) + coefficient4*x + constant)) +plt.plot(abscissa , ordinate, 'ro', label ='Original data') +plt.plot(abscissa, predictions, label ='Fitted line') +plt.title('Quartic Regression Result') +plt.legend() +plt.show() +
+ +

Quintic

with tf.Session() as sess: + sess.run(init) + for epoch in range(no_of_epochs): + for (x,y) in zip(abscissa, ordinate): + sess.run(optimizer5, feed_dict={X:x, Y:y}) + if (epoch+1)%1000==0: + cost = sess.run(mse5,feed_dict={X:abscissa,Y:ordinate}) + print("Epoch",(epoch+1), ": Training Cost:", cost," a,b,c,d,e,f:",sess.run(a),sess.run(b),sess.run(c),sess.run(d),sess.run(e),sess.run(f)) + + training_cost = sess.run(mse5,feed_dict={X:abscissa,Y:ordinate}) + coefficient1 = sess.run(a) + coefficient2 = sess.run(b) + coefficient3 = sess.run(c) + coefficient4 = sess.run(d) + coefficient5 = sess.run(e) + constant = sess.run(f) +
+ +
Epoch 1000 : Training Cost: 1409200100.0 a,b,c,d,e,f: 7.949472 7.46219 55.626034 184.29028 484.00223 1024.0083 +Epoch 2000 : Training Cost: 1306882400.0 a,b,c,d,e,f: 8.732181 -4.0085897 73.25298 315.90103 904.08887 2004.9749 +Epoch 3000 : Training Cost: 1212606000.0 a,b,c,d,e,f: 9.732249 -16.90125 86.28379 437.06552 1305.055 2966.2188 +Epoch 4000 : Training Cost: 1123640400.0 a,b,c,d,e,f: 10.74851 -29.82692 98.59997 555.331 1698.4631 3917.9155 +Epoch 5000 : Training Cost: 1039694300.0 a,b,c,d,e,f: 11.75426 -42.598194 110.698326 671.64355 2085.5513 4860.8535 +Epoch 6000 : Training Cost: 960663550.0 a,b,c,d,e,f: 12.745439 -55.18337 122.644936 786.00214 2466.1638 5794.3735 +Epoch 7000 : Training Cost: 886438340.0 a,b,c,d,e,f: 13.721028 -67.57168 134.43822 898.3691 2839.9958 6717.659 +Epoch 8000 : Training Cost: 816913100.0 a,b,c,d,e,f: 14.679965 -79.75113 146.07385 1008.66895 3206.6692 7629.812 +Epoch 9000 : Training Cost: 751971500.0 a,b,c,d,e,f: 15.62181 -91.71608 157.55713 1116.7715 3565.8323 8529.976 +Epoch 10000 : Training Cost: 691508740.0 a,b,c,d,e,f: 16.545347 -103.4531 168.88321 1222.6348 3916.9785 9416.236 +Epoch 11000 : Training Cost: 635382000.0 a,b,c,d,e,f: 17.450052 -114.954254 180.03932 1326.1565 4259.842 10287.99 +Epoch 12000 : Training Cost: 583477250.0 a,b,c,d,e,f: 18.334944 -126.20821 191.02948 1427.2095 4593.8 11143.449 +Epoch 13000 : Training Cost: 535640400.0 a,b,c,d,e,f: 19.198917 -137.20206 201.84718 1525.6926 4918.5327 11981.633 +Epoch 14000 : Training Cost: 491722240.0 a,b,c,d,e,f: 20.041153 -147.92719 212.49709 1621.5496 5233.627 12800.468 +Epoch 15000 : Training Cost: 451559520.0 a,b,c,d,e,f: 20.860966 -158.37456 222.97133 1714.7141 5538.676 13598.337 +Epoch 16000 : Training Cost: 414988960.0 a,b,c,d,e,f: 21.657421 -168.53406 233.27422 1805.0874 5833.1978 14373.658 +Epoch 17000 : Training Cost: 381837920.0 a,b,c,d,e,f: 22.429693 -178.39536 243.39914 1892.5883 6116.847 15124.394 +Epoch 18000 : Training Cost: 351931300.0 a,b,c,d,e,f: 23.176882 -187.94789 253.3445 1977.137 6389.117 15848.417 +Epoch 19000 : Training Cost: 325074400.0 a,b,c,d,e,f: 23.898485 -197.18741 263.12512 2058.6716 6649.8037 16543.95 +Epoch 20000 : Training Cost: 301073570.0 a,b,c,d,e,f: 24.593851 -206.10497 272.72385 2137.1797 6898.544 17209.367 +Epoch 21000 : Training Cost: 279727000.0 a,b,c,d,e,f: 25.262104 -214.69217 282.14642 2212.6372 7135.217 17842.854 +Epoch 22000 : Training Cost: 260845550.0 a,b,c,d,e,f: 25.903376 -222.94969 291.4003 2284.9844 7359.4644 18442.408 +Epoch 23000 : Training Cost: 244218030.0 a,b,c,d,e,f: 26.517094 -230.8697 300.45532 2354.3003 7571.261 19007.49 +Epoch 24000 : Training Cost: 229660080.0 a,b,c,d,e,f: 27.102589 -238.44817 309.35342 2420.4185 7770.5728 19536.19 +Epoch 25000 : Training Cost: 216972400.0 a,b,c,d,e,f: 27.660324 -245.69016 318.10062 2483.3608 7957.354 20027.707 +216972400.0 27.660324 -245.69016 318.10062 2483.3608 7957.354 20027.707 +
+ +
predictions = [] +for x in abscissa: + predictions.append((coefficient1*pow(x,5) + coefficient2*pow(x,4) + coefficient3*pow(x,3) + coefficient4*pow(x,2) + coefficient5*x + constant)) +plt.plot(abscissa , ordinate, 'ro', label ='Original data') +plt.plot(abscissa, predictions, label ='Fitted line') +plt.title('Quintic Regression Result') +plt.legend() +plt.show() +
+ +

Results and Conclusion

You just learnt polynomial regression using TensorFlow! Comparing the final training costs printed above (roughly 8.9 × 10^10 for the linear model down to roughly 2.2 × 10^8 for the quintic model), the higher-degree polynomials fit this dataset far more closely.

Notes

Overfitting

> Overfitting refers to a model that models the training data too well. Overfitting happens when a model learns the detail and noise in the training data to the extent that it negatively impacts the performance of the model on new data. This means that the noise or random fluctuations in the training data is picked up and learned as concepts by the model. The problem is that these concepts do not apply to new data and negatively impact the model's ability to generalize.

Source: Machine Learning Mastery

Basically, if you train your machine learning model on a small dataset for a really large number of epochs, the model will learn all the deformities/noise in the data and treat them as a normal part of it. Therefore, when it sees some new data, it will effectively discard it as noise, which impacts the accuracy of the model in a negative manner.
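As a minimal sketch (not part of the original tutorial), one way to spot this is to hold out a few points as a validation set and track both costs while training, reusing the quintic model, placeholders and session setup defined above; if the training cost keeps falling while the validation cost starts rising, the model is overfitting:

train_x, val_x = abscissa[:8], abscissa[8:]
train_y, val_y = ordinate[:8], ordinate[8:]

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(no_of_epochs):
        for (x, y) in zip(train_x, train_y):
            sess.run(optimizer5, feed_dict={X: x, Y: y})
        if (epoch + 1) % 1000 == 0:
            train_cost = sess.run(mse5, feed_dict={X: train_x, Y: train_y})
            val_cost = sess.run(mse5, feed_dict={X: val_x, Y: val_y})
            # A widening gap between these two numbers is a sign of overfitting
            print("Epoch", epoch + 1, "Training Cost:", train_cost, "Validation Cost:", val_cost)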

Tagged with:
\ No newline at end of file diff --git a/posts/2019-12-16-TensorFlow-Polynomial-Regression/index 5.html b/posts/2019-12-16-TensorFlow-Polynomial-Regression/index 5.html new file mode 100644 index 0000000..07fa95a --- /dev/null +++ b/posts/2019-12-16-TensorFlow-Polynomial-Regression/index 5.html @@ -0,0 +1,369 @@ +Polynomial Regression Using TensorFlow | Navan Chauhan
16 minute readCreated on December 16, 2019Last modified on January 18, 2020

Polynomial Regression Using TensorFlow

In this tutorial you will learn about polynomial regression and how you can implement it in TensorFlow.

In this post, we will perform polynomial regression using five types of equations:

  • Linear
  • Quadratic
  • Cubic
  • Quartic
  • Quintic

Regression

What is Regression?

Regression is a statistical method used to estimate the relationship between a dependent variable (often denoted by Y) and a series of varying variables (called independent variables, often denoted by X).

What is Polynomial Regression

This is a form of regression analysis where the relationship between Y and X is modelled as an nth-degree polynomial in X. Polynomial regression can even fit a non-linear relationship (e.g. when the points don't form a straight line).

Imports

import tensorflow.compat.v1 as tf +tf.disable_v2_behavior() +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +
+ +

Dataset

Creating Random Data

Even though in this tutorial we will use a Position vs Salary dataset, it is important to know how to create synthetic data.

To create 50 values spaced evenly between 0 and 50, we use NumPy's linspace function:

linspace(lower_limit, upper_limit, no_of_observations)

x = np.linspace(0, 50, 50) +y = np.linspace(0, 50, 50) +
+ +

We then add uniform random noise to the data, so that our values do not all lie on a perfectly straight line:

x += np.random.uniform(-4, 4, 50) +y += np.random.uniform(-4, 4, 50) +
+ +

Position vs Salary Dataset

We will be using https://drive.google.com/file/d/1tNL4jxZEfpaP4oflfSn6pIHJX7Pachm9/view (Salary vs Position Dataset)

!wget --no-check-certificate 'https://docs.google.com/uc?export=download&id=1tNL4jxZEfpaP4oflfSn6pIHJX7Pachm9' -O data.csv +
+ +
df = pd.read_csv("data.csv") +
+ +
df # this gives us a preview of the dataset we are working with +
+ +
| Position | Level | Salary | +|-------------------|-------|---------| +| Business Analyst | 1 | 45000 | +| Junior Consultant | 2 | 50000 | +| Senior Consultant | 3 | 60000 | +| Manager | 4 | 80000 | +| Country Manager | 5 | 110000 | +| Region Manager | 6 | 150000 | +| Partner | 7 | 200000 | +| Senior Partner | 8 | 300000 | +| C-level | 9 | 500000 | +| CEO | 10 | 1000000 | +
+ +

We use the Salary column as the ordinate (y-coordinate) and the Level column as the abscissa (x-coordinate)

abscissa = df["Level"].to_list() # abscissa = [1,2,3,4,5,6,7,8,9,10] +ordinate = df["Salary"].to_list() # ordinate = [45000,50000,60000,80000,110000,150000,200000,300000,500000,1000000] +
+ +
n = len(abscissa) # no of observations +plt.scatter(abscissa, ordinate) +plt.ylabel('Salary') +plt.xlabel('Position') +plt.title("Salary vs Position") +plt.show() +
+ +

Defining Stuff

X = tf.placeholder("float") +Y = tf.placeholder("float") +
+ +

Defining Variables

We first define all the coefficients and the constant as TensorFlow variables, each having a random initial value

a = tf.Variable(np.random.randn(), name = "a") +b = tf.Variable(np.random.randn(), name = "b") +c = tf.Variable(np.random.randn(), name = "c") +d = tf.Variable(np.random.randn(), name = "d") +e = tf.Variable(np.random.randn(), name = "e") +f = tf.Variable(np.random.randn(), name = "f") +
+ +

Model Configuration

learning_rate = 0.2 +no_of_epochs = 25000 +
+ +

Equations

deg1 = a*X + b +deg2 = a*tf.pow(X,2) + b*X + c +deg3 = a*tf.pow(X,3) + b*tf.pow(X,2) + c*X + d +deg4 = a*tf.pow(X,4) + b*tf.pow(X,3) + c*tf.pow(X,2) + d*X + e +deg5 = a*tf.pow(X,5) + b*tf.pow(X,4) + c*tf.pow(X,3) + d*tf.pow(X,2) + e*X + f +
+ +

Cost Function

We use the Mean Squared Error function. The expressions below compute the sum of squared errors divided by 2n, i.e. half the mean squared error; the extra factor of 2 only scales the gradients and does not change the optimum.

mse1 = tf.reduce_sum(tf.pow(deg1-Y,2))/(2*n) +mse2 = tf.reduce_sum(tf.pow(deg2-Y,2))/(2*n) +mse3 = tf.reduce_sum(tf.pow(deg3-Y,2))/(2*n) +mse4 = tf.reduce_sum(tf.pow(deg4-Y,2))/(2*n) +mse5 = tf.reduce_sum(tf.pow(deg5-Y,2))/(2*n) +
+ +

Optimizer

We use the AdamOptimizer for the polynomial functions and GradientDescentOptimizer for the linear function

optimizer1 = tf.train.GradientDescentOptimizer(learning_rate).minimize(mse1) +optimizer2 = tf.train.AdamOptimizer(learning_rate).minimize(mse2) +optimizer3 = tf.train.AdamOptimizer(learning_rate).minimize(mse3) +optimizer4 = tf.train.AdamOptimizer(learning_rate).minimize(mse4) +optimizer5 = tf.train.AdamOptimizer(learning_rate).minimize(mse5) +
+ +
init=tf.global_variables_initializer() +
+ +

Model Predictions

For each type of equation, we first train the model to estimate the value(s) of the coefficient(s) and the constant; once we have these values, we use them to predict Y values from the X values. We then plot the predictions to compare the fitted line against the actual data.

Linear Equation

with tf.Session() as sess: + sess.run(init) + for epoch in range(no_of_epochs): + for (x,y) in zip(abscissa, ordinate): + sess.run(optimizer1, feed_dict={X:x, Y:y}) + if (epoch+1)%1000==0: + cost = sess.run(mse1,feed_dict={X:abscissa,Y:ordinate}) + print("Epoch",(epoch+1), ": Training Cost:", cost," a,b:",sess.run(a),sess.run(b)) + + training_cost = sess.run(mse1,feed_dict={X:abscissa,Y:ordinate}) + coefficient1 = sess.run(a) + constant = sess.run(b) + +print(training_cost, coefficient1, constant) +
+ +
Epoch 1000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 2000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 3000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 4000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 5000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 6000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 7000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 8000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 9000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 10000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 11000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 12000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 13000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 14000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 15000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 16000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 17000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 18000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 19000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 20000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 21000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 22000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 23000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 24000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +Epoch 25000 : Training Cost: 88999125000.0 a,b: 180396.42 -478869.12 +88999125000.0 180396.42 -478869.12 +
+ +
predictions = [] +for x in abscissa: + predictions.append((coefficient1*x + constant)) +plt.plot(abscissa , ordinate, 'ro', label ='Original data') +plt.plot(abscissa, predictions, label ='Fitted line') +plt.title('Linear Regression Result') +plt.legend() +plt.show() +
+ +

Quadratic Equation

with tf.Session() as sess: + sess.run(init) + for epoch in range(no_of_epochs): + for (x,y) in zip(abscissa, ordinate): + sess.run(optimizer2, feed_dict={X:x, Y:y}) + if (epoch+1)%1000==0: + cost = sess.run(mse2,feed_dict={X:abscissa,Y:ordinate}) + print("Epoch",(epoch+1), ": Training Cost:", cost," a,b,c:",sess.run(a),sess.run(b),sess.run(c)) + + training_cost = sess.run(mse2,feed_dict={X:abscissa,Y:ordinate}) + coefficient1 = sess.run(a) + coefficient2 = sess.run(b) + constant = sess.run(c) + +print(training_cost, coefficient1, coefficient2, constant) +
+ +
Epoch 1000 : Training Cost: 52571360000.0 a,b,c: 1002.4456 1097.0197 1276.6921 +Epoch 2000 : Training Cost: 37798890000.0 a,b,c: 1952.4263 2130.2825 2469.7756 +Epoch 3000 : Training Cost: 26751185000.0 a,b,c: 2839.5825 3081.6118 3554.351 +Epoch 4000 : Training Cost: 19020106000.0 a,b,c: 3644.56 3922.9563 4486.3135 +Epoch 5000 : Training Cost: 14060446000.0 a,b,c: 4345.042 4621.4233 5212.693 +Epoch 6000 : Training Cost: 11201084000.0 a,b,c: 4921.1855 5148.1504 5689.0713 +Epoch 7000 : Training Cost: 9732740000.0 a,b,c: 5364.764 5493.0156 5906.754 +Epoch 8000 : Training Cost: 9050918000.0 a,b,c: 5685.4067 5673.182 5902.0728 +Epoch 9000 : Training Cost: 8750394000.0 a,b,c: 5906.9814 5724.8906 5734.746 +Epoch 10000 : Training Cost: 8613128000.0 a,b,c: 6057.3677 5687.3364 5461.167 +Epoch 11000 : Training Cost: 8540034600.0 a,b,c: 6160.547 5592.3022 5122.8633 +Epoch 12000 : Training Cost: 8490983000.0 a,b,c: 6233.9175 5462.025 4747.111 +Epoch 13000 : Training Cost: 8450816500.0 a,b,c: 6289.048 5310.7583 4350.6997 +Epoch 14000 : Training Cost: 8414082000.0 a,b,c: 6333.199 5147.394 3943.9294 +Epoch 15000 : Training Cost: 8378841600.0 a,b,c: 6370.7944 4977.1704 3532.476 +Epoch 16000 : Training Cost: 8344471000.0 a,b,c: 6404.468 4803.542 3120.2087 +Epoch 17000 : Training Cost: 8310785500.0 a,b,c: 6435.365 4628.1523 2709.1445 +Epoch 18000 : Training Cost: 8277482000.0 a,b,c: 6465.5493 4451.833 2300.2783 +Epoch 19000 : Training Cost: 8244650000.0 a,b,c: 6494.609 4274.826 1894.3738 +Epoch 20000 : Training Cost: 8212349000.0 a,b,c: 6522.8247 4098.1733 1491.9915 +Epoch 21000 : Training Cost: 8180598300.0 a,b,c: 6550.6567 3922.7405 1093.3868 +Epoch 22000 : Training Cost: 8149257700.0 a,b,c: 6578.489 3747.8362 698.53357 +Epoch 23000 : Training Cost: 8118325000.0 a,b,c: 6606.1973 3573.2742 307.3541 +Epoch 24000 : Training Cost: 8088001000.0 a,b,c: 6632.96 3399.878 -79.89219 +Epoch 25000 : Training Cost: 8058094600.0 a,b,c: 6659.793 3227.2517 -463.03156 +8058094600.0 6659.793 3227.2517 -463.03156 +
+ +
predictions = [] +for x in abscissa: + predictions.append((coefficient1*pow(x,2) + coefficient2*x + constant)) +plt.plot(abscissa , ordinate, 'ro', label ='Original data') +plt.plot(abscissa, predictions, label ='Fitted line') +plt.title('Quadratic Regression Result') +plt.legend() +plt.show() +
+ +

Cubic

with tf.Session() as sess: + sess.run(init) + for epoch in range(no_of_epochs): + for (x,y) in zip(abscissa, ordinate): + sess.run(optimizer3, feed_dict={X:x, Y:y}) + if (epoch+1)%1000==0: + cost = sess.run(mse3,feed_dict={X:abscissa,Y:ordinate}) + print("Epoch",(epoch+1), ": Training Cost:", cost," a,b,c,d:",sess.run(a),sess.run(b),sess.run(c),sess.run(d)) + + training_cost = sess.run(mse3,feed_dict={X:abscissa,Y:ordinate}) + coefficient1 = sess.run(a) + coefficient2 = sess.run(b) + coefficient3 = sess.run(c) + constant = sess.run(d) + +print(training_cost, coefficient1, coefficient2, coefficient3, constant) +
+ +
Epoch 1000 : Training Cost: 4279814000.0 a,b,c,d: 670.1527 694.4212 751.4653 903.9527 +Epoch 2000 : Training Cost: 3770950400.0 a,b,c,d: 742.6414 666.3489 636.94525 859.2088 +Epoch 3000 : Training Cost: 3717708300.0 a,b,c,d: 756.2582 569.3339 448.105 748.23956 +Epoch 4000 : Training Cost: 3667464000.0 a,b,c,d: 769.4476 474.0318 265.5761 654.75525 +Epoch 5000 : Training Cost: 3620040700.0 a,b,c,d: 782.32324 380.54272 89.39888 578.5136 +Epoch 6000 : Training Cost: 3575265800.0 a,b,c,d: 794.8898 288.83356 -80.5215 519.13654 +Epoch 7000 : Training Cost: 3532972000.0 a,b,c,d: 807.1608 198.87044 -244.31102 476.2061 +Epoch 8000 : Training Cost: 3493009200.0 a,b,c,d: 819.13513 110.64169 -402.0677 449.3291 +Epoch 9000 : Training Cost: 3455228400.0 a,b,c,d: 830.80255 24.0964 -553.92804 438.0652 +Epoch 10000 : Training Cost: 3419475500.0 a,b,c,d: 842.21594 -60.797424 -700.0123 441.983 +Epoch 11000 : Training Cost: 3385625300.0 a,b,c,d: 853.3363 -144.08699 -840.467 460.6356 +Epoch 12000 : Training Cost: 3353544700.0 a,b,c,d: 864.19135 -225.8125 -975.4196 493.57703 +Epoch 13000 : Training Cost: 3323125000.0 a,b,c,d: 874.778 -305.98932 -1104.9867 540.39465 +Epoch 14000 : Training Cost: 3294257000.0 a,b,c,d: 885.1007 -384.63474 -1229.277 600.65607 +Epoch 15000 : Training Cost: 3266820000.0 a,b,c,d: 895.18823 -461.819 -1348.4417 673.9051 +Epoch 16000 : Training Cost: 3240736000.0 a,b,c,d: 905.0128 -537.541 -1462.6171 759.7118 +Epoch 17000 : Training Cost: 3215895000.0 a,b,c,d: 914.60065 -611.8676 -1571.9058 857.6638 +Epoch 18000 : Training Cost: 3192216800.0 a,b,c,d: 923.9603 -684.8093 -1676.4642 967.30475 +Epoch 19000 : Training Cost: 3169632300.0 a,b,c,d: 933.08594 -756.3582 -1776.4275 1088.2198 +Epoch 20000 : Training Cost: 3148046300.0 a,b,c,d: 941.9928 -826.6257 -1871.9355 1219.9702 +Epoch 21000 : Training Cost: 3127394800.0 a,b,c,d: 950.67896 -895.6205 -1963.0989 1362.1665 +Epoch 22000 : Training Cost: 3107608600.0 a,b,c,d: 959.1487 -963.38116 -2050.0586 1514.4026 +Epoch 23000 : Training Cost: 3088618200.0 a,b,c,d: 967.4355 -1029.9625 -2132.961 1676.2717 +Epoch 24000 : Training Cost: 3070361300.0 a,b,c,d: 975.52875 -1095.4292 -2211.854 1847.4485 +Epoch 25000 : Training Cost: 3052791300.0 a,b,c,d: 983.4346 -1159.7922 -2286.9412 2027.4857 +3052791300.0 983.4346 -1159.7922 -2286.9412 2027.4857 +
+ +
predictions = [] +for x in abscissa: + predictions.append((coefficient1*pow(x,3) + coefficient2*pow(x,2) + coefficient3*x + constant)) +plt.plot(abscissa , ordinate, 'ro', label ='Original data') +plt.plot(abscissa, predictions, label ='Fitted line') +plt.title('Cubic Regression Result') +plt.legend() +plt.show() +
+ +

Quartic

with tf.Session() as sess: + sess.run(init) + for epoch in range(no_of_epochs): + for (x,y) in zip(abscissa, ordinate): + sess.run(optimizer4, feed_dict={X:x, Y:y}) + if (epoch+1)%1000==0: + cost = sess.run(mse4,feed_dict={X:abscissa,Y:ordinate}) + print("Epoch",(epoch+1), ": Training Cost:", cost," a,b,c,d:",sess.run(a),sess.run(b),sess.run(c),sess.run(d),sess.run(e)) + + training_cost = sess.run(mse4,feed_dict={X:abscissa,Y:ordinate}) + coefficient1 = sess.run(a) + coefficient2 = sess.run(b) + coefficient3 = sess.run(c) + coefficient4 = sess.run(d) + constant = sess.run(e) + +print(training_cost, coefficient1, coefficient2, coefficient3, coefficient4, constant) +
+ +
Epoch 1000 : Training Cost: 1902632600.0 a,b,c,d: 84.48304 52.210594 54.791424 142.51952 512.0343 +Epoch 2000 : Training Cost: 1854316200.0 a,b,c,d: 88.998955 13.073557 14.276088 223.55667 1056.4655 +Epoch 3000 : Training Cost: 1812812400.0 a,b,c,d: 92.9462 -22.331177 -15.262934 327.41858 1634.9054 +Epoch 4000 : Training Cost: 1775716000.0 a,b,c,d: 96.42522 -54.64535 -35.829437 449.5028 2239.1392 +Epoch 5000 : Training Cost: 1741494100.0 a,b,c,d: 99.524734 -84.43976 -49.181057 585.85876 2862.4915 +Epoch 6000 : Training Cost: 1709199600.0 a,b,c,d: 102.31984 -112.19895 -56.808075 733.1876 3499.6199 +Epoch 7000 : Training Cost: 1678261800.0 a,b,c,d: 104.87324 -138.32709 -59.9442 888.79626 4146.2944 +Epoch 8000 : Training Cost: 1648340600.0 a,b,c,d: 107.23536 -163.15173 -59.58964 1050.524 4798.979 +Epoch 9000 : Training Cost: 1619243400.0 a,b,c,d: 109.44742 -186.9409 -56.53944 1216.6432 5454.9463 +Epoch 10000 : Training Cost: 1590821900.0 a,b,c,d: 111.54233 -209.91287 -51.423084 1385.8513 6113.5137 +Epoch 11000 : Training Cost: 1563042200.0 a,b,c,d: 113.54405 -232.21953 -44.73371 1557.1084 6771.7046 +Epoch 12000 : Training Cost: 1535855600.0 a,b,c,d: 115.471565 -253.9838 -36.851135 1729.535 7429.069 +Epoch 13000 : Training Cost: 1509255300.0 a,b,c,d: 117.33939 -275.29697 -28.0714 1902.5308 8083.9634 +Epoch 14000 : Training Cost: 1483227000.0 a,b,c,d: 119.1605 -296.2472 -18.618649 2075.6094 8735.381 +Epoch 15000 : Training Cost: 1457726700.0 a,b,c,d: 120.94584 -316.915 -8.650095 2248.3247 9384.197 +Epoch 16000 : Training Cost: 1432777300.0 a,b,c,d: 122.69806 -337.30704 1.7027153 2420.5771 10028.871 +Epoch 17000 : Training Cost: 1408365000.0 a,b,c,d: 124.42179 -357.45245 12.33499 2592.2983 10669.157 +Epoch 18000 : Training Cost: 1384480000.0 a,b,c,d: 126.12332 -377.39734 23.168756 2763.0933 11305.027 +Epoch 19000 : Training Cost: 1361116800.0 a,b,c,d: 127.80568 -397.16415 34.160156 2933.0452 11935.669 +Epoch 20000 : Training Cost: 1338288100.0 a,b,c,d: 129.4674 -416.72803 45.259155 3101.7727 12561.179 +Epoch 21000 : Training Cost: 1315959700.0 a,b,c,d: 131.11403 -436.14285 56.4436 3269.3142 13182.058 +Epoch 22000 : Training Cost: 1294164700.0 a,b,c,d: 132.74377 -455.3779 67.6757 3435.3833 13796.807 +Epoch 23000 : Training Cost: 1272863600.0 a,b,c,d: 134.35779 -474.45316 78.96117 3600.264 14406.58 +Epoch 24000 : Training Cost: 1252052600.0 a,b,c,d: 135.9583 -493.38254 90.268616 3764.0078 15010.481 +Epoch 25000 : Training Cost: 1231713700.0 a,b,c,d: 137.54753 -512.1876 101.59372 3926.4897 15609.368 +1231713700.0 137.54753 -512.1876 101.59372 3926.4897 15609.368 +
+ +
predictions = [] +for x in abscissa: + predictions.append((coefficient1*pow(x,4) + coefficient2*pow(x,3) + coefficient3*pow(x,2) + coefficient4*x + constant)) +plt.plot(abscissa , ordinate, 'ro', label ='Original data') +plt.plot(abscissa, predictions, label ='Fitted line') +plt.title('Quartic Regression Result') +plt.legend() +plt.show() +
+ +

Quintic

with tf.Session() as sess: + sess.run(init) + for epoch in range(no_of_epochs): + for (x,y) in zip(abscissa, ordinate): + sess.run(optimizer5, feed_dict={X:x, Y:y}) + if (epoch+1)%1000==0: + cost = sess.run(mse5,feed_dict={X:abscissa,Y:ordinate}) + print("Epoch",(epoch+1), ": Training Cost:", cost," a,b,c,d,e,f:",sess.run(a),sess.run(b),sess.run(c),sess.run(d),sess.run(e),sess.run(f)) + + training_cost = sess.run(mse5,feed_dict={X:abscissa,Y:ordinate}) + coefficient1 = sess.run(a) + coefficient2 = sess.run(b) + coefficient3 = sess.run(c) + coefficient4 = sess.run(d) + coefficient5 = sess.run(e) + constant = sess.run(f) +
+ +
Epoch 1000 : Training Cost: 1409200100.0 a,b,c,d,e,f: 7.949472 7.46219 55.626034 184.29028 484.00223 1024.0083 +Epoch 2000 : Training Cost: 1306882400.0 a,b,c,d,e,f: 8.732181 -4.0085897 73.25298 315.90103 904.08887 2004.9749 +Epoch 3000 : Training Cost: 1212606000.0 a,b,c,d,e,f: 9.732249 -16.90125 86.28379 437.06552 1305.055 2966.2188 +Epoch 4000 : Training Cost: 1123640400.0 a,b,c,d,e,f: 10.74851 -29.82692 98.59997 555.331 1698.4631 3917.9155 +Epoch 5000 : Training Cost: 1039694300.0 a,b,c,d,e,f: 11.75426 -42.598194 110.698326 671.64355 2085.5513 4860.8535 +Epoch 6000 : Training Cost: 960663550.0 a,b,c,d,e,f: 12.745439 -55.18337 122.644936 786.00214 2466.1638 5794.3735 +Epoch 7000 : Training Cost: 886438340.0 a,b,c,d,e,f: 13.721028 -67.57168 134.43822 898.3691 2839.9958 6717.659 +Epoch 8000 : Training Cost: 816913100.0 a,b,c,d,e,f: 14.679965 -79.75113 146.07385 1008.66895 3206.6692 7629.812 +Epoch 9000 : Training Cost: 751971500.0 a,b,c,d,e,f: 15.62181 -91.71608 157.55713 1116.7715 3565.8323 8529.976 +Epoch 10000 : Training Cost: 691508740.0 a,b,c,d,e,f: 16.545347 -103.4531 168.88321 1222.6348 3916.9785 9416.236 +Epoch 11000 : Training Cost: 635382000.0 a,b,c,d,e,f: 17.450052 -114.954254 180.03932 1326.1565 4259.842 10287.99 +Epoch 12000 : Training Cost: 583477250.0 a,b,c,d,e,f: 18.334944 -126.20821 191.02948 1427.2095 4593.8 11143.449 +Epoch 13000 : Training Cost: 535640400.0 a,b,c,d,e,f: 19.198917 -137.20206 201.84718 1525.6926 4918.5327 11981.633 +Epoch 14000 : Training Cost: 491722240.0 a,b,c,d,e,f: 20.041153 -147.92719 212.49709 1621.5496 5233.627 12800.468 +Epoch 15000 : Training Cost: 451559520.0 a,b,c,d,e,f: 20.860966 -158.37456 222.97133 1714.7141 5538.676 13598.337 +Epoch 16000 : Training Cost: 414988960.0 a,b,c,d,e,f: 21.657421 -168.53406 233.27422 1805.0874 5833.1978 14373.658 +Epoch 17000 : Training Cost: 381837920.0 a,b,c,d,e,f: 22.429693 -178.39536 243.39914 1892.5883 6116.847 15124.394 +Epoch 18000 : Training Cost: 351931300.0 a,b,c,d,e,f: 23.176882 -187.94789 253.3445 1977.137 6389.117 15848.417 +Epoch 19000 : Training Cost: 325074400.0 a,b,c,d,e,f: 23.898485 -197.18741 263.12512 2058.6716 6649.8037 16543.95 +Epoch 20000 : Training Cost: 301073570.0 a,b,c,d,e,f: 24.593851 -206.10497 272.72385 2137.1797 6898.544 17209.367 +Epoch 21000 : Training Cost: 279727000.0 a,b,c,d,e,f: 25.262104 -214.69217 282.14642 2212.6372 7135.217 17842.854 +Epoch 22000 : Training Cost: 260845550.0 a,b,c,d,e,f: 25.903376 -222.94969 291.4003 2284.9844 7359.4644 18442.408 +Epoch 23000 : Training Cost: 244218030.0 a,b,c,d,e,f: 26.517094 -230.8697 300.45532 2354.3003 7571.261 19007.49 +Epoch 24000 : Training Cost: 229660080.0 a,b,c,d,e,f: 27.102589 -238.44817 309.35342 2420.4185 7770.5728 19536.19 +Epoch 25000 : Training Cost: 216972400.0 a,b,c,d,e,f: 27.660324 -245.69016 318.10062 2483.3608 7957.354 20027.707 +216972400.0 27.660324 -245.69016 318.10062 2483.3608 7957.354 20027.707 +
+ +
predictions = [] +for x in abscissa: + predictions.append((coefficient1*pow(x,5) + coefficient2*pow(x,4) + coefficient3*pow(x,3) + coefficient4*pow(x,2) + coefficient5*x + constant)) +plt.plot(abscissa , ordinate, 'ro', label ='Original data') +plt.plot(abscissa, predictions, label ='Fitted line') +plt.title('Quintic Regression Result') +plt.legend() +plt.show() +
+ +

Results and Conclusion

You just learnt Polynomial Regression using TensorFlow!

Notes

Overfitting

> Overfitting refers to a model that models the training data too well. Overfitting happens when a model learns the detail and noise in the training data to the extent that it negatively impacts the performance of the model on new data. This means that the noise or random fluctuations in the training data are picked up and learned as concepts by the model. The problem is that these concepts do not apply to new data and negatively impact the model's ability to generalize.

Source: Machine Learning Mastery

Basically, if you train your machine learning model on a small dataset for a very large number of epochs, the model will learn all the deformities/noise in the data and start treating them as a normal part of it. When it then sees new data, it will not generalize well to it, and the accuracy of the model will suffer.
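A quick way to spot this while training is to hold out part of the data as a validation set and watch both costs. Below is a minimal sketch (an illustration only, reusing the abscissa, ordinate, X, Y, init, no_of_epochs, optimizer5 and mse5 objects defined earlier in this post; ideally you would shuffle the data before splitting):

split = int(0.8 * len(abscissa))
train_x, val_x = abscissa[:split], abscissa[split:]
train_y, val_y = ordinate[:split], ordinate[split:]

with tf.Session() as sess:
    sess.run(init)
    for epoch in range(no_of_epochs):
        for (x, y) in zip(train_x, train_y):
            sess.run(optimizer5, feed_dict={X: x, Y: y})
        if (epoch + 1) % 1000 == 0:
            # If the training cost keeps falling while the validation cost
            # starts rising, the model has begun to memorise noise.
            train_cost = sess.run(mse5, feed_dict={X: train_x, Y: train_y})
            val_cost = sess.run(mse5, feed_dict={X: val_x, Y: val_y})
            print("Epoch", epoch + 1, "Training Cost:", train_cost, "Validation Cost:", val_cost)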

Tagged with:
\ No newline at end of file diff --git a/posts/2019-12-22-Fake-News-Detector/index 2.html b/posts/2019-12-22-Fake-News-Detector/index 2.html new file mode 100644 index 0000000..2a6cb7a --- /dev/null +++ b/posts/2019-12-22-Fake-News-Detector/index 2.html @@ -0,0 +1,173 @@ +Building a Fake News Detector with Turicreate | Navan Chauhan
6 minute readCreated on December 22, 2019Last modified on January 18, 2020

Building a Fake News Detector with Turicreate

In this tutorial we will build a fake news detecting app from scratch, using Turicreate for the machine learning model and SwiftUI for building the app

Note: These commands are written as if you are running them in a Jupyter notebook.

Building the Machine Learning Model

Data Gathering

To build a classifier, you need a lot of data. George McIntire (GH: @joolsa) has created a wonderful dataset containing the headline, body and whether it is fake or real. Whenever you are looking for a dataset, always try searching on Kaggle and GitHub before you start building your own.

Dependencies

I used a Google Colab instance for training my model. If you also plan on using Google Colab, then I recommend choosing a GPU instance (it is free), as this allows you to train the model on the GPU. Turicreate is built on top of Apache's MXNet framework; for us to use the GPU, we need to install a CUDA-compatible MXNet package.

!pip install turicreate +!pip uninstall -y mxnet +!pip install mxnet-cu100==1.4.0.post0 +
+ +

If you do not wish to train on GPU or are running it on your computer, you can ignore the last two lines

Downloading the Dataset

!wget -q "https://github.com/joolsa/fake_real_news_dataset/raw/master/fake_or_real_news.csv.zip" +!unzip fake_or_real_news.csv.zip +
+ +

Model Creation

import turicreate as tc +tc.config.set_num_gpus(-1) # If you do not wish to use GPUs, set it to 0 +
+ +
dataSFrame = tc.SFrame('fake_or_real_news.csv') +
+ +

The dataset contains a column named "X1", which is of no use to us. Therefore, we simply drop it

dataSFrame.remove_column('X1') +
+ +

Splitting Dataset

train, test = dataSFrame.random_split(.9) +
+ +

Training

model = tc.text_classifier.create( + dataset=train, + target='label', + features=['title','text'] +) +
+ +
+-----------+----------+-----------+--------------+-------------------+---------------------+ +| Iteration | Passes | Step size | Elapsed Time | Training Accuracy | Validation Accuracy | ++-----------+----------+-----------+--------------+-------------------+---------------------+ +| 0 | 2 | 1.000000 | 1.156349 | 0.889680 | 0.790036 | +| 1 | 4 | 1.000000 | 1.359196 | 0.985952 | 0.918149 | +| 2 | 6 | 0.820091 | 1.557205 | 0.990260 | 0.914591 | +| 3 | 7 | 1.000000 | 1.684872 | 0.998689 | 0.925267 | +| 4 | 8 | 1.000000 | 1.814194 | 0.999063 | 0.925267 | +| 9 | 14 | 1.000000 | 2.507072 | 1.000000 | 0.911032 | ++-----------+----------+-----------+--------------+-------------------+---------------------+ +
+ +

Testing the Model

test_predictions = model.predict(test) +accuracy = tc.evaluation.accuracy(test['label'], test_predictions) +print(f'Topic classifier model has a testing accuracy of {accuracy*100}% ', flush=True) +
+ +
Topic classifier model has a testing accuracy of 92.3076923076923% +
+ +

We have just created our own Fake News Detection Model which has an accuracy of 92%!

example_text = {"title": ["Middling ‘Rise Of Skywalker’ Review Leaves Fan On Fence About Whether To Threaten To Kill Critic"], "text": ["Expressing ambivalence toward the relatively balanced appraisal of the film, Star Wars fan Miles Ariely admitted Thursday that an online publication’s middling review of The Rise Of Skywalker had left him on the fence about whether he would still threaten to kill the critic who wrote it. “I’m really of two minds about this, because on the one hand, he said the new movie fails to live up to the original trilogy, which makes me at least want to throw a brick through his window with a note telling him to watch his back,” said Ariely, confirming he had already drafted an eight-page-long death threat to Stan Corimer of the website Screen-On Time, but had not yet decided whether to post it to the reviewer’s Facebook page. “On the other hand, though, he commended J.J. Abrams’ skillful pacing and faithfulness to George Lucas’ vision, which makes me wonder if I should just call the whole thing off. Now, I really don’t feel like camping outside his house for hours. Maybe I could go with a response that’s somewhere in between, like, threatening to kill his dog but not everyone in his whole family? I don’t know. This is a tough one.” At press time, sources reported that Ariely had resolved to wear his Ewok costume while he murdered the critic in his sleep."]} +example_prediction = model.classify(tc.SFrame(example_text)) +print(example_prediction, flush=True) +
+ +
+-------+--------------------+ +| class | probability | ++-------+--------------------+ +| FAKE | 0.9245648658345308 | ++-------+--------------------+ +[1 rows x 2 columns] +
+ +

Exporting the Model

model_name = 'FakeNews' +coreml_model_name = model_name + '.mlmodel' +exportedModel = model.export_coreml(coreml_model_name) +
+ +

Note: To download files from Google Colab, simply click on the Files section in the sidebar, right-click on the filename and then click on Download
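Alternatively, assuming you are running inside a Colab notebook, you can trigger the download from code:

from google.colab import files
files.download('FakeNews.mlmodel')  # the Core ML file exported in the previous cell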

Link to Colab Notebook

Building the App using SwiftUI

Initial Setup

First, we create a Single View App (make sure you select SwiftUI for the user interface)

Then we copy our .mlmodel file to our project (just drag and drop the file into the Xcode file navigator)

Our ML model does not take a string directly as an input; rather, it takes a bag of words as an input. The bag-of-words model is a simplifying representation used in NLP, in which text is represented as the bag (multiset) of its words, without any regard for grammar or word order, but keeping multiplicity.
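Conceptually, a bag of words just counts how many times each token appears. As a rough illustration in Python (the sentence here is made up, and this is not the code the app uses):

from collections import Counter

def bag_of_words(text):
    # Split on whitespace and count multiplicities, ignoring grammar and word order.
    return dict(Counter(text.lower().split()))

print(bag_of_words("fake news is fake news"))
# {'fake': 2, 'news': 2, 'is': 1}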

We define our bag of words function

func bow(text: String) -> [String: Double] { + var bagOfWords = [String: Double]() + + let tagger = NSLinguisticTagger(tagSchemes: [.tokenType], options: 0) + let range = NSRange(location: 0, length: text.utf16.count) + let options: NSLinguisticTagger.Options = [.omitPunctuation, .omitWhitespace] + tagger.string = text + + tagger.enumerateTags(in: range, unit: .word, scheme: .tokenType, options: options) { _, tokenRange, _ in + let word = (text as NSString).substring(with: tokenRange) + if bagOfWords[word] != nil { + bagOfWords[word]! += 1 + } else { + bagOfWords[word] = 1 + } + } + + return bagOfWords + } +
+ +

We also declare our variables

@State private var title: String = "" +@State private var headline: String = "" +@State private var alertTitle = "" +@State private var alertText = "" +@State private var showingAlert = false +
+ +

Finally, we implement a simple function which reads the two text fields, creates their bag of words representation and displays an alert with the appropriate result

Complete Code

import SwiftUI + +struct ContentView: View { + @State private var title: String = "" + @State private var headline: String = "" + + @State private var alertTitle = "" + @State private var alertText = "" + @State private var showingAlert = false + + var body: some View { + NavigationView { + VStack(alignment: .leading) { + Text("Headline").font(.headline) + TextField("Please Enter Headline", text: $title) + .lineLimit(nil) + Text("Body").font(.headline) + TextField("Please Enter the content", text: $headline) + .lineLimit(nil) + } + .navigationBarTitle("Fake News Checker") + .navigationBarItems(trailing: + Button(action: classifyFakeNews) { + Text("Check") + }) + .padding() + .alert(isPresented: $showingAlert){ + Alert(title: Text(alertTitle), message: Text(alertText), dismissButton: .default(Text("OK"))) + } + } + + } + + func classifyFakeNews(){ + let model = FakeNews() + let myTitle = bow(text: title) + let myText = bow(text: headline) + do { + let prediction = try model.prediction(title: myTitle, text: myText) + alertTitle = prediction.label + alertText = "It is likely that this piece of news is \(prediction.label.lowercased())." + print(alertText) + } catch { + alertTitle = "Error" + alertText = "Sorry, could not classify if the input news was fake or not." + } + + showingAlert = true + } + func bow(text: String) -> [String: Double] { + var bagOfWords = [String: Double]() + + let tagger = NSLinguisticTagger(tagSchemes: [.tokenType], options: 0) + let range = NSRange(location: 0, length: text.utf16.count) + let options: NSLinguisticTagger.Options = [.omitPunctuation, .omitWhitespace] + tagger.string = text + + tagger.enumerateTags(in: range, unit: .word, scheme: .tokenType, options: options) { _, tokenRange, _ in + let word = (text as NSString).substring(with: tokenRange) + if bagOfWords[word] != nil { + bagOfWords[word]! += 1 + } else { + bagOfWords[word] = 1 + } + } + + return bagOfWords + } +} + +struct ContentView_Previews: PreviewProvider { + static var previews: some View { + ContentView() + } +} +
+ +
Tagged with:
\ No newline at end of file diff --git a/posts/2019-12-22-Fake-News-Detector/index 5.html b/posts/2019-12-22-Fake-News-Detector/index 5.html new file mode 100644 index 0000000..2a6cb7a --- /dev/null +++ b/posts/2019-12-22-Fake-News-Detector/index 5.html @@ -0,0 +1,173 @@ +Building a Fake News Detector with Turicreate | Navan Chauhan
6 minute readCreated on December 22, 2019Last modified on January 18, 2020

Building a Fake News Detector with Turicreate

In this tutorial we will build a fake news detecting app from scratch, using Turicreate for the machine learning model and SwiftUI for building the app

Note: These commands are written as if you are running them in a Jupyter notebook.

Building the Machine Learning Model

Data Gathering

To build a classifier, you need a lot of data. George McIntire (GH: @joolsa) has created a wonderful dataset containing the headline, body and whether it is fake or real. Whenever you are looking for a dataset, always try searching on Kaggle and GitHub before you start building your own.

Dependencies

I used a Google Colab instance for training my model. If you also plan on using Google Colab, then I recommend choosing a GPU instance (it is free), as this allows you to train the model on the GPU. Turicreate is built on top of Apache's MXNet framework; for us to use the GPU, we need to install a CUDA-compatible MXNet package.

!pip install turicreate +!pip uninstall -y mxnet +!pip install mxnet-cu100==1.4.0.post0 +
+ +

If you do not wish to train on GPU or are running it on your computer, you can ignore the last two lines

Downloading the Dataset

!wget -q "https://github.com/joolsa/fake_real_news_dataset/raw/master/fake_or_real_news.csv.zip" +!unzip fake_or_real_news.csv.zip +
+ +

Model Creation

import turicreate as tc +tc.config.set_num_gpus(-1) # If you do not wish to use GPUs, set it to 0 +
+ +
dataSFrame = tc.SFrame('fake_or_real_news.csv') +
+ +

The dataset contains a column named "X1", which is of no use to us. Therefore, we simply drop it

dataSFrame.remove_column('X1') +
+ +

Splitting Dataset

train, test = dataSFrame.random_split(.9) +
+ +

Training

model = tc.text_classifier.create( + dataset=train, + target='label', + features=['title','text'] +) +
+ +
+-----------+----------+-----------+--------------+-------------------+---------------------+ +| Iteration | Passes | Step size | Elapsed Time | Training Accuracy | Validation Accuracy | ++-----------+----------+-----------+--------------+-------------------+---------------------+ +| 0 | 2 | 1.000000 | 1.156349 | 0.889680 | 0.790036 | +| 1 | 4 | 1.000000 | 1.359196 | 0.985952 | 0.918149 | +| 2 | 6 | 0.820091 | 1.557205 | 0.990260 | 0.914591 | +| 3 | 7 | 1.000000 | 1.684872 | 0.998689 | 0.925267 | +| 4 | 8 | 1.000000 | 1.814194 | 0.999063 | 0.925267 | +| 9 | 14 | 1.000000 | 2.507072 | 1.000000 | 0.911032 | ++-----------+----------+-----------+--------------+-------------------+---------------------+ +
+ +

Testing the Model

test_predictions = model.predict(test) +accuracy = tc.evaluation.accuracy(test['label'], test_predictions) +print(f'Topic classifier model has a testing accuracy of {accuracy*100}% ', flush=True) +
+ +
Topic classifier model has a testing accuracy of 92.3076923076923% +
+ +

We have just created our own Fake News Detection Model which has an accuracy of 92%!

example_text = {"title": ["Middling ‘Rise Of Skywalker’ Review Leaves Fan On Fence About Whether To Threaten To Kill Critic"], "text": ["Expressing ambivalence toward the relatively balanced appraisal of the film, Star Wars fan Miles Ariely admitted Thursday that an online publication’s middling review of The Rise Of Skywalker had left him on the fence about whether he would still threaten to kill the critic who wrote it. “I’m really of two minds about this, because on the one hand, he said the new movie fails to live up to the original trilogy, which makes me at least want to throw a brick through his window with a note telling him to watch his back,” said Ariely, confirming he had already drafted an eight-page-long death threat to Stan Corimer of the website Screen-On Time, but had not yet decided whether to post it to the reviewer’s Facebook page. “On the other hand, though, he commended J.J. Abrams’ skillful pacing and faithfulness to George Lucas’ vision, which makes me wonder if I should just call the whole thing off. Now, I really don’t feel like camping outside his house for hours. Maybe I could go with a response that’s somewhere in between, like, threatening to kill his dog but not everyone in his whole family? I don’t know. This is a tough one.” At press time, sources reported that Ariely had resolved to wear his Ewok costume while he murdered the critic in his sleep."]} +example_prediction = model.classify(tc.SFrame(example_text)) +print(example_prediction, flush=True) +
+ +
+-------+--------------------+ +| class | probability | ++-------+--------------------+ +| FAKE | 0.9245648658345308 | ++-------+--------------------+ +[1 rows x 2 columns] +
+ +

Exporting the Model

model_name = 'FakeNews' +coreml_model_name = model_name + '.mlmodel' +exportedModel = model.export_coreml(coreml_model_name) +
+ +

Note: To download files from Google Colab, simply click on the Files section in the sidebar, right-click on the filename and then click on Download
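Alternatively, assuming you are running inside a Colab notebook, you can trigger the download from code:

from google.colab import files
files.download('FakeNews.mlmodel')  # the Core ML file exported in the previous cell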

Link to Colab Notebook

Building the App using SwiftUI

Initial Setup

First, we create a Single View App (make sure you select SwiftUI for the user interface)

Then we copy our .mlmodel file to our project (just drag and drop the file into the Xcode file navigator)

Our ML model does not take a string directly as an input; rather, it takes a bag of words as an input. The bag-of-words model is a simplifying representation used in NLP, in which text is represented as the bag (multiset) of its words, without any regard for grammar or word order, but keeping multiplicity.
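Conceptually, a bag of words just counts how many times each token appears. As a rough illustration in Python (the sentence here is made up, and this is not the code the app uses):

from collections import Counter

def bag_of_words(text):
    # Split on whitespace and count multiplicities, ignoring grammar and word order.
    return dict(Counter(text.lower().split()))

print(bag_of_words("fake news is fake news"))
# {'fake': 2, 'news': 2, 'is': 1}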

We define our bag of words function

func bow(text: String) -> [String: Double] { + var bagOfWords = [String: Double]() + + let tagger = NSLinguisticTagger(tagSchemes: [.tokenType], options: 0) + let range = NSRange(location: 0, length: text.utf16.count) + let options: NSLinguisticTagger.Options = [.omitPunctuation, .omitWhitespace] + tagger.string = text + + tagger.enumerateTags(in: range, unit: .word, scheme: .tokenType, options: options) { _, tokenRange, _ in + let word = (text as NSString).substring(with: tokenRange) + if bagOfWords[word] != nil { + bagOfWords[word]! += 1 + } else { + bagOfWords[word] = 1 + } + } + + return bagOfWords + } +
+ +

We also declare our variables

@State private var title: String = "" +@State private var headline: String = "" +@State private var alertTitle = "" +@State private var alertText = "" +@State private var showingAlert = false +
+ +

Finally, we implement a simple function which reads the two text fields, creates their bag of words representation and displays an alert with the appropriate result

Complete Code

import SwiftUI + +struct ContentView: View { + @State private var title: String = "" + @State private var headline: String = "" + + @State private var alertTitle = "" + @State private var alertText = "" + @State private var showingAlert = false + + var body: some View { + NavigationView { + VStack(alignment: .leading) { + Text("Headline").font(.headline) + TextField("Please Enter Headline", text: $title) + .lineLimit(nil) + Text("Body").font(.headline) + TextField("Please Enter the content", text: $headline) + .lineLimit(nil) + } + .navigationBarTitle("Fake News Checker") + .navigationBarItems(trailing: + Button(action: classifyFakeNews) { + Text("Check") + }) + .padding() + .alert(isPresented: $showingAlert){ + Alert(title: Text(alertTitle), message: Text(alertText), dismissButton: .default(Text("OK"))) + } + } + + } + + func classifyFakeNews(){ + let model = FakeNews() + let myTitle = bow(text: title) + let myText = bow(text: headline) + do { + let prediction = try model.prediction(title: myTitle, text: myText) + alertTitle = prediction.label + alertText = "It is likely that this piece of news is \(prediction.label.lowercased())." + print(alertText) + } catch { + alertTitle = "Error" + alertText = "Sorry, could not classify if the input news was fake or not." + } + + showingAlert = true + } + func bow(text: String) -> [String: Double] { + var bagOfWords = [String: Double]() + + let tagger = NSLinguisticTagger(tagSchemes: [.tokenType], options: 0) + let range = NSRange(location: 0, length: text.utf16.count) + let options: NSLinguisticTagger.Options = [.omitPunctuation, .omitWhitespace] + tagger.string = text + + tagger.enumerateTags(in: range, unit: .word, scheme: .tokenType, options: options) { _, tokenRange, _ in + let word = (text as NSString).substring(with: tokenRange) + if bagOfWords[word] != nil { + bagOfWords[word]! += 1 + } else { + bagOfWords[word] = 1 + } + } + + return bagOfWords + } +} + +struct ContentView_Previews: PreviewProvider { + static var previews: some View { + ContentView() + } +} +
+ +
Tagged with:
\ No newline at end of file diff --git a/posts/2020-01-14-Converting-between-PIL-NumPy/index 2.html b/posts/2020-01-14-Converting-between-PIL-NumPy/index 2.html new file mode 100644 index 0000000..b2ba6be --- /dev/null +++ b/posts/2020-01-14-Converting-between-PIL-NumPy/index 2.html @@ -0,0 +1,19 @@ +Converting between image and NumPy array | Navan Chauhan
0 minute readCreated on January 14, 2020Last modified on January 18, 2020

Converting between image and NumPy array

import numpy +import PIL + +# Convert PIL Image to NumPy array +img = PIL.Image.open("foo.jpg") +arr = numpy.array(img) + +# Convert array to Image +img = PIL.Image.fromarray(arr) +
+ +
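As a quick sanity check (illustrative; assumes foo.jpg is an ordinary RGB image), the resulting array is height x width x channels with 8-bit values:

print(arr.shape, arr.dtype)
# e.g. (480, 640, 3) uint8 for a 640x480 RGB JPEG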

Saving an Image

try: + img.save(destination, "JPEG", quality=80, optimize=True, progressive=True) +except IOError: + PIL.ImageFile.MAXBLOCK = img.size[0] * img.size[1] + img.save(destination, "JPEG", quality=80, optimize=True, progressive=True) +
+ +
Tagged with:
\ No newline at end of file diff --git a/posts/2020-01-14-Converting-between-PIL-NumPy/index 5.html b/posts/2020-01-14-Converting-between-PIL-NumPy/index 5.html new file mode 100644 index 0000000..b2ba6be --- /dev/null +++ b/posts/2020-01-14-Converting-between-PIL-NumPy/index 5.html @@ -0,0 +1,19 @@ +Converting between image and NumPy array | Navan Chauhan
0 minute readCreated on January 14, 2020Last modified on January 18, 2020

Converting between image and NumPy array

import numpy +import PIL + +# Convert PIL Image to NumPy array +img = PIL.Image.open("foo.jpg") +arr = numpy.array(img) + +# Convert array to Image +img = PIL.Image.fromarray(arr) +
+ +
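As a quick sanity check (illustrative; assumes foo.jpg is an ordinary RGB image), the resulting array is height x width x channels with 8-bit values:

print(arr.shape, arr.dtype)
# e.g. (480, 640, 3) uint8 for a 640x480 RGB JPEG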

Saving an Image

try: + img.save(destination, "JPEG", quality=80, optimize=True, progressive=True) +except IOError: + PIL.ImageFile.MAXBLOCK = img.size[0] * img.size[1] + img.save(destination, "JPEG", quality=80, optimize=True, progressive=True) +
+ +
Tagged with:
\ No newline at end of file diff --git a/posts/2020-01-15-Setting-up-Kaggle-to-use-with-Colab/index 2.html b/posts/2020-01-15-Setting-up-Kaggle-to-use-with-Colab/index 2.html new file mode 100644 index 0000000..ea6a41c --- /dev/null +++ b/posts/2020-01-15-Setting-up-Kaggle-to-use-with-Colab/index 2.html @@ -0,0 +1,9 @@ +Setting up Kaggle to use with Google Colab | Navan Chauhan
1 minute readCreated on January 15, 2020Last modified on January 19, 2020

Setting up Kaggle to use with Google Colab

In order to be able to access Kaggle Datasets, you will need to have an account on Kaggle (which is Free)

Grabbing Our Tokens

Go to Kaggle

Click on your User Profile and Click on My Account

Scroll down until you see Create New API Token

This will download your token as a JSON file

Copy the File to the root folder of your Google Drive

Setting up Colab

Mounting Google Drive

import os +from google.colab import drive +drive.mount('/content/drive') +
+ +

After this, click on the URL in the output section, log in, and then paste the auth code

Configuring Kaggle

os.environ['KAGGLE_CONFIG_DIR'] = "/content/drive/My Drive/" +
+ +

Voila! You can now download Kaggle datasets
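For example, the fire-and-smoke dataset used in the image classifier post can now be pulled straight from a Colab cell:

!kaggle datasets download ashutosh69/fire-and-smoke-dataset
!unzip "fire-and-smoke-dataset.zip"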

Tagged with:
\ No newline at end of file diff --git a/posts/2020-01-15-Setting-up-Kaggle-to-use-with-Colab/index 5.html b/posts/2020-01-15-Setting-up-Kaggle-to-use-with-Colab/index 5.html new file mode 100644 index 0000000..ea6a41c --- /dev/null +++ b/posts/2020-01-15-Setting-up-Kaggle-to-use-with-Colab/index 5.html @@ -0,0 +1,9 @@ +Setting up Kaggle to use with Google Colab | Navan Chauhan
1 minute readCreated on January 15, 2020Last modified on January 19, 2020

Setting up Kaggle to use with Google Colab

In order to be able to access Kaggle Datasets, you will need to have an account on Kaggle (which is Free)

Grabbing Our Tokens

Go to Kaggle

Click on your User Profile and Click on My Account

Scroll down until you see Create New API Token

This will download your token as a JSON file

Copy the File to the root folder of your Google Drive

Setting up Colab

Mounting Google Drive

import os +from google.colab import drive +drive.mount('/content/drive') +
+ +

After this, click on the URL in the output section, log in, and then paste the auth code

Configuring Kaggle

os.environ['KAGGLE_CONFIG_DIR'] = "/content/drive/My Drive/" +
+ +

Voila! You can now download Kaggle datasets
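For example, the fire-and-smoke dataset used in the image classifier post can now be pulled straight from a Colab cell:

!kaggle datasets download ashutosh69/fire-and-smoke-dataset
!unzip "fire-and-smoke-dataset.zip"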

Tagged with:
\ No newline at end of file diff --git a/posts/2020-01-16-Image-Classifier-Using-Turicreate/index 2.html b/posts/2020-01-16-Image-Classifier-Using-Turicreate/index 2.html new file mode 100644 index 0000000..187a8d2 --- /dev/null +++ b/posts/2020-01-16-Image-Classifier-Using-Turicreate/index 2.html @@ -0,0 +1,213 @@ +Creating a Custom Image Classifier using Turicreate to detect Smoke and Fire | Navan Chauhan
6 minute readCreated on January 16, 2020Last modified on January 19, 2020

Creating a Custom Image Classifier using Turicreate to detect Smoke and Fire

For setting up Kaggle with Google Colab, please refer to my previous post

Dataset

Mounting Google Drive

import os +from google.colab import drive +drive.mount('/content/drive') +
+ +

Downloading Dataset from Kaggle

os.environ['KAGGLE_CONFIG_DIR'] = "/content/drive/My Drive/" +!kaggle datasets download ashutosh69/fire-and-smoke-dataset +!unzip "fire-and-smoke-dataset.zip" +
+ +

Pre-Processing

!mkdir default smoke fire +
+ +


!ls data/data/img_data/train/default/*.jpg +
+ +


img_1002.jpg img_20.jpg img_519.jpg img_604.jpg img_80.jpg +img_1003.jpg img_21.jpg img_51.jpg img_60.jpg img_8.jpg +img_1007.jpg img_22.jpg img_520.jpg img_61.jpg img_900.jpg +img_100.jpg img_23.jpg img_521.jpg 'img_62 (2).jpg' img_920.jpg +img_1014.jpg img_24.jpg 'img_52 (2).jpg' img_62.jpg img_921.jpg +img_1018.jpg img_29.jpg img_522.jpg 'img_63 (2).jpg' img_922.jpg +img_101.jpg img_3000.jpg img_523.jpg img_63.jpg img_923.jpg +img_1027.jpg img_335.jpg img_524.jpg img_66.jpg img_924.jpg +img_102.jpg img_336.jpg img_52.jpg img_67.jpg img_925.jpg +img_1042.jpg img_337.jpg img_530.jpg img_68.jpg img_926.jpg +img_1043.jpg img_338.jpg img_531.jpg img_700.jpg img_927.jpg +img_1046.jpg img_339.jpg 'img_53 (2).jpg' img_701.jpg img_928.jpg +img_1052.jpg img_340.jpg img_532.jpg img_702.jpg img_929.jpg +img_107.jpg img_341.jpg img_533.jpg img_703.jpg img_930.jpg +img_108.jpg img_3.jpg img_537.jpg img_704.jpg img_931.jpg +img_109.jpg img_400.jpg img_538.jpg img_705.jpg img_932.jpg +img_10.jpg img_471.jpg img_539.jpg img_706.jpg img_933.jpg +img_118.jpg img_472.jpg img_53.jpg img_707.jpg img_934.jpg +img_12.jpg img_473.jpg img_540.jpg img_708.jpg img_935.jpg +img_14.jpg img_488.jpg img_541.jpg img_709.jpg img_938.jpg +img_15.jpg img_489.jpg 'img_54 (2).jpg' img_70.jpg img_958.jpg +img_16.jpg img_490.jpg img_542.jpg img_710.jpg img_971.jpg +img_17.jpg img_491.jpg img_543.jpg 'img_71 (2).jpg' img_972.jpg +img_18.jpg img_492.jpg img_54.jpg img_71.jpg img_973.jpg +img_19.jpg img_493.jpg 'img_55 (2).jpg' img_72.jpg img_974.jpg +img_1.jpg img_494.jpg img_55.jpg img_73.jpg img_975.jpg +img_200.jpg img_495.jpg img_56.jpg img_74.jpg img_980.jpg +img_201.jpg img_496.jpg img_57.jpg img_75.jpg img_988.jpg +img_202.jpg img_497.jpg img_58.jpg img_76.jpg img_9.jpg +img_203.jpg img_4.jpg img_59.jpg img_77.jpg +img_204.jpg img_501.jpg img_601.jpg img_78.jpg +img_205.jpg img_502.jpg img_602.jpg img_79.jpg +img_206.jpg img_50.jpg img_603.jpg img_7.jpg +
+ +

The image files are not actually JPEGs, so we first need to re-save them in the correct format for Turicreate

from PIL import Image +import glob + + +folders = ["default","smoke","fire"] +for folder in folders: + n = 1 + for file in glob.glob("./data/data/img_data/train/" + folder + "/*.jpg"): + im = Image.open(file) + rgb_im = im.convert('RGB') + rgb_im.save((folder + "/" + str(n) + ".jpg"), quality=100) + n +=1 + for file in glob.glob("./data/data/img_data/train/" + folder + "/*.jpg"): + im = Image.open(file) + rgb_im = im.convert('RGB') + rgb_im.save((folder + "/" + str(n) + ".jpg"), quality=100) + n +=1 +
+ +


!mkdir train +!mv default ./train +!mv smoke ./train +!mv fire ./train +
+ +

Making the Image Classifier

Making an SFrame

!pip install turicreate +
+ +


import turicreate as tc +import os + +data = tc.image_analysis.load_images("./train", with_path=True) + +data["label"] = data["path"].apply(lambda path: os.path.basename(os.path.dirname(path))) + +print(data) + +data.save('fire-smoke.sframe') +
+ +


+-------------------------+------------------------+ +| path | image | ++-------------------------+------------------------+ +| ./train/default/1.jpg | Height: 224 Width: 224 | +| ./train/default/10.jpg | Height: 224 Width: 224 | +| ./train/default/100.jpg | Height: 224 Width: 224 | +| ./train/default/101.jpg | Height: 224 Width: 224 | +| ./train/default/102.jpg | Height: 224 Width: 224 | +| ./train/default/103.jpg | Height: 224 Width: 224 | +| ./train/default/104.jpg | Height: 224 Width: 224 | +| ./train/default/105.jpg | Height: 224 Width: 224 | +| ./train/default/106.jpg | Height: 224 Width: 224 | +| ./train/default/107.jpg | Height: 224 Width: 224 | ++-------------------------+------------------------+ +[2028 rows x 2 columns] +Note: Only the head of the SFrame is printed. +You can use print_rows(num_rows=m, num_columns=n) to print more rows and columns. ++-------------------------+------------------------+---------+ +| path | image | label | ++-------------------------+------------------------+---------+ +| ./train/default/1.jpg | Height: 224 Width: 224 | default | +| ./train/default/10.jpg | Height: 224 Width: 224 | default | +| ./train/default/100.jpg | Height: 224 Width: 224 | default | +| ./train/default/101.jpg | Height: 224 Width: 224 | default | +| ./train/default/102.jpg | Height: 224 Width: 224 | default | +| ./train/default/103.jpg | Height: 224 Width: 224 | default | +| ./train/default/104.jpg | Height: 224 Width: 224 | default | +| ./train/default/105.jpg | Height: 224 Width: 224 | default | +| ./train/default/106.jpg | Height: 224 Width: 224 | default | +| ./train/default/107.jpg | Height: 224 Width: 224 | default | ++-------------------------+------------------------+---------+ +[2028 rows x 3 columns] +Note: Only the head of the SFrame is printed. +You can use print_rows(num_rows=m, num_columns=n) to print more rows and columns. +
+ +

Making the Model

import turicreate as tc + +# Load the data +data = tc.SFrame('fire-smoke.sframe') + +# Make a train-test split +train_data, test_data = data.random_split(0.8) + +# Create the model +model = tc.image_classifier.create(train_data, target='label') + +# Save predictions to an SArray +predictions = model.predict(test_data) + +# Evaluate the model and print the results +metrics = model.evaluate(test_data) +print(metrics['accuracy']) + +# Save the model for later use in Turi Create +model.save('fire-smoke.model') + +# Export for use in Core ML +model.export_coreml('fire-smoke.mlmodel') +
+ +


Performing feature extraction on resized images... +Completed 64/1633 +Completed 128/1633 +Completed 192/1633 +Completed 256/1633 +Completed 320/1633 +Completed 384/1633 +Completed 448/1633 +Completed 512/1633 +Completed 576/1633 +Completed 640/1633 +Completed 704/1633 +Completed 768/1633 +Completed 832/1633 +Completed 896/1633 +Completed 960/1633 +Completed 1024/1633 +Completed 1088/1633 +Completed 1152/1633 +Completed 1216/1633 +Completed 1280/1633 +Completed 1344/1633 +Completed 1408/1633 +Completed 1472/1633 +Completed 1536/1633 +Completed 1600/1633 +Completed 1633/1633 +PROGRESS: Creating a validation set from 5 percent of training data. This may take a while. + You can set ``validation_set=None`` to disable validation tracking. + +Logistic regression: +-------------------------------------------------------- +Number of examples : 1551 +Number of classes : 3 +Number of feature columns : 1 +Number of unpacked features : 2048 +Number of coefficients : 4098 +Starting L-BFGS +-------------------------------------------------------- ++-----------+----------+-----------+--------------+-------------------+---------------------+ +| Iteration | Passes | Step size | Elapsed Time | Training Accuracy | Validation Accuracy | ++-----------+----------+-----------+--------------+-------------------+---------------------+ +| 0 | 6 | 0.018611 | 0.891830 | 0.553836 | 0.560976 | +| 1 | 10 | 0.390832 | 1.622383 | 0.744681 | 0.792683 | +| 2 | 11 | 0.488541 | 1.943987 | 0.733075 | 0.804878 | +| 3 | 14 | 2.442703 | 2.512545 | 0.727917 | 0.841463 | +| 4 | 15 | 2.442703 | 2.826964 | 0.861380 | 0.853659 | +| 9 | 28 | 2.340435 | 5.492035 | 0.941328 | 0.975610 | ++-----------+----------+-----------+--------------+-------------------+---------------------+ +Performing feature extraction on resized images... +Completed 64/395 +Completed 128/395 +Completed 192/395 +Completed 256/395 +Completed 320/395 +Completed 384/395 +Completed 395/395 +0.9316455696202531 +
+ +

We just got an accuracy of about 94% on the training data and 97% on the validation data, with roughly 93% on the held-out test set!
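To reuse the saved model later, a minimal sketch (assuming Turi Create's load_model and any image path from the training folders) could look like this:

import turicreate as tc

model = tc.load_model('fire-smoke.model')
img = tc.Image('./train/fire/1.jpg')  # any image from the dataset
print(model.predict(tc.SFrame({'image': [img]})))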

Tagged with:
\ No newline at end of file diff --git a/posts/2020-01-16-Image-Classifier-Using-Turicreate/index 5.html b/posts/2020-01-16-Image-Classifier-Using-Turicreate/index 5.html new file mode 100644 index 0000000..187a8d2 --- /dev/null +++ b/posts/2020-01-16-Image-Classifier-Using-Turicreate/index 5.html @@ -0,0 +1,213 @@ +Creating a Custom Image Classifier using Turicreate to detect Smoke and Fire | Navan Chauhan
6 minute readCreated on January 16, 2020Last modified on January 19, 2020

Creating a Custom Image Classifier using Turicreate to detect Smoke and Fire

For setting up Kaggle with Google Colab, please refer to my previous post

Dataset

Mounting Google Drive

import os +from google.colab import drive +drive.mount('/content/drive') +
+ +

Downloading Dataset from Kaggle

os.environ['KAGGLE_CONFIG_DIR'] = "/content/drive/My Drive/" +!kaggle datasets download ashutosh69/fire-and-smoke-dataset +!unzip "fire-and-smoke-dataset.zip" +
+ +

Pre-Processing

!mkdir default smoke fire +
+ +


!ls data/data/img_data/train/default/*.jpg +
+ +


img_1002.jpg img_20.jpg img_519.jpg img_604.jpg img_80.jpg +img_1003.jpg img_21.jpg img_51.jpg img_60.jpg img_8.jpg +img_1007.jpg img_22.jpg img_520.jpg img_61.jpg img_900.jpg +img_100.jpg img_23.jpg img_521.jpg 'img_62 (2).jpg' img_920.jpg +img_1014.jpg img_24.jpg 'img_52 (2).jpg' img_62.jpg img_921.jpg +img_1018.jpg img_29.jpg img_522.jpg 'img_63 (2).jpg' img_922.jpg +img_101.jpg img_3000.jpg img_523.jpg img_63.jpg img_923.jpg +img_1027.jpg img_335.jpg img_524.jpg img_66.jpg img_924.jpg +img_102.jpg img_336.jpg img_52.jpg img_67.jpg img_925.jpg +img_1042.jpg img_337.jpg img_530.jpg img_68.jpg img_926.jpg +img_1043.jpg img_338.jpg img_531.jpg img_700.jpg img_927.jpg +img_1046.jpg img_339.jpg 'img_53 (2).jpg' img_701.jpg img_928.jpg +img_1052.jpg img_340.jpg img_532.jpg img_702.jpg img_929.jpg +img_107.jpg img_341.jpg img_533.jpg img_703.jpg img_930.jpg +img_108.jpg img_3.jpg img_537.jpg img_704.jpg img_931.jpg +img_109.jpg img_400.jpg img_538.jpg img_705.jpg img_932.jpg +img_10.jpg img_471.jpg img_539.jpg img_706.jpg img_933.jpg +img_118.jpg img_472.jpg img_53.jpg img_707.jpg img_934.jpg +img_12.jpg img_473.jpg img_540.jpg img_708.jpg img_935.jpg +img_14.jpg img_488.jpg img_541.jpg img_709.jpg img_938.jpg +img_15.jpg img_489.jpg 'img_54 (2).jpg' img_70.jpg img_958.jpg +img_16.jpg img_490.jpg img_542.jpg img_710.jpg img_971.jpg +img_17.jpg img_491.jpg img_543.jpg 'img_71 (2).jpg' img_972.jpg +img_18.jpg img_492.jpg img_54.jpg img_71.jpg img_973.jpg +img_19.jpg img_493.jpg 'img_55 (2).jpg' img_72.jpg img_974.jpg +img_1.jpg img_494.jpg img_55.jpg img_73.jpg img_975.jpg +img_200.jpg img_495.jpg img_56.jpg img_74.jpg img_980.jpg +img_201.jpg img_496.jpg img_57.jpg img_75.jpg img_988.jpg +img_202.jpg img_497.jpg img_58.jpg img_76.jpg img_9.jpg +img_203.jpg img_4.jpg img_59.jpg img_77.jpg +img_204.jpg img_501.jpg img_601.jpg img_78.jpg +img_205.jpg img_502.jpg img_602.jpg img_79.jpg +img_206.jpg img_50.jpg img_603.jpg img_7.jpg +
+ +

The image files are not actually JPEGs, so we first need to re-save them in the correct format for Turicreate

from PIL import Image +import glob + + +folders = ["default","smoke","fire"] +for folder in folders: + n = 1 + for file in glob.glob("./data/data/img_data/train/" + folder + "/*.jpg"): + im = Image.open(file) + rgb_im = im.convert('RGB') + rgb_im.save((folder + "/" + str(n) + ".jpg"), quality=100) + n +=1 + for file in glob.glob("./data/data/img_data/train/" + folder + "/*.jpg"): + im = Image.open(file) + rgb_im = im.convert('RGB') + rgb_im.save((folder + "/" + str(n) + ".jpg"), quality=100) + n +=1 +
+ +


!mkdir train +!mv default ./train +!mv smoke ./train +!mv fire ./train +
+ +

Making the Image Classifier

Making an SFrame

!pip install turicreate +
+ +


import turicreate as tc +import os + +data = tc.image_analysis.load_images("./train", with_path=True) + +data["label"] = data["path"].apply(lambda path: os.path.basename(os.path.dirname(path))) + +print(data) + +data.save('fire-smoke.sframe') +
+ +


+-------------------------+------------------------+ +| path | image | ++-------------------------+------------------------+ +| ./train/default/1.jpg | Height: 224 Width: 224 | +| ./train/default/10.jpg | Height: 224 Width: 224 | +| ./train/default/100.jpg | Height: 224 Width: 224 | +| ./train/default/101.jpg | Height: 224 Width: 224 | +| ./train/default/102.jpg | Height: 224 Width: 224 | +| ./train/default/103.jpg | Height: 224 Width: 224 | +| ./train/default/104.jpg | Height: 224 Width: 224 | +| ./train/default/105.jpg | Height: 224 Width: 224 | +| ./train/default/106.jpg | Height: 224 Width: 224 | +| ./train/default/107.jpg | Height: 224 Width: 224 | ++-------------------------+------------------------+ +[2028 rows x 2 columns] +Note: Only the head of the SFrame is printed. +You can use print_rows(num_rows=m, num_columns=n) to print more rows and columns. ++-------------------------+------------------------+---------+ +| path | image | label | ++-------------------------+------------------------+---------+ +| ./train/default/1.jpg | Height: 224 Width: 224 | default | +| ./train/default/10.jpg | Height: 224 Width: 224 | default | +| ./train/default/100.jpg | Height: 224 Width: 224 | default | +| ./train/default/101.jpg | Height: 224 Width: 224 | default | +| ./train/default/102.jpg | Height: 224 Width: 224 | default | +| ./train/default/103.jpg | Height: 224 Width: 224 | default | +| ./train/default/104.jpg | Height: 224 Width: 224 | default | +| ./train/default/105.jpg | Height: 224 Width: 224 | default | +| ./train/default/106.jpg | Height: 224 Width: 224 | default | +| ./train/default/107.jpg | Height: 224 Width: 224 | default | ++-------------------------+------------------------+---------+ +[2028 rows x 3 columns] +Note: Only the head of the SFrame is printed. +You can use print_rows(num_rows=m, num_columns=n) to print more rows and columns. +
+ +

Making the Model

import turicreate as tc + +# Load the data +data = tc.SFrame('fire-smoke.sframe') + +# Make a train-test split +train_data, test_data = data.random_split(0.8) + +# Create the model +model = tc.image_classifier.create(train_data, target='label') + +# Save predictions to an SArray +predictions = model.predict(test_data) + +# Evaluate the model and print the results +metrics = model.evaluate(test_data) +print(metrics['accuracy']) + +# Save the model for later use in Turi Create +model.save('fire-smoke.model') + +# Export for use in Core ML +model.export_coreml('fire-smoke.mlmodel') +
+ +


Performing feature extraction on resized images... +Completed 64/1633 +Completed 128/1633 +Completed 192/1633 +Completed 256/1633 +Completed 320/1633 +Completed 384/1633 +Completed 448/1633 +Completed 512/1633 +Completed 576/1633 +Completed 640/1633 +Completed 704/1633 +Completed 768/1633 +Completed 832/1633 +Completed 896/1633 +Completed 960/1633 +Completed 1024/1633 +Completed 1088/1633 +Completed 1152/1633 +Completed 1216/1633 +Completed 1280/1633 +Completed 1344/1633 +Completed 1408/1633 +Completed 1472/1633 +Completed 1536/1633 +Completed 1600/1633 +Completed 1633/1633 +PROGRESS: Creating a validation set from 5 percent of training data. This may take a while. + You can set ``validation_set=None`` to disable validation tracking. + +Logistic regression: +-------------------------------------------------------- +Number of examples : 1551 +Number of classes : 3 +Number of feature columns : 1 +Number of unpacked features : 2048 +Number of coefficients : 4098 +Starting L-BFGS +-------------------------------------------------------- ++-----------+----------+-----------+--------------+-------------------+---------------------+ +| Iteration | Passes | Step size | Elapsed Time | Training Accuracy | Validation Accuracy | ++-----------+----------+-----------+--------------+-------------------+---------------------+ +| 0 | 6 | 0.018611 | 0.891830 | 0.553836 | 0.560976 | +| 1 | 10 | 0.390832 | 1.622383 | 0.744681 | 0.792683 | +| 2 | 11 | 0.488541 | 1.943987 | 0.733075 | 0.804878 | +| 3 | 14 | 2.442703 | 2.512545 | 0.727917 | 0.841463 | +| 4 | 15 | 2.442703 | 2.826964 | 0.861380 | 0.853659 | +| 9 | 28 | 2.340435 | 5.492035 | 0.941328 | 0.975610 | ++-----------+----------+-----------+--------------+-------------------+---------------------+ +Performing feature extraction on resized images... +Completed 64/395 +Completed 128/395 +Completed 192/395 +Completed 256/395 +Completed 320/395 +Completed 384/395 +Completed 395/395 +0.9316455696202531 +
+ +

We just got an accuracy of about 94% on the training data and 97% on the validation data, with roughly 93% on the held-out test set!
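To reuse the saved model later, a minimal sketch (assuming Turi Create's load_model and any image path from the training folders) could look like this:

import turicreate as tc

model = tc.load_model('fire-smoke.model')
img = tc.Image('./train/fire/1.jpg')  # any image from the dataset
print(model.predict(tc.SFrame({'image': [img]})))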

Tagged with:
\ No newline at end of file diff --git a/posts/2020-01-19-Connect-To-Bluetooth-Devices-Linux-Terminal/index 2.html b/posts/2020-01-19-Connect-To-Bluetooth-Devices-Linux-Terminal/index 2.html new file mode 100644 index 0000000..947a039 --- /dev/null +++ b/posts/2020-01-19-Connect-To-Bluetooth-Devices-Linux-Terminal/index 2.html @@ -0,0 +1 @@ +How to setup Bluetooth on a Raspberry Pi | Navan Chauhan
0 minute readCreated on January 19, 2020Last modified on January 20, 2020

How to setup Bluetooth on a Raspberry Pi

This was tested on a Raspberry Pi Zero W

Enter in the Bluetooth Mode

pi@raspberrypi:~ $ bluetoothctl

[bluetooth]# agent on

[bluetooth]# default-agent

[bluetooth]# scan on

To Pair

While being in bluetooth mode

[bluetooth]# pair XX:XX:XX:XX:XX:XX

To exit out of bluetoothctl at any time, just type exit

Tagged with:
\ No newline at end of file diff --git a/posts/2020-01-19-Connect-To-Bluetooth-Devices-Linux-Terminal/index 5.html b/posts/2020-01-19-Connect-To-Bluetooth-Devices-Linux-Terminal/index 5.html new file mode 100644 index 0000000..947a039 --- /dev/null +++ b/posts/2020-01-19-Connect-To-Bluetooth-Devices-Linux-Terminal/index 5.html @@ -0,0 +1 @@ +How to setup Bluetooth on a Raspberry Pi | Navan Chauhan
0 minute readCreated on January 19, 2020Last modified on January 20, 2020

How to setup Bluetooth on a Raspberry Pi

This was tested on a Raspberry Pi Zero W

Enter in the Bluetooth Mode

pi@raspberrypi:~ $ bluetoothctl

[bluetooth]# agent on

[bluetooth]# default-agent

[bluetooth]# scan on

To Pair

While being in bluetooth mode

[bluetooth]# pair XX:XX:XX:XX:XX:XX

To exit out of bluetoothctl at any time, just type exit

Tagged with:
\ No newline at end of file diff --git a/posts/2020-03-02-Open-Peeps/index 2.html b/posts/2020-03-02-Open-Peeps/index 2.html new file mode 100644 index 0000000..7478100 --- /dev/null +++ b/posts/2020-03-02-Open-Peeps/index 2.html @@ -0,0 +1 @@ +Open Peeps | Navan Chauhan
0 minute readCreated on March 2, 2020

Open Peeps

About Open Peeps

Open Peeps is a hand-drawn illustration library to create scenes of people. You can use them in product illustration, marketing, comics, product states, user flows, personas, storyboarding, quinceañera invitations, or whatever you want! - Product Hunt

Some Examples

Example 1
Tagged with:
\ No newline at end of file diff --git a/posts/2020-03-02-Open-Peeps/index 4.html b/posts/2020-03-02-Open-Peeps/index 4.html new file mode 100644 index 0000000..37ce3e7 --- /dev/null +++ b/posts/2020-03-02-Open-Peeps/index 4.html @@ -0,0 +1,4 @@ +Open Peeps | Navan Chauhan
0 minute readCreated on March 2, 2020

Open Peeps

About Open Peeps

Open Peeps is a hand-drawn illustration library to create scenes of people. You can use them in product illustration, marketing, comics, product states, user flows, personas, storyboarding, quinceañera invitations, or whatever you want! - Product Hunt

Some Examples

+ + +
Tagged with:
\ No newline at end of file diff --git a/posts/2020-03-02-Open-Peeps/index.html b/posts/2020-03-02-Open-Peeps/index.html new file mode 100644 index 0000000..4fdf8f4 --- /dev/null +++ b/posts/2020-03-02-Open-Peeps/index.html @@ -0,0 +1,4 @@ +Open Peeps | Navan Chauhan
0 minute readCreated on March 2, 2020

Open Peeps

About Open Peeps

Open Peeps is a hand-drawn illustration library to create scenes of people. You can use them in product illustration, marketing, comics, product states, user flows, personas, storyboarding, quinceañera invitations, or whatever you want! - Product Hunt

Some Examples

Standing

+ + +
Tagged with:
\ No newline at end of file diff --git a/posts/hello-world/index 2.html b/posts/hello-world/index 2.html new file mode 100644 index 0000000..d72ca21 --- /dev/null +++ b/posts/hello-world/index 2.html @@ -0,0 +1 @@ +Hello World | Navan Chauhan
0 minute readCreated on April 16, 2019Last modified on January 4, 2020

Hello World

Why a Hello World post?

Just re-did the entire website using Publish (Publish by John Sundell). So, a new hello world post :)

Tagged with:
\ No newline at end of file diff --git a/posts/hello-world/index 5.html b/posts/hello-world/index 5.html new file mode 100644 index 0000000..d72ca21 --- /dev/null +++ b/posts/hello-world/index 5.html @@ -0,0 +1 @@ +Hello World | Navan Chauhan
0 minute readCreated on April 16, 2019Last modified on January 4, 2020

Hello World

Why a Hello World post?

Just re-did the entire website using Publish (Publish by John Sundell). So, a new hello world post :)

Tagged with:
\ No newline at end of file diff --git a/posts/index 4.html b/posts/index 4.html new file mode 100644 index 0000000..e64d4e2 --- /dev/null +++ b/posts/index 4.html @@ -0,0 +1 @@ +Posts | Navan Chauhan

Posts

Tips, tricks and tutorials which I think might be useful.

\ No newline at end of file diff --git a/posts/index.html b/posts/index.html index ab47a84..e64d4e2 100644 --- a/posts/index.html +++ b/posts/index.html @@ -1 +1 @@ -Posts | Navan Chauhan

Posts

Tips, tricks and tutorials which I think might be useful.

\ No newline at end of file +Posts | Navan Chauhan

Posts

Tips, tricks and tutorials which I think might be useful.

\ No newline at end of file diff --git a/publications/2019-05-14-Detecting-Driver-Fatigue-Over-Speeding-and-Speeding-up-Post-Accident-Response/index 4.html b/publications/2019-05-14-Detecting-Driver-Fatigue-Over-Speeding-and-Speeding-up-Post-Accident-Response/index 4.html new file mode 100644 index 0000000..00347ab --- /dev/null +++ b/publications/2019-05-14-Detecting-Driver-Fatigue-Over-Speeding-and-Speeding-up-Post-Accident-Response/index 4.html @@ -0,0 +1,7 @@ +Detecting Driver Fatigue, Over-Speeding, and Speeding up Post-Accident Response | Navan Chauhan
1 minute readCreated on May 14, 2019Last modified on January 4, 2020

Detecting Driver Fatigue, Over-Speeding, and Speeding up Post-Accident Response

Based on the project showcased at Toyota Hackathon, IITD - 17/18th December 2018

Download paper here

Recommended citation:

ATP

Chauhan, N. (2019). &quot;Detecting Driver Fatigue, Over-Speeding, and Speeding up Post-Accident Response.&quot; <i>International Research Journal of Engineering and Technology (IRJET), 6(5)</i>. +
+ +

BibTeX

@article{chauhan_2019, title={Detecting Driver Fatigue, Over-Speeding, and Speeding up Post-Accident Response}, volume={6}, url={https://www.irjet.net/archives/V6/i5/IRJET-V6I5318.pdf}, number={5}, journal={International Research Journal of Engineering and Technology (IRJET)}, author={Chauhan, Navan}, year={2019}} +
+ +
Tagged with:
\ No newline at end of file diff --git a/publications/2019-05-14-Detecting-Driver-Fatigue-Over-Speeding-and-Speeding-up-Post-Accident-Response/index 8.html b/publications/2019-05-14-Detecting-Driver-Fatigue-Over-Speeding-and-Speeding-up-Post-Accident-Response/index 8.html new file mode 100644 index 0000000..00347ab --- /dev/null +++ b/publications/2019-05-14-Detecting-Driver-Fatigue-Over-Speeding-and-Speeding-up-Post-Accident-Response/index 8.html @@ -0,0 +1,7 @@ +Detecting Driver Fatigue, Over-Speeding, and Speeding up Post-Accident Response | Navan Chauhan
1 minute readCreated on May 14, 2019Last modified on January 4, 2020

Detecting Driver Fatigue, Over-Speeding, and Speeding up Post-Accident Response

Based on the project showcased at Toyota Hackathon, IITD - 17/18th December 2018

Download paper here

Recommended citation:

ATP

Chauhan, N. (2019). &quot;Detecting Driver Fatigue, Over-Speeding, and Speeding up Post-Accident Response.&quot; <i>International Research Journal of Engineering and Technology (IRJET), 6(5)</i>. +
+ +

BibTeX

@article{chauhan_2019, title={Detecting Driver Fatigue, Over-Speeding, and Speeding up Post-Accident Response}, volume={6}, url={https://www.irjet.net/archives/V6/i5/IRJET-V6I5318.pdf}, number={5}, journal={International Research Journal of Engineering and Technology (IRJET)}, author={Chauhan, Navan}, year={2019}} +
+ +
Tagged with:
\ No newline at end of file diff --git a/publications/index 4.html b/publications/index 4.html new file mode 100644 index 0000000..7fc43d8 --- /dev/null +++ b/publications/index 4.html @@ -0,0 +1 @@ +Publications | Navan Chauhan

Publications

Hopefully these grow with time; I already have tons of drafts ready. As I am currently studying in school, I get to experiment in Physics, Chemistry and Computer Science. I have started using LaTeX now ;)

\ No newline at end of file diff --git a/pwabuilder-sw 4.js b/pwabuilder-sw 4.js new file mode 100644 index 0000000..0684da5 --- /dev/null +++ b/pwabuilder-sw 4.js @@ -0,0 +1,83 @@ +// This is the service worker with the Cache-first network + +const CACHE = "pwabuilder-precache"; +const precacheFiles = [ + /* Add an array of files to precache for your app */ +]; + +self.addEventListener("install", function (event) { + console.log("[PWA Builder] Install Event processing"); + + console.log("[PWA Builder] Skip waiting on install"); + self.skipWaiting(); + + event.waitUntil( + caches.open(CACHE).then(function (cache) { + console.log("[PWA Builder] Caching pages during install"); + return cache.addAll(precacheFiles); + }) + ); +}); + +// Allow sw to control of current page +self.addEventListener("activate", function (event) { + console.log("[PWA Builder] Claiming clients for current page"); + event.waitUntil(self.clients.claim()); +}); + +// If any fetch fails, it will look for the request in the cache and serve it from there first +self.addEventListener("fetch", function (event) { + if (event.request.method !== "GET") return; + + event.respondWith( + fromCache(event.request).then( + function (response) { + // The response was found in the cache so we responde with it and update the entry + + // This is where we call the server to get the newest version of the + // file to use the next time we show view + event.waitUntil( + fetch(event.request).then(function (response) { + return updateCache(event.request, response); + }) + ); + + return response; + }, + function () { + // The response was not found in the cache so we look for it on the server + return fetch(event.request) + .then(function (response) { + // If request was success, add or update it in the cache + event.waitUntil(updateCache(event.request, response.clone())); + + return response; + }) + .catch(function (error) { + console.log("[PWA Builder] Network request failed and no cache." 
+ error); + }); + } + ) + ); +}); + +function fromCache(request) { + // Check to see if you have it in the cache + // Return response + // If not in the cache, then return + return caches.open(CACHE).then(function (cache) { + return cache.match(request).then(function (matching) { + if (!matching || matching.status === 404) { + return Promise.reject("no-match"); + } + + return matching; + }); + }); +} + +function updateCache(request, response) { + return caches.open(CACHE).then(function (cache) { + return cache.put(request, response); + }); +} diff --git a/pwabuilder-sw-register 4.js b/pwabuilder-sw-register 4.js new file mode 100644 index 0000000..8850330 --- /dev/null +++ b/pwabuilder-sw-register 4.js @@ -0,0 +1,19 @@ +// This is the service worker with the Cache-first network + +// Add this below content to your HTML page, or add the js file to your page at the very top to register service worker + +// Check compatibility for the browser we're running this in +if ("serviceWorker" in navigator) { + if (navigator.serviceWorker.controller) { + console.log("[PWA Builder] active service worker found, no need to register"); + } else { + // Register the service worker + navigator.serviceWorker + .register("/pwabuilder-sw.js", { + scope: "./" + }) + .then(function (reg) { + console.log("[PWA Builder] Service worker has been registered for scope: " + reg.scope); + }); + } +} diff --git a/sitemap 4.xml b/sitemap 4.xml new file mode 100644 index 0000000..a346881 --- /dev/null +++ b/sitemap 4.xml @@ -0,0 +1 @@ +https://navanchauhan.github.io/aboutdaily1.02020-02-07https://navanchauhan.github.io/postsdaily1.02020-03-02https://navanchauhan.github.io/posts/2010-01-24-experimentsmonthly0.52020-02-04https://navanchauhan.github.io/posts/2019-12-08-Image-Classifier-Tensorflowmonthly0.52020-01-18https://navanchauhan.github.io/posts/2019-12-08-Splitting-Zipsmonthly0.52020-01-18https://navanchauhan.github.io/posts/2019-12-10-TensorFlow-Model-Predictionmonthly0.52020-01-18https://navanchauhan.github.io/posts/2019-12-16-TensorFlow-Polynomial-Regressionmonthly0.52020-01-18https://navanchauhan.github.io/posts/2019-12-22-Fake-News-Detectormonthly0.52020-01-18https://navanchauhan.github.io/posts/2020-01-14-Converting-between-PIL-NumPymonthly0.52020-01-18https://navanchauhan.github.io/posts/2020-01-15-Setting-up-Kaggle-to-use-with-Colabmonthly0.52020-01-19https://navanchauhan.github.io/posts/2020-01-16-Image-Classifier-Using-Turicreatemonthly0.52020-01-19https://navanchauhan.github.io/posts/2020-01-19-Connect-To-Bluetooth-Devices-Linux-Terminalmonthly0.52020-01-20https://navanchauhan.github.io/posts/2020-03-02-Open-Peepsmonthly0.52020-03-02https://navanchauhan.github.io/posts/hello-worldmonthly0.52020-01-04https://navanchauhan.github.io/publicationsdaily1.02020-01-14https://navanchauhan.github.io/publications/2019-05-14-Detecting-Driver-Fatigue-Over-Speeding-and-Speeding-up-Post-Accident-Responsemonthly0.52020-01-04 \ No newline at end of file diff --git a/sitemap.xml b/sitemap.xml index 1dde4da..a346881 100644 --- a/sitemap.xml +++ b/sitemap.xml @@ -1 +1 @@ 
-https://navanchauhan.github.io/aboutdaily1.02020-02-07https://navanchauhan.github.io/postsdaily1.02020-01-19https://navanchauhan.github.io/posts/2010-01-24-experimentsmonthly0.52020-02-04https://navanchauhan.github.io/posts/2019-12-08-Image-Classifier-Tensorflowmonthly0.52020-01-18https://navanchauhan.github.io/posts/2019-12-08-Splitting-Zipsmonthly0.52020-01-18https://navanchauhan.github.io/posts/2019-12-10-TensorFlow-Model-Predictionmonthly0.52020-01-18https://navanchauhan.github.io/posts/2019-12-16-TensorFlow-Polynomial-Regressionmonthly0.52020-01-18https://navanchauhan.github.io/posts/2019-12-22-Fake-News-Detectormonthly0.52020-01-18https://navanchauhan.github.io/posts/2020-01-14-Converting-between-PIL-NumPymonthly0.52020-01-18https://navanchauhan.github.io/posts/2020-01-15-Setting-up-Kaggle-to-use-with-Colabmonthly0.52020-01-19https://navanchauhan.github.io/posts/2020-01-16-Image-Classifier-Using-Turicreatemonthly0.52020-01-19https://navanchauhan.github.io/posts/2020-01-19-Connect-To-Bluetooth-Devices-Linux-Terminalmonthly0.52020-01-20https://navanchauhan.github.io/posts/hello-worldmonthly0.52020-01-04https://navanchauhan.github.io/publicationsdaily1.02020-01-14https://navanchauhan.github.io/publications/2019-05-14-Detecting-Driver-Fatigue-Over-Speeding-and-Speeding-up-Post-Accident-Responsemonthly0.52020-01-04 \ No newline at end of file +https://navanchauhan.github.io/aboutdaily1.02020-02-07https://navanchauhan.github.io/postsdaily1.02020-03-02https://navanchauhan.github.io/posts/2010-01-24-experimentsmonthly0.52020-02-04https://navanchauhan.github.io/posts/2019-12-08-Image-Classifier-Tensorflowmonthly0.52020-01-18https://navanchauhan.github.io/posts/2019-12-08-Splitting-Zipsmonthly0.52020-01-18https://navanchauhan.github.io/posts/2019-12-10-TensorFlow-Model-Predictionmonthly0.52020-01-18https://navanchauhan.github.io/posts/2019-12-16-TensorFlow-Polynomial-Regressionmonthly0.52020-01-18https://navanchauhan.github.io/posts/2019-12-22-Fake-News-Detectormonthly0.52020-01-18https://navanchauhan.github.io/posts/2020-01-14-Converting-between-PIL-NumPymonthly0.52020-01-18https://navanchauhan.github.io/posts/2020-01-15-Setting-up-Kaggle-to-use-with-Colabmonthly0.52020-01-19https://navanchauhan.github.io/posts/2020-01-16-Image-Classifier-Using-Turicreatemonthly0.52020-01-19https://navanchauhan.github.io/posts/2020-01-19-Connect-To-Bluetooth-Devices-Linux-Terminalmonthly0.52020-01-20https://navanchauhan.github.io/posts/2020-03-02-Open-Peepsmonthly0.52020-03-02https://navanchauhan.github.io/posts/hello-worldmonthly0.52020-01-04https://navanchauhan.github.io/publicationsdaily1.02020-01-14https://navanchauhan.github.io/publications/2019-05-14-Detecting-Driver-Fatigue-Over-Speeding-and-Speeding-up-Post-Accident-Responsemonthly0.52020-01-04 \ No newline at end of file diff --git a/styles 4.css b/styles 4.css new file mode 100644 index 0000000..089337b --- /dev/null +++ b/styles 4.css @@ -0,0 +1,398 @@ +* { + margin: 0; + padding: 0; + box-sizing: border-box; + font-size: 16px; + -webkit-text-size-adjust: 100%; +} + +body { + background: #fff; + color: #000; + font-family: -apple-system, BlinkMacSystemFont, Helvetica, Arial; + text-align: center; +} + +.wrapper { + max-width: 900px; + margin-left: auto; + margin-right: auto; + padding: 40px; + text-align: left; +} + +header { + + + position: relative; + color: #ededed; + line-height: 1.5em; + padding: 0 20px; +} + +/* + +header { + background: #ededed; + margin-bottom: 0em; + padding-bottom: 2em; + left: 0px; + top: 0px; + height: 8em; + width: 
100%; +} +.header-background { + background-image: url(images/logo.png); + background-size: 100% 100%; + background-repeat: no-repeat; + background-size: cover; + background-position: center; + height: 200px; +} +*/ + +header .wrapper { + padding-top: 20px; + padding-bottom: 20px; + text-align: left; +} + +header a { + text-decoration: none; +} + +header .site-name { + color: #000; + margin: 0; + cursor: pointer; + font-weight: 200; + font-size: 2.3em; + letter-spacing: 1px; +} + +nav { + /*margin-top: 0.5em;*/ + text-align: left; /* right */ +} + +nav li { + margin-top: 0.5em; + display: inline-block; + background-color: #000; + color: #ddd; + padding: 4px 6px; + border-radius: 5px; + margin-right: 5px; + +} + +nav li:hover { + color: #000; + background-color: #ddd; +} +h1 { + margin-bottom: 20px; + font-size: 2em; +} + +h2 { + margin: 20px 0; +} + +p { + margin-bottom: 10px; +} + +a { + color: inherit; + +} + +.description { + margin-bottom: 20px; +} + +.item-list > li { + display: block; + padding: 20px; + border-radius: 20px; + background-color: #eee; + margin-bottom: 20px +} + +.item-list > li:last-child { + margin-bottom: 0; +} + +.item-list h1 { + margin-bottom: 0px; /*15px*/ + font-size: 1.3em; +} +.item-list a { + text-decoration: none; +} + +.item-list p { + margin-bottom: 0; +} + +.reading-time { + display: inline-block; + border-radius: 5px; + background-color: #ddd; + color: #000; + padding: 4px 4px; + margin-bottom: 5px; + margin-right: 5px; + +} + +.tag-list { + margin-bottom: 5px; /* 15px */ +} + +.tag-list li, +.tag { + display: inline-block; + background-color: #000; + color: #ddd; + padding: 4px 6px; + border-radius: 5px; + margin-right: 5px; + margin-top: 0.5em; +} + +.tag-list a, +.tag a { + text-decoration: none; +} + +.item-page .tag-list { + display: inline-block; +} + +.content { + margin-bottom: 40px; +} + +.browse-all { + display: block; + margin-bottom: 30px; +} + +.all-tags li { + font-size: 1.4em; + margin-right: 10px; + padding: 6px 10px; + margin-top: 1em; +} + +img { + max-width: 100%; + margin-bottom: 1em; + margin-top: 1em; + width: auto\9; + height: auto; + vertical-align: middle; + border: 0; + -ms-interpolation-mode: bicubic; +} + +footer { + color: #000; +} + + + +pre { + overflow-x: auto; + font-family: Monaco,Consolas,"Lucida Console",monospace; + display: block; + background-color: #fdf6e3; + color: #586e75; + margin-bottom: 1em; + margin-top: 1em; + border-radius: 4px; +} + +.highlight { background-color: #fdf6e3; color: #586e75; } +.highlight .c { color: #627272; } +.highlight .err { color: #586e75; } +.highlight .g { color: #586e75; } +.highlight .k { color: #677600; } +.highlight .l { color: #586e75; } +.highlight .n { color: #586e75; } +.highlight .o { color: #677600; } +.highlight .x { color: #c14715; } +.highlight .p { color: #586e75; } +.highlight .cm { color: #627272; } +.highlight .cp { color: #677600; } +.highlight .c1 { color: #627272; } +.highlight .cs { color: #677600; } +.highlight .gd { color: #217d74; } +.highlight .ge { color: #586e75; font-style: italic; } +.highlight .gr { color: #d72825; } +.highlight .gh { color: #c14715; } +.highlight .gi { color: #677600; } +.highlight .go { color: #586e75; } +.highlight .gp { color: #586e75; } +.highlight .gs { color: #586e75; font-weight: bold; } +.highlight .gu { color: #c14715; } +.highlight .gt { color: #586e75; } +.highlight .kc { color: #c14715; } +.highlight .kd { color: #1f76b6; } +.highlight .kn { color: #677600; } +.highlight .kp { color: #677600; } +.highlight .kr { color: 
#1f76b6; } +.highlight .kt { color: #d72825; } +.highlight .ld { color: #586e75; } +.highlight .m { color: #217d74; } +.highlight .s { color: #217d74; } +.highlight .na { color: #586e75; } +.highlight .nb { color: #8d6900; } +.highlight .nc { color: #1f76b6; } +.highlight .no { color: #c14715; } +.highlight .nd { color: #1f76b6; } +.highlight .ni { color: #c14715; } +.highlight .ne { color: #c14715; } +.highlight .nf { color: #1f76b6; } +.highlight .nl { color: #586e75; } +.highlight .nn { color: #586e75; } +.highlight .nx { color: #586e75; } +.highlight .py { color: #586e75; } +.highlight .nt { color: #1f76b6; } +.highlight .nv { color: #1f76b6; } +.highlight .ow { color: #677600; } +.highlight .w { color: #586e75; } +.highlight .mf { color: #217d74; } +.highlight .mh { color: #217d74; } +.highlight .mi { color: #217d74; } +.highlight .mo { color: #217d74; } +.highlight .sb { color: #627272; } +.highlight .sc { color: #217d74; } +.highlight .sd { color: #586e75; } +.highlight .s2 { color: #217d74; } +.highlight .se { color: #c14715; } +.highlight .sh { color: #586e75; } +.highlight .si { color: #217d74; } +.highlight .sx { color: #217d74; } +.highlight .sr { color: #d72825; } +.highlight .s1 { color: #217d74; } +.highlight .ss { color: #217d74; } +.highlight .bp { color: #1f76b6; } +.highlight .vc { color: #1f76b6; } +.highlight .vg { color: #1f76b6; } +.highlight .vi { color: #1f76b6; } +.highlight .il { color: #217d74; } + + +@media (prefers-color-scheme: dark) { + .reading-time { + background-color: #000; + color: #ddd; + } + body { + background-color: #222; + } + + body, + header .site-name { + color: #ddd; + } + nav li { + background-color: #ddd; + color: #000; + + } + nav li:hover { + color: #ddd; + background-color: #000; + } + + .item-list > li { + background-color: #333; + } + + header { + background-color: #000; + } + footer { + color: #ddd; + } + + pre { + background-color: #002b36; + color: #93a1a1; + } + + .highlight { background-color: #002b36; color: #93a1a1; } + .highlight .c { color: #759299; } + .highlight .err { color: #93a1a1; } + .highlight .g { color: #93a1a1; } + .highlight .k { color: #859900; } + .highlight .l { color: #93a1a1; } + .highlight .n { color: #93a1a1; } + .highlight .o { color: #859900; } + .highlight .x { color: #e9662f; } + .highlight .p { color: #93a1a1; } + .highlight .cm { color: #759299; } + .highlight .cp { color: #859900; } + .highlight .c1 { color: #759299; } + .highlight .cs { color: #859900; } + .highlight .gd { color: #2aa198; } + .highlight .ge { color: #93a1a1; font-style: italic; } + .highlight .gr { color: #e8625f; } + .highlight .gh { color: #e9662f; } + .highlight .gi { color: #859900; } + .highlight .go { color: #93a1a1; } + .highlight .gp { color: #93a1a1; } + .highlight .gs { color: #93a1a1; font-weight: bold; } + .highlight .gu { color: #e9662f; } + .highlight .gt { color: #93a1a1; } + .highlight .kc { color: #e9662f; } + .highlight .kd { color: #3294da; } + .highlight .kn { color: #859900; } + .highlight .kp { color: #859900; } + .highlight .kr { color: #3294da; } + .highlight .kt { color: #e8625f; } + .highlight .ld { color: #93a1a1; } + .highlight .m { color: #2aa198; } + .highlight .s { color: #2aa198; } + .highlight .na { color: #93a1a1; } + .highlight .nb { color: #B58900; } + .highlight .nc { color: #3294da; } + .highlight .no { color: #e9662f; } + .highlight .nd { color: #3294da; } + .highlight .ni { color: #e9662f; } + .highlight .ne { color: #e9662f; } + .highlight .nf { color: #3294da; } + .highlight .nl { color: 
#93a1a1; } + .highlight .nn { color: #93a1a1; } + .highlight .nx { color: #93a1a1; } + .highlight .py { color: #93a1a1; } + .highlight .nt { color: #3294da; } + .highlight .nv { color: #3294da; } + .highlight .ow { color: #859900; } + .highlight .w { color: #93a1a1; } + .highlight .mf { color: #2aa198; } + .highlight .mh { color: #2aa198; } + .highlight .mi { color: #2aa198; } + .highlight .mo { color: #2aa198; } + .highlight .sb { color: #759299; } + .highlight .sc { color: #2aa198; } + .highlight .sd { color: #93a1a1; } + .highlight .s2 { color: #2aa198; } + .highlight .se { color: #e9662f; } + .highlight .sh { color: #93a1a1; } + .highlight .si { color: #2aa198; } + .highlight .sx { color: #2aa198; } + .highlight .sr { color: #e8625f; } + .highlight .s1 { color: #2aa198; } + .highlight .ss { color: #2aa198; } + .highlight .bp { color: #3294da; } + .highlight .vc { color: #3294da; } + .highlight .vg { color: #3294da; } + .highlight .vi { color: #3294da; } + .highlight .il { color: #2aa198; } +} + diff --git a/tags/article/index 4.html b/tags/article/index 4.html new file mode 100644 index 0000000..d25f0ae --- /dev/null +++ b/tags/article/index 4.html @@ -0,0 +1 @@ +Navan Chauhan

Tagged with article

Browse all tags
\ No newline at end of file diff --git a/tags/article/index 8.html b/tags/article/index 8.html new file mode 100644 index 0000000..d25f0ae --- /dev/null +++ b/tags/article/index 8.html @@ -0,0 +1 @@ +Navan Chauhan

Tagged with article

Browse all tags
\ No newline at end of file diff --git a/tags/article/index.html b/tags/article/index.html index b73af07..d25f0ae 100644 --- a/tags/article/index.html +++ b/tags/article/index.html @@ -1 +1 @@ -Navan Chauhan

Tagged with article

Browse all tags
\ No newline at end of file +Navan Chauhan

Tagged with article

Browse all tags
\ No newline at end of file diff --git a/tags/codesnippet/index 11.html b/tags/codesnippet/index 11.html new file mode 100644 index 0000000..77332db --- /dev/null +++ b/tags/codesnippet/index 11.html @@ -0,0 +1 @@ +Navan Chauhan

Tagged with code-snippet

Browse all tags
\ No newline at end of file diff --git a/tags/codesnippet/index 5.html b/tags/codesnippet/index 5.html new file mode 100644 index 0000000..77332db --- /dev/null +++ b/tags/codesnippet/index 5.html @@ -0,0 +1 @@ +Navan Chauhan

Tagged with code-snippet

Browse all tags
\ No newline at end of file diff --git a/tags/colab/index 4.html b/tags/colab/index 4.html new file mode 100644 index 0000000..ea3f47e --- /dev/null +++ b/tags/colab/index 4.html @@ -0,0 +1 @@ +Navan Chauhan

Tagged with colab

Browse all tags
\ No newline at end of file diff --git a/tags/colab/index 8.html b/tags/colab/index 8.html new file mode 100644 index 0000000..ea3f47e --- /dev/null +++ b/tags/colab/index 8.html @@ -0,0 +1 @@ +Navan Chauhan

Tagged with colab

Browse all tags
\ No newline at end of file diff --git a/tags/digitalart/index 2.html b/tags/digitalart/index 2.html new file mode 100644 index 0000000..2b9e356 --- /dev/null +++ b/tags/digitalart/index 2.html @@ -0,0 +1 @@ +Navan Chauhan

Tagged with digital-art

Browse all tags
\ No newline at end of file diff --git a/tags/digitalart/index 4.html b/tags/digitalart/index 4.html new file mode 100644 index 0000000..2b9e356 --- /dev/null +++ b/tags/digitalart/index 4.html @@ -0,0 +1 @@ +Navan Chauhan

Tagged with digital-art

Browse all tags
\ No newline at end of file diff --git a/tags/digitalart/index.html b/tags/digitalart/index.html new file mode 100644 index 0000000..2b9e356 --- /dev/null +++ b/tags/digitalart/index.html @@ -0,0 +1 @@ +Navan Chauhan

Tagged with digital-art

Browse all tags
\ No newline at end of file diff --git a/tags/experiment/index 11.html b/tags/experiment/index 11.html new file mode 100644 index 0000000..7fbb4f7 --- /dev/null +++ b/tags/experiment/index 11.html @@ -0,0 +1 @@ +Navan Chauhan

Tagged with experiment

Browse all tags
  • Experiments

    🕑 0 minute read. January 24, 2010

    Just a markdown file for all experiments related to the website

\ No newline at end of file diff --git a/tags/experiment/index 5.html b/tags/experiment/index 5.html new file mode 100644 index 0000000..7fbb4f7 --- /dev/null +++ b/tags/experiment/index 5.html @@ -0,0 +1 @@ +Navan Chauhan

Tagged with experiment

Browse all tags
  • Experiments

    🕑 0 minute read. January 24, 2010

    Just a markdown file for all experiments related to the website

\ No newline at end of file diff --git a/tags/helloworld/index 2.html b/tags/helloworld/index 2.html new file mode 100644 index 0000000..9236d6c --- /dev/null +++ b/tags/helloworld/index 2.html @@ -0,0 +1 @@ +Navan Chauhan

Tagged with hello-world

Browse all tags
\ No newline at end of file diff --git a/tags/helloworld/index 5.html b/tags/helloworld/index 5.html new file mode 100644 index 0000000..9236d6c --- /dev/null +++ b/tags/helloworld/index 5.html @@ -0,0 +1 @@ +Navan Chauhan

Tagged with hello-world

Browse all tags
\ No newline at end of file diff --git a/tags/index 4.html b/tags/index 4.html new file mode 100644 index 0000000..89d4f6a --- /dev/null +++ b/tags/index 4.html @@ -0,0 +1 @@ +Navan Chauhan
\ No newline at end of file diff --git a/tags/index.html b/tags/index.html index bb64c6f..89d4f6a 100644 --- a/tags/index.html +++ b/tags/index.html @@ -1 +1 @@ -Navan Chauhan
\ No newline at end of file +Navan Chauhan
\ No newline at end of file diff --git a/tags/kaggle/index 4.html b/tags/kaggle/index 4.html new file mode 100644 index 0000000..d716e4e --- /dev/null +++ b/tags/kaggle/index 4.html @@ -0,0 +1 @@ +Navan Chauhan

Tagged with kaggle

Browse all tags
\ No newline at end of file diff --git a/tags/kaggle/index 8.html b/tags/kaggle/index 8.html new file mode 100644 index 0000000..d716e4e --- /dev/null +++ b/tags/kaggle/index 8.html @@ -0,0 +1 @@ +Navan Chauhan

Tagged with kaggle

Browse all tags
\ No newline at end of file diff --git a/tags/linux/index 4.html b/tags/linux/index 4.html new file mode 100644 index 0000000..e008926 --- /dev/null +++ b/tags/linux/index 4.html @@ -0,0 +1 @@ +Navan Chauhan

Tagged with linux

Browse all tags
\ No newline at end of file diff --git a/tags/linux/index 8.html b/tags/linux/index 8.html new file mode 100644 index 0000000..e008926 --- /dev/null +++ b/tags/linux/index 8.html @@ -0,0 +1 @@ +Navan Chauhan

Tagged with linux

Browse all tags
\ No newline at end of file diff --git a/tags/publication/index 4.html b/tags/publication/index 4.html new file mode 100644 index 0000000..b33c152 --- /dev/null +++ b/tags/publication/index 4.html @@ -0,0 +1 @@ +Navan Chauhan

Tagged with publication

Browse all tags
\ No newline at end of file diff --git a/tags/publication/index 8.html b/tags/publication/index 8.html new file mode 100644 index 0000000..b33c152 --- /dev/null +++ b/tags/publication/index 8.html @@ -0,0 +1 @@ +Navan Chauhan

Tagged with publication

Browse all tags
\ No newline at end of file diff --git a/tags/raspberrypi/index 4.html b/tags/raspberrypi/index 4.html new file mode 100644 index 0000000..47a4af9 --- /dev/null +++ b/tags/raspberrypi/index 4.html @@ -0,0 +1 @@ +Navan Chauhan

Tagged with raspberry-pi

Browse all tags
\ No newline at end of file diff --git a/tags/raspberrypi/index 6.html b/tags/raspberrypi/index 6.html new file mode 100644 index 0000000..47a4af9 --- /dev/null +++ b/tags/raspberrypi/index 6.html @@ -0,0 +1 @@ +Navan Chauhan

Tagged with raspberry-pi

Browse all tags
\ No newline at end of file diff --git a/tags/swiftui/index 2.html b/tags/swiftui/index 2.html new file mode 100644 index 0000000..cdedbc7 --- /dev/null +++ b/tags/swiftui/index 2.html @@ -0,0 +1 @@ +Navan Chauhan

Tagged with swiftUI

Browse all tags
\ No newline at end of file diff --git a/tags/swiftui/index 5.html b/tags/swiftui/index 5.html new file mode 100644 index 0000000..cdedbc7 --- /dev/null +++ b/tags/swiftui/index 5.html @@ -0,0 +1 @@ +Navan Chauhan

Tagged with swiftUI

Browse all tags
\ No newline at end of file diff --git a/tags/tensorflow/index 4.html b/tags/tensorflow/index 4.html new file mode 100644 index 0000000..e6cc2b6 --- /dev/null +++ b/tags/tensorflow/index 4.html @@ -0,0 +1 @@ +Navan Chauhan

Tagged with tensorflow

Browse all tags
\ No newline at end of file diff --git a/tags/tensorflow/index 8.html b/tags/tensorflow/index 8.html new file mode 100644 index 0000000..e6cc2b6 --- /dev/null +++ b/tags/tensorflow/index 8.html @@ -0,0 +1 @@ +Navan Chauhan

Tagged with tensorflow

Browse all tags
\ No newline at end of file diff --git a/tags/turicreate/index 4.html b/tags/turicreate/index 4.html new file mode 100644 index 0000000..ff10dd0 --- /dev/null +++ b/tags/turicreate/index 4.html @@ -0,0 +1 @@ +Navan Chauhan

Tagged with turicreate

Browse all tags
\ No newline at end of file diff --git a/tags/turicreate/index 6.html b/tags/turicreate/index 6.html new file mode 100644 index 0000000..ff10dd0 --- /dev/null +++ b/tags/turicreate/index 6.html @@ -0,0 +1 @@ +Navan Chauhan

Tagged with turicreate

Browse all tags
\ No newline at end of file diff --git a/tags/tutorial/index 4.html b/tags/tutorial/index 4.html new file mode 100644 index 0000000..0d4e081 --- /dev/null +++ b/tags/tutorial/index 4.html @@ -0,0 +1 @@ +Navan Chauhan

Tagged with tutorial

Browse all tags
\ No newline at end of file diff --git a/tags/tutorial/index 6.html b/tags/tutorial/index 6.html new file mode 100644 index 0000000..0d4e081 --- /dev/null +++ b/tags/tutorial/index 6.html @@ -0,0 +1 @@ +Navan Chauhan

Tagged with tutorial

Browse all tags
\ No newline at end of file -- cgit v1.2.3