From dfc509b95ff03d0c9027ee74d31d7b171f867bf1 Mon Sep 17 00:00:00 2001 From: Navan Chauhan Date: Wed, 26 May 2021 23:59:17 +0530 Subject: generated website --- docs/CNAME | 1 + docs/Themes/styles.css | 401 +++ docs/about/index.html | 48 + docs/assets/disqus.js | 5 + .../01-teachableMachines/.01-collect.png.icloud | Bin 0 -> 164 bytes .../gciTales/01-teachableMachines/02-train.png | Bin 0 -> 1673773 bytes .../gciTales/01-teachableMachines/03-label.png | Bin 0 -> 134577 bytes .../gciTales/01-teachableMachines/04-alert.png | Bin 0 -> 45710 bytes .../gciTales/01-teachableMachines/05-html.png | Bin 0 -> 15084 bytes .../assets/gciTales/01-teachableMachines/06-js.png | Bin 0 -> 117349 bytes .../assets/gciTales/01-teachableMachines/07-eg.png | Bin 0 -> 787109 bytes .../assets/gciTales/01-teachableMachines/08-eg.png | Bin 0 -> 832865 bytes docs/assets/gciTales/03-regression/1.png | Bin 0 -> 39478 bytes docs/assets/gciTales/03-regression/2.png | Bin 0 -> 67134 bytes docs/assets/gciTales/03-regression/3.png | Bin 0 -> 59514 bytes docs/assets/gciTales/03-regression/4.png | Bin 0 -> 58153 bytes docs/assets/gciTales/03-regression/5.png | Bin 0 -> 58824 bytes docs/assets/gciTales/03-regression/6.png | Bin 0 -> 59554 bytes docs/assets/main.css | 369 +++ docs/assets/manup.min.js | 1 + docs/assets/posts/arjs/.03-knot.png.icloud | Bin 0 -> 161 bytes docs/assets/posts/arjs/01-ngrok.png | Bin 0 -> 950543 bytes docs/assets/posts/arjs/02-box-demo.gif | Bin 0 -> 1689985 bytes docs/assets/posts/arjs/04-nyan.gif | Bin 0 -> 702741 bytes docs/assets/posts/arjs/05-GitHub.jpg | Bin 0 -> 615523 bytes docs/assets/posts/autodock-vina/s1.png | Bin 0 -> 914699 bytes docs/assets/posts/kaggle-colab/ss1.png | Bin 0 -> 151575 bytes docs/assets/posts/kaggle-colab/ss2.png | Bin 0 -> 43899 bytes docs/assets/posts/kaggle-colab/ss3.png | Bin 0 -> 54283 bytes docs/assets/posts/kaggle-colab/ss4.png | Bin 0 -> 14510 bytes docs/assets/posts/obs/.01-pewdiepie.png.icloud | Bin 0 -> 168 bytes 
docs/assets/posts/obs/.11.png.icloud | Bin 0 -> 156 bytes docs/assets/posts/obs/.3-usb.png.icloud | Bin 0 -> 159 bytes docs/assets/posts/obs/.5S.png.icloud | Bin 0 -> 156 bytes docs/assets/posts/obs/.mirrorless.png.icloud | Bin 0 -> 164 bytes docs/assets/posts/obs/.normal.png.icloud | Bin 0 -> 160 bytes docs/assets/posts/obs/.usb.png.icloud | Bin 0 -> 157 bytes docs/assets/posts/obs/01-media-source.png | Bin 0 -> 144620 bytes docs/assets/posts/obs/01-panel.png | Bin 0 -> 120308 bytes docs/assets/posts/obs/02-remote.png | Bin 0 -> 930081 bytes docs/assets/posts/obs/021-rotate.png | Bin 0 -> 725295 bytes docs/assets/posts/obs/021-source.png | Bin 0 -> 91620 bytes docs/assets/posts/obs/dics.css | 229 ++ docs/assets/posts/obs/dics.js | 638 ++++ docs/assets/posts/open-babel/s1.png | Bin 0 -> 604137 bytes docs/assets/posts/open-babel/s2.jpg | Bin 0 -> 132781 bytes docs/assets/posts/open-babel/s3.jpg | Bin 0 -> 314900 bytes docs/assets/posts/open-peeps/ex-1.svg | 1 + "docs/assets/r\303\251sum\303\251.pdf" | Bin 0 -> 84216 bytes docs/assets/sakura.css | 186 ++ docs/feed.rss | 3337 ++++++++++++++++++++ docs/googlecb0897d479c87d97.html | 1 + ...80b-d347-476a-232d-9568839851cd.webPlatform.png | Bin 0 -> 1711 bytes ...126-4866-93de-8df5-e0e6a3c65da1.webPlatform.png | Bin 0 -> 748 bytes ...abc-6c7c-ffb8-df8d-d2fad23f50b0.webPlatform.png | Bin 0 -> 471 bytes ...f70-557f-0e4b-3d76-127534525db9.webPlatform.png | Bin 0 -> 600 bytes ...f17-2e71-90d8-67a7-587163282ebf.webPlatform.png | Bin 0 -> 3127 bytes ...e9e-b615-96cd-3e18-ab4307c859a0.webPlatform.png | Bin 0 -> 1674 bytes ...18b-2a6c-0abc-136c-8c8faf49c71b.webPlatform.png | Bin 0 -> 1061 bytes ...ee8-92e3-932f-5388-7731928b5692.webPlatform.png | Bin 0 -> 879 bytes ...996-fd1b-b2d3-3627-cef4fa224e25.webPlatform.png | Bin 0 -> 789 bytes ...c53-cfd0-b52e-ca49-1db0cc292b7d.webPlatform.png | Bin 0 -> 577 bytes ...729-56cb-2a63-7e8b-ac62a038a023.webPlatform.png | Bin 0 -> 1033 bytes ...a46-4612-c284-055f-58850c0730bd.webPlatform.png | 
Bin 0 -> 984 bytes ...a63-85f5-62b0-c68f-2faa4aaea42b.webPlatform.png | Bin 0 -> 11426 bytes ...b1d-0299-9db6-3747-c7aeaaaa37d0.webPlatform.png | Bin 0 -> 1517 bytes ...798-7e86-1f02-565e-39dfab41fe36.webPlatform.png | Bin 0 -> 24811 bytes ...c61-f17f-ff49-3f97-e942f202bebf.webPlatform.png | Bin 0 -> 1293 bytes ...97f-630b-bafd-7c7d-e1287b98a969.webPlatform.png | Bin 0 -> 814 bytes ...aaa-861c-78c0-0919-07a886e57304.webPlatform.png | Bin 0 -> 3966 bytes ...765-fff5-aa39-9f7f-fdd3024d4056.webPlatform.png | Bin 0 -> 1363 bytes docs/images/favicon.png | Bin 0 -> 411 bytes docs/images/logo.png | Bin 0 -> 498 bytes docs/images/me.jpeg | Bin 0 -> 105079 bytes docs/index.html | 436 +++ docs/manifest.json | 119 + docs/posts/2010-01-24-experiments.html | 36 + .../2019-05-05-Custom-Snowboard-Anemone-Theme.html | 448 +++ .../2019-12-04-Google-Teachable-Machines.html | 80 + .../2019-12-08-Image-Classifier-Tensorflow.html | 190 ++ docs/posts/2019-12-08-Splitting-Zips.html | 53 + .../2019-12-10-TensorFlow-Model-Prediction.html | 76 + ...019-12-16-TensorFlow-Polynomial-Regression.html | 510 +++ docs/posts/2019-12-22-Fake-News-Detector.html | 262 ++ .../2020-01-14-Converting-between-PIL-NumPy.html | 52 + ...-01-15-Setting-up-Kaggle-to-use-with-Colab.html | 72 + ...20-01-16-Image-Classifier-Using-Turicreate.html | 276 ++ ...onnect-To-Bluetooth-Devices-Linux-Terminal.html | 52 + docs/posts/2020-03-03-Playing-With-Android-TV.html | 103 + docs/posts/2020-03-08-Making-Vaporwave-Track.html | 62 + ...20-04-13-Fixing-X11-Error-AmberTools-macOS.html | 61 + .../2020-05-31-compiling-open-babel-on-ios.html | 147 + ...r-Docking-Workflow-AutoDock-Vina-and-PyMOL.html | 76 + .../2020-06-02-Compiling-AutoDock-Vina-on-iOS.html | 113 + docs/posts/2020-07-01-Install-rdkit-colab.html | 124 + .../2020-08-01-Natural-Feature-Tracking-ARJS.html | 307 ++ docs/posts/2020-10-11-macOS-Virtual-Cam-OBS.html | 138 + docs/posts/2020-11-17-Lets-Encrypt-DuckDns.html | 98 + docs/posts/2020-12-1-HTML-JS-RSS-Feed.html 
| 231 ++ docs/posts/hello-world.html | 36 + docs/posts/index.html | 402 +++ ...ing-and-Speeding-up-Post-Accident-Response.html | 52 + .../2020-03-14-generating-vaporwave.html | 66 + ...20-03-17-Possible-Drug-Candidates-COVID-19.html | 36 + docs/publications/index.html | 77 + docs/pwabuilder-sw-register.js | 19 + docs/pwabuilder-sw.js | 83 + 107 files changed, 10110 insertions(+) create mode 100644 docs/CNAME create mode 100644 docs/Themes/styles.css create mode 100644 docs/about/index.html create mode 100644 docs/assets/disqus.js create mode 100644 docs/assets/gciTales/01-teachableMachines/.01-collect.png.icloud create mode 100644 docs/assets/gciTales/01-teachableMachines/02-train.png create mode 100644 docs/assets/gciTales/01-teachableMachines/03-label.png create mode 100644 docs/assets/gciTales/01-teachableMachines/04-alert.png create mode 100644 docs/assets/gciTales/01-teachableMachines/05-html.png create mode 100644 docs/assets/gciTales/01-teachableMachines/06-js.png create mode 100644 docs/assets/gciTales/01-teachableMachines/07-eg.png create mode 100644 docs/assets/gciTales/01-teachableMachines/08-eg.png create mode 100644 docs/assets/gciTales/03-regression/1.png create mode 100644 docs/assets/gciTales/03-regression/2.png create mode 100644 docs/assets/gciTales/03-regression/3.png create mode 100644 docs/assets/gciTales/03-regression/4.png create mode 100644 docs/assets/gciTales/03-regression/5.png create mode 100644 docs/assets/gciTales/03-regression/6.png create mode 100644 docs/assets/main.css create mode 100644 docs/assets/manup.min.js create mode 100644 docs/assets/posts/arjs/.03-knot.png.icloud create mode 100644 docs/assets/posts/arjs/01-ngrok.png create mode 100644 docs/assets/posts/arjs/02-box-demo.gif create mode 100644 docs/assets/posts/arjs/04-nyan.gif create mode 100644 docs/assets/posts/arjs/05-GitHub.jpg create mode 100644 docs/assets/posts/autodock-vina/s1.png create mode 100644 docs/assets/posts/kaggle-colab/ss1.png create mode 100644 
docs/assets/posts/kaggle-colab/ss2.png create mode 100644 docs/assets/posts/kaggle-colab/ss3.png create mode 100644 docs/assets/posts/kaggle-colab/ss4.png create mode 100644 docs/assets/posts/obs/.01-pewdiepie.png.icloud create mode 100644 docs/assets/posts/obs/.11.png.icloud create mode 100644 docs/assets/posts/obs/.3-usb.png.icloud create mode 100644 docs/assets/posts/obs/.5S.png.icloud create mode 100644 docs/assets/posts/obs/.mirrorless.png.icloud create mode 100644 docs/assets/posts/obs/.normal.png.icloud create mode 100644 docs/assets/posts/obs/.usb.png.icloud create mode 100644 docs/assets/posts/obs/01-media-source.png create mode 100644 docs/assets/posts/obs/01-panel.png create mode 100644 docs/assets/posts/obs/02-remote.png create mode 100644 docs/assets/posts/obs/021-rotate.png create mode 100644 docs/assets/posts/obs/021-source.png create mode 100644 docs/assets/posts/obs/dics.css create mode 100644 docs/assets/posts/obs/dics.js create mode 100644 docs/assets/posts/open-babel/s1.png create mode 100644 docs/assets/posts/open-babel/s2.jpg create mode 100644 docs/assets/posts/open-babel/s3.jpg create mode 100644 docs/assets/posts/open-peeps/ex-1.svg create mode 100644 "docs/assets/r\303\251sum\303\251.pdf" create mode 100644 docs/assets/sakura.css create mode 100644 docs/feed.rss create mode 100644 docs/googlecb0897d479c87d97.html create mode 100644 docs/images/04d0580b-d347-476a-232d-9568839851cd.webPlatform.png create mode 100644 docs/images/14a6e126-4866-93de-8df5-e0e6a3c65da1.webPlatform.png create mode 100644 docs/images/15294abc-6c7c-ffb8-df8d-d2fad23f50b0.webPlatform.png create mode 100644 docs/images/6b5f7f70-557f-0e4b-3d76-127534525db9.webPlatform.png create mode 100644 docs/images/82e24f17-2e71-90d8-67a7-587163282ebf.webPlatform.png create mode 100644 docs/images/8c0ffe9e-b615-96cd-3e18-ab4307c859a0.webPlatform.png create mode 100644 docs/images/9384518b-2a6c-0abc-136c-8c8faf49c71b.webPlatform.png create mode 100644 
docs/images/9bf4aee8-92e3-932f-5388-7731928b5692.webPlatform.png create mode 100644 docs/images/9dc22996-fd1b-b2d3-3627-cef4fa224e25.webPlatform.png create mode 100644 docs/images/afd91c53-cfd0-b52e-ca49-1db0cc292b7d.webPlatform.png create mode 100644 docs/images/b0cac729-56cb-2a63-7e8b-ac62a038a023.webPlatform.png create mode 100644 docs/images/bb0aca46-4612-c284-055f-58850c0730bd.webPlatform.png create mode 100644 docs/images/c5840a63-85f5-62b0-c68f-2faa4aaea42b.webPlatform.png create mode 100644 docs/images/cbac5b1d-0299-9db6-3747-c7aeaaaa37d0.webPlatform.png create mode 100644 docs/images/e429a798-7e86-1f02-565e-39dfab41fe36.webPlatform.png create mode 100644 docs/images/f1579c61-f17f-ff49-3f97-e942f202bebf.webPlatform.png create mode 100644 docs/images/f178697f-630b-bafd-7c7d-e1287b98a969.webPlatform.png create mode 100644 docs/images/f400aaaa-861c-78c0-0919-07a886e57304.webPlatform.png create mode 100644 docs/images/f7842765-fff5-aa39-9f7f-fdd3024d4056.webPlatform.png create mode 100644 docs/images/favicon.png create mode 100644 docs/images/logo.png create mode 100644 docs/images/me.jpeg create mode 100644 docs/index.html create mode 100644 docs/manifest.json create mode 100644 docs/posts/2010-01-24-experiments.html create mode 100644 docs/posts/2019-05-05-Custom-Snowboard-Anemone-Theme.html create mode 100644 docs/posts/2019-12-04-Google-Teachable-Machines.html create mode 100644 docs/posts/2019-12-08-Image-Classifier-Tensorflow.html create mode 100644 docs/posts/2019-12-08-Splitting-Zips.html create mode 100644 docs/posts/2019-12-10-TensorFlow-Model-Prediction.html create mode 100644 docs/posts/2019-12-16-TensorFlow-Polynomial-Regression.html create mode 100644 docs/posts/2019-12-22-Fake-News-Detector.html create mode 100644 docs/posts/2020-01-14-Converting-between-PIL-NumPy.html create mode 100644 docs/posts/2020-01-15-Setting-up-Kaggle-to-use-with-Colab.html create mode 100644 docs/posts/2020-01-16-Image-Classifier-Using-Turicreate.html create mode 100644 
docs/posts/2020-01-19-Connect-To-Bluetooth-Devices-Linux-Terminal.html create mode 100644 docs/posts/2020-03-03-Playing-With-Android-TV.html create mode 100644 docs/posts/2020-03-08-Making-Vaporwave-Track.html create mode 100644 docs/posts/2020-04-13-Fixing-X11-Error-AmberTools-macOS.html create mode 100644 docs/posts/2020-05-31-compiling-open-babel-on-ios.html create mode 100644 docs/posts/2020-06-01-Speeding-Up-Molecular-Docking-Workflow-AutoDock-Vina-and-PyMOL.html create mode 100644 docs/posts/2020-06-02-Compiling-AutoDock-Vina-on-iOS.html create mode 100644 docs/posts/2020-07-01-Install-rdkit-colab.html create mode 100644 docs/posts/2020-08-01-Natural-Feature-Tracking-ARJS.html create mode 100644 docs/posts/2020-10-11-macOS-Virtual-Cam-OBS.html create mode 100644 docs/posts/2020-11-17-Lets-Encrypt-DuckDns.html create mode 100644 docs/posts/2020-12-1-HTML-JS-RSS-Feed.html create mode 100644 docs/posts/hello-world.html create mode 100644 docs/posts/index.html create mode 100644 docs/publications/2019-05-14-Detecting-Driver-Fatigue-Over-Speeding-and-Speeding-up-Post-Accident-Response.html create mode 100644 docs/publications/2020-03-14-generating-vaporwave.html create mode 100644 docs/publications/2020-03-17-Possible-Drug-Candidates-COVID-19.html create mode 100644 docs/publications/index.html create mode 100644 docs/pwabuilder-sw-register.js create mode 100644 docs/pwabuilder-sw.js diff --git a/docs/CNAME b/docs/CNAME new file mode 100644 index 0000000..ce3a35d --- /dev/null +++ b/docs/CNAME @@ -0,0 +1 @@ +web.navan.dev diff --git a/docs/Themes/styles.css b/docs/Themes/styles.css new file mode 100644 index 0000000..4e658d7 --- /dev/null +++ b/docs/Themes/styles.css @@ -0,0 +1,401 @@ +* { + margin: 0; + padding: 0; + box-sizing: border-box; + font-size: 16px; + -webkit-text-size-adjust: 100%; +} + +body { + background: #eee; + color: #000; + font-family: -apple-system, BlinkMacSystemFont, Helvetica, Arial; + text-align: center; +} + +.wrapper { + max-width: 
900px; + margin-left: auto; + margin-right: auto; + padding: 40px; + text-align: left; +} + +header { + + background: #fff; + position: relative; + color: #ededed; + line-height: 1.5em; + padding: 0 20px; +} + +/* + +header { + background: #ededed; + margin-bottom: 0em; + padding-bottom: 2em; + left: 0px; + top: 0px; + height: 8em; + width: 100%; +} +.header-background { + background-image: url(images/logo.png); + background-size: 100% 100%; + background-repeat: no-repeat; + background-size: cover; + background-position: center; + height: 200px; +} +*/ + +header .wrapper { + padding-top: 20px; + padding-bottom: 20px; + text-align: left; +} + +header a { + text-decoration: none; +} + +header .site-name { + color: #000; + margin: 0; + cursor: pointer; + font-weight: 50; + font-size: 3.5em; /*#2.3em;*/ + line-height: 1em; + letter-spacing: 1px; +} + +nav { + /*margin-top: 0.5em;*/ + text-align: left; /* right */ +} + +nav li { + margin-top: 0.5em; + display: inline-block; + background-color: #000; + color: #ddd; + padding: 4px 6px; + border-radius: 5px; + margin-right: 5px; + +} + +nav li:hover { + color: #000; + background-color: #ddd; +} +h1 { + margin-bottom: 20px; + font-size: 2em; +} + +h2 { + margin: 20px 0; + font-size: 1.5em; +} + +p { + margin-bottom: 10px; + margin-top: 5px; +} + +a { + color: inherit; + +} + +.description { + margin-bottom: 20px; +} + +.item-list > li { + display: block; + padding: 20px; + border-radius: 20px; + background-color: #d3d3d3; + margin-bottom: 20px +} + +.item-list > li:last-child { + margin-bottom: 0; +} + +.item-list h1 { + margin-bottom: 0px; /*15px*/ + font-size: 1.3em; +} +.item-list a { + text-decoration: none; +} + +.item-list p { + margin-bottom: 0; +} + +.reading-time { + display: inline-block; + border-radius: 5px; + background-color: #ddd; + color: #000; + padding: 4px 4px; + margin-bottom: 5px; + margin-right: 5px; + +} + +.tag-list { + margin-bottom: 5px; /* 15px */ +} + +.tag-list li, +.tag { + display: 
inline-block; + background-color: #000; + color: #ddd; + padding: 4px 6px; + border-radius: 5px; + margin-right: 5px; + margin-top: 0.5em; +} + +.tag-list a, +.tag a { + text-decoration: none; +} + +.item-page .tag-list { + display: inline-block; +} + +.content { + margin-bottom: 40px; +} + +.browse-all { + display: block; + margin-bottom: 30px; +} + +.all-tags li { + font-size: 1.4em; + margin-right: 10px; + padding: 6px 10px; + margin-top: 1em; +} + +img { + max-width: 100%; + margin-bottom: 1em; + margin-top: 1em; + width: auto\9; + height: auto; + vertical-align: middle; + border: 0; + -ms-interpolation-mode: bicubic; +} + +footer { + color: #000; +} + + + +pre { + overflow-x: auto; + font-family: Monaco,Consolas,"Lucida Console",monospace; + display: block; + background-color: #fdf6e3; + color: #586e75; + margin-bottom: 1em; + margin-top: 1em; + border-radius: 4px; +} + +.highlight { background-color: #fdf6e3; color: #586e75; } +.highlight .c { color: #627272; } +.highlight .err { color: #586e75; } +.highlight .g { color: #586e75; } +.highlight .k { color: #677600; } +.highlight .l { color: #586e75; } +.highlight .n { color: #586e75; } +.highlight .o { color: #677600; } +.highlight .x { color: #c14715; } +.highlight .p { color: #586e75; } +.highlight .cm { color: #627272; } +.highlight .cp { color: #677600; } +.highlight .c1 { color: #627272; } +.highlight .cs { color: #677600; } +.highlight .gd { color: #217d74; } +.highlight .ge { color: #586e75; font-style: italic; } +.highlight .gr { color: #d72825; } +.highlight .gh { color: #c14715; } +.highlight .gi { color: #677600; } +.highlight .go { color: #586e75; } +.highlight .gp { color: #586e75; } +.highlight .gs { color: #586e75; font-weight: bold; } +.highlight .gu { color: #c14715; } +.highlight .gt { color: #586e75; } +.highlight .kc { color: #c14715; } +.highlight .kd { color: #1f76b6; } +.highlight .kn { color: #677600; } +.highlight .kp { color: #677600; } +.highlight .kr { color: #1f76b6; } +.highlight 
.kt { color: #d72825; } +.highlight .ld { color: #586e75; } +.highlight .m { color: #217d74; } +.highlight .s { color: #217d74; } +.highlight .na { color: #586e75; } +.highlight .nb { color: #8d6900; } +.highlight .nc { color: #1f76b6; } +.highlight .no { color: #c14715; } +.highlight .nd { color: #1f76b6; } +.highlight .ni { color: #c14715; } +.highlight .ne { color: #c14715; } +.highlight .nf { color: #1f76b6; } +.highlight .nl { color: #586e75; } +.highlight .nn { color: #586e75; } +.highlight .nx { color: #586e75; } +.highlight .py { color: #586e75; } +.highlight .nt { color: #1f76b6; } +.highlight .nv { color: #1f76b6; } +.highlight .ow { color: #677600; } +.highlight .w { color: #586e75; } +.highlight .mf { color: #217d74; } +.highlight .mh { color: #217d74; } +.highlight .mi { color: #217d74; } +.highlight .mo { color: #217d74; } +.highlight .sb { color: #627272; } +.highlight .sc { color: #217d74; } +.highlight .sd { color: #586e75; } +.highlight .s2 { color: #217d74; } +.highlight .se { color: #c14715; } +.highlight .sh { color: #586e75; } +.highlight .si { color: #217d74; } +.highlight .sx { color: #217d74; } +.highlight .sr { color: #d72825; } +.highlight .s1 { color: #217d74; } +.highlight .ss { color: #217d74; } +.highlight .bp { color: #1f76b6; } +.highlight .vc { color: #1f76b6; } +.highlight .vg { color: #1f76b6; } +.highlight .vi { color: #1f76b6; } +.highlight .il { color: #217d74; } + + +@media (prefers-color-scheme: dark) { + .reading-time { + background-color: #000; + color: #ddd; + } + body { + background-color: #222; + } + + body, + header .site-name { + color: #ddd; + } + nav li { + background-color: #ddd; + color: #000; + + } + nav li:hover { + color: #ddd; + background-color: #000; + } + + .item-list > li { + background-color: #333; + } + + header { + background-color: #000; + } + footer { + color: #ddd; + } + + pre { + background-color: #002b36; + color: #93a1a1; + } + + .highlight { background-color: #002b36; color: #93a1a1; } + 
.highlight .c { color: #759299; } + .highlight .err { color: #93a1a1; } + .highlight .g { color: #93a1a1; } + .highlight .k { color: #859900; } + .highlight .l { color: #93a1a1; } + .highlight .n { color: #93a1a1; } + .highlight .o { color: #859900; } + .highlight .x { color: #e9662f; } + .highlight .p { color: #93a1a1; } + .highlight .cm { color: #759299; } + .highlight .cp { color: #859900; } + .highlight .c1 { color: #759299; } + .highlight .cs { color: #859900; } + .highlight .gd { color: #2aa198; } + .highlight .ge { color: #93a1a1; font-style: italic; } + .highlight .gr { color: #e8625f; } + .highlight .gh { color: #e9662f; } + .highlight .gi { color: #859900; } + .highlight .go { color: #93a1a1; } + .highlight .gp { color: #93a1a1; } + .highlight .gs { color: #93a1a1; font-weight: bold; } + .highlight .gu { color: #e9662f; } + .highlight .gt { color: #93a1a1; } + .highlight .kc { color: #e9662f; } + .highlight .kd { color: #3294da; } + .highlight .kn { color: #859900; } + .highlight .kp { color: #859900; } + .highlight .kr { color: #3294da; } + .highlight .kt { color: #e8625f; } + .highlight .ld { color: #93a1a1; } + .highlight .m { color: #2aa198; } + .highlight .s { color: #2aa198; } + .highlight .na { color: #93a1a1; } + .highlight .nb { color: #B58900; } + .highlight .nc { color: #3294da; } + .highlight .no { color: #e9662f; } + .highlight .nd { color: #3294da; } + .highlight .ni { color: #e9662f; } + .highlight .ne { color: #e9662f; } + .highlight .nf { color: #3294da; } + .highlight .nl { color: #93a1a1; } + .highlight .nn { color: #93a1a1; } + .highlight .nx { color: #93a1a1; } + .highlight .py { color: #93a1a1; } + .highlight .nt { color: #3294da; } + .highlight .nv { color: #3294da; } + .highlight .ow { color: #859900; } + .highlight .w { color: #93a1a1; } + .highlight .mf { color: #2aa198; } + .highlight .mh { color: #2aa198; } + .highlight .mi { color: #2aa198; } + .highlight .mo { color: #2aa198; } + .highlight .sb { color: #759299; } + 
.highlight .sc { color: #2aa198; } + .highlight .sd { color: #93a1a1; } + .highlight .s2 { color: #2aa198; } + .highlight .se { color: #e9662f; } + .highlight .sh { color: #93a1a1; } + .highlight .si { color: #2aa198; } + .highlight .sx { color: #2aa198; } + .highlight .sr { color: #e8625f; } + .highlight .s1 { color: #2aa198; } + .highlight .ss { color: #2aa198; } + .highlight .bp { color: #3294da; } + .highlight .vc { color: #3294da; } + .highlight .vg { color: #3294da; } + .highlight .vi { color: #3294da; } + .highlight .il { color: #2aa198; } +} + diff --git a/docs/about/index.html b/docs/about/index.html new file mode 100644 index 0000000..e920f3d --- /dev/null +++ b/docs/about/index.html @@ -0,0 +1,48 @@ + + + + + + + + + Hey - Section + + + + + + +
+

About Me

+ +

Hi! My name is Navan Chauhan. I dabble in the mysterious dark arts of programming and in-silico drug discovery. I am proficient in Python and can work my way with Swift. I only use vanilla JS.

+ +

Contact Me

+ +

The best way to reach out to me is by e-mail. navanchauhan[at]gmail.com Or, if I have made peace with my ISP and corrected the rDNS settings, then: hey[at]navan.dev

+ +

My GPG Fingerprint:

+ +

1DA1 04AA DEB7 7473 A4FA C27B 4EFC A289 7342 A778

+ +
+ + + + + + + + + \ No newline at end of file diff --git a/docs/assets/disqus.js b/docs/assets/disqus.js new file mode 100644 index 0000000..0c52381 --- /dev/null +++ b/docs/assets/disqus.js @@ -0,0 +1,5 @@ +(function() { + var t = document, + e = t.createElement("script"); + e.src = "https://navan-chauhan.disqus.com/embed.js", e.setAttribute("data-timestamp", +new Date), (t.head || t.body).appendChild(e) +})(); diff --git a/docs/assets/gciTales/01-teachableMachines/.01-collect.png.icloud b/docs/assets/gciTales/01-teachableMachines/.01-collect.png.icloud new file mode 100644 index 0000000..b291112 Binary files /dev/null and b/docs/assets/gciTales/01-teachableMachines/.01-collect.png.icloud differ diff --git a/docs/assets/gciTales/01-teachableMachines/02-train.png b/docs/assets/gciTales/01-teachableMachines/02-train.png new file mode 100644 index 0000000..a69fd63 Binary files /dev/null and b/docs/assets/gciTales/01-teachableMachines/02-train.png differ diff --git a/docs/assets/gciTales/01-teachableMachines/03-label.png b/docs/assets/gciTales/01-teachableMachines/03-label.png new file mode 100644 index 0000000..efe450d Binary files /dev/null and b/docs/assets/gciTales/01-teachableMachines/03-label.png differ diff --git a/docs/assets/gciTales/01-teachableMachines/04-alert.png b/docs/assets/gciTales/01-teachableMachines/04-alert.png new file mode 100644 index 0000000..f648bad Binary files /dev/null and b/docs/assets/gciTales/01-teachableMachines/04-alert.png differ diff --git a/docs/assets/gciTales/01-teachableMachines/05-html.png b/docs/assets/gciTales/01-teachableMachines/05-html.png new file mode 100644 index 0000000..f917c07 Binary files /dev/null and b/docs/assets/gciTales/01-teachableMachines/05-html.png differ diff --git a/docs/assets/gciTales/01-teachableMachines/06-js.png b/docs/assets/gciTales/01-teachableMachines/06-js.png new file mode 100644 index 0000000..173a8aa Binary files /dev/null and b/docs/assets/gciTales/01-teachableMachines/06-js.png differ diff 
--git a/docs/assets/gciTales/01-teachableMachines/07-eg.png b/docs/assets/gciTales/01-teachableMachines/07-eg.png new file mode 100644 index 0000000..cc8198e Binary files /dev/null and b/docs/assets/gciTales/01-teachableMachines/07-eg.png differ diff --git a/docs/assets/gciTales/01-teachableMachines/08-eg.png b/docs/assets/gciTales/01-teachableMachines/08-eg.png new file mode 100644 index 0000000..b1261fa Binary files /dev/null and b/docs/assets/gciTales/01-teachableMachines/08-eg.png differ diff --git a/docs/assets/gciTales/03-regression/1.png b/docs/assets/gciTales/03-regression/1.png new file mode 100644 index 0000000..b07d172 Binary files /dev/null and b/docs/assets/gciTales/03-regression/1.png differ diff --git a/docs/assets/gciTales/03-regression/2.png b/docs/assets/gciTales/03-regression/2.png new file mode 100644 index 0000000..53531ad Binary files /dev/null and b/docs/assets/gciTales/03-regression/2.png differ diff --git a/docs/assets/gciTales/03-regression/3.png b/docs/assets/gciTales/03-regression/3.png new file mode 100644 index 0000000..542d76e Binary files /dev/null and b/docs/assets/gciTales/03-regression/3.png differ diff --git a/docs/assets/gciTales/03-regression/4.png b/docs/assets/gciTales/03-regression/4.png new file mode 100644 index 0000000..16101cd Binary files /dev/null and b/docs/assets/gciTales/03-regression/4.png differ diff --git a/docs/assets/gciTales/03-regression/5.png b/docs/assets/gciTales/03-regression/5.png new file mode 100644 index 0000000..36b9c26 Binary files /dev/null and b/docs/assets/gciTales/03-regression/5.png differ diff --git a/docs/assets/gciTales/03-regression/6.png b/docs/assets/gciTales/03-regression/6.png new file mode 100644 index 0000000..479d0e5 Binary files /dev/null and b/docs/assets/gciTales/03-regression/6.png differ diff --git a/docs/assets/main.css b/docs/assets/main.css new file mode 100644 index 0000000..99962e2 --- /dev/null +++ b/docs/assets/main.css @@ -0,0 +1,369 @@ +/* Generated by Pygments CSS 
Theme Builder - https://jwarby.github.io/jekyll-pygments-themes/builder.html */ +/* Base Style */ +.codehilite pre { + overflow-x:auto; + color: #fefefe; + text-shadow: 0 0 2px #001716, 0 0 3px #03edf975, 0 0 5px #03edf975, 0 0 8px #03edf975; + background-image: linear-gradient(to bottom, #2a2139 75%, #34294f); + background-color: #2a2139; + margin-bottom: 1rem; + margin-top: 1rem; +} +/* Punctuation */ +.codehilite .p { + color: #ff7edb; + text-shadow: 0 0 2px #100c0f, 0 0 5px #dc078e33, 0 0 10px #fff3; + background-color: #2a2139; +} +/* Error */ +.codehilite .err { + color: #ff7edb; + text-shadow: 0 0 2px #100c0f, 0 0 5px #dc078e33, 0 0 10px #fff3; + background-color: #2a2139; +} +/* Base Style */ +.codehilite .n { + color: #ff7edb; + text-shadow: 0 0 2px #100c0f, 0 0 5px #dc078e33, 0 0 10px #fff3; + text-shadow: 0 0 2px #100c0f, 0 0 5px #dc078e33, 0 0 10px #fff3; + background-color: transparent; +} +/* Name Attribute */ +.codehilite .na { + color: #fede5d; + background-color: transparent; +} +/* Name Builtin */ +.codehilite .nb { + color: #fe4450; + background-color: transparent; +} +/* Name Class */ +.codehilite .nc { + color: #fe4450; + background-color: transparent; +} +/* Name Constant */ +.codehilite .no { + color: transparent; + background-color: transparent; +} +/* Name Decorator */ +.codehilite .nd { + color: transparent; + background-color: transparent; +} +/* Name Entity */ +.codehilite .ni { + color: transparent; + background-color: transparent; +} +/* Name Exception */ +.codehilite .ne { + color: #fe4450; + background-color: transparent; +} +/* Name Function */ +.codehilite .nf { + color: #36f9f6; + background-color: transparent; +} +/* Name Label */ +.codehilite .nl { + color: transparent; + background-color: transparent; +} +/* Name Namespace */ +.codehilite .nn { + color: #f97e72; + background-color: transparent; +} +/* Name Other */ +.codehilite .nx { + color: #36f9f6; + background-color: transparent; +} +/* Name Property */ +.codehilite .py { + 
color: transparent; + background-color: transparent; +} +/* Name Tag */ +.codehilite .nt { + color: #72f1b8; + text-shadow: 0 0 2px #100c0f, 0 0 10px #257c5575, 0 0 35px #21272475; + background-color: transparent; +} +/* Name Variable */ +.codehilite .nv { + color: #bbbbbb; + background-color: transparent; +} +/* Name Variable Class */ +.codehilite .vc { + color: transparent; + background-color: transparent; +} +/* Name Variable Global */ +.codehilite .vg { + color: transparent; + background-color: transparent; +} +/* Name Variable Instance */ +.codehilite .vi { + color: transparent; + background-color: transparent; +} +/* Name Builtin Pseudo */ +.codehilite .bp { + color: #36f9f6; + background-color: transparent; +} +/* Base Style */ +.codehilite .g { + color: transparent; + background-color: transparent; +} +/* */ +.codehilite .gd { + color: transparent; + background-color: transparent; +} +/* Base Style */ +.codehilite .o { + color: #fefefe; + text-shadow: 0 0 2px #001716, 0 0 3px #03edf975, 0 0 5px #03edf975, 0 0 8px #03edf975; + background-color: transparent; +} +/* Operator Word */ +.codehilite .ow { + color: #fede5d; + background-color: transparent; +} +/* Base Style */ +.codehilite .c { + color: #848bbd; + background-color: transparent; +} +/* Comment Multiline */ +.codehilite .cm { + color: #848bbd; + background-color: transparent; +} +/* Comment Preproc */ +.codehilite .cp { + color: #848bbd; + background-color: transparent; +} +/* Comment Single */ +.codehilite .c1 { + color: #848bbd; + background-color: transparent; +} +/* Comment Special */ +.codehilite .cs { + color: #848bbd; + background-color: transparent; +} +/* Base Style */ +.codehilite .k { + color: #fede5d; + background-color: transparent; +} +/* Keyword Constant */ +.codehilite .kc { + color: #f97e72; + background-color: transparent; +} +/* Keyword Declaration */ +.codehilite .kd { + color: #fede5d; + background-color: transparent; +} +/* Keyword Namespace */ +.codehilite .kn { + color: 
#fede5d; + background-color: transparent; +} +/* Keyword Pseudo */ +.codehilite .kp { + color: #fede5d; + background-color: transparent; +} +/* Keyword Reserved */ +.codehilite .kr { + color: #fede5d; + background-color: transparent; +} +/* Keyword Type */ +.codehilite .kt { + color: #fede5d; + background-color: transparent; +} +/* Base Style */ +.codehilite .l { + color: #36f9f6; + background-color: transparent; +} +/* Literal Date */ +.codehilite .ld { + color: #36f9f6; + background-color: transparent; +} +/* Literal Number */ +.codehilite .m { + color: #36f9f6; + background-color: transparent; +} +/* Literal Number Float */ +.codehilite .mf { + color: #36f9f6; + background-color: transparent; +} +/* Literal Number Hex */ +.codehilite .mh { + color: #36f9f6; + background-color: transparent; +} +/* Literal Number Integer */ +.codehilite .mi { + color: #f97e72; + background-color: transparent; +} +/* Literal Number Oct */ +.codehilite .mo { + color: #36f9f6; + background-color: transparent; +} +/* Literal Number Integer Long */ +.codehilite .il { + color: #36f9f6; + background-color: transparent; +} +/* Literal String */ +.codehilite .s { + color: #ff8b39; + text-shadow: 0 0 2px #393a33, 0 0 8px #f39f0575, 0 0 2px #f39f0575; + background-color: transparent; +} +/* Literal String Backtick */ +.codehilite .sb { + color: #36f9f6; + background-color: transparent; +} +/* Literal String Char */ +.codehilite .sc { + color: #36f9f6; + background-color: transparent; +} +/* Literal String Doc */ +.codehilite .sd { + color: #848bbd; + background-color: transparent; +} +/* Literal String Double */ +.codehilite .s2 { + color: #36f9f6; + background-color: transparent; +} +/* Literal String Escape */ +.codehilite .se { + color: #36f9f6; + background-color: transparent; +} +/* Literal String Heredoc */ +.codehilite .sh { + color: #36f9f6; + background-color: transparent; +} +/* Literal String Interpol */ +.codehilite .si { + color: #36f9f6; + background-color: transparent; +} +/* 
Literal String Other */ +.codehilite .sx { + color: #36f9f6; + background-color: transparent; +} +/* Literal String Regex */ +.codehilite .sr { + color: transparent; + background-color: transparent; +} +/* Literal String Single */ +.codehilite .s1 { + color: #ff8b39; + text-shadow: 0 0 2px #393a33, 0 0 8px #f39f0575, 0 0 2px #f39f0575; + background-color: transparent; +} +/* Literal String Symbol */ +.codehilite .ss { + color: #36f9f6; + background-color: transparent; +} +/* Base Style */ +.codehilite .g { + color: transparent; + background-color: transparent; +} +/* Generic Deleted */ +.codehilite .gd { + color: transparent; + background-color: transparent; +} +/* Generic Emph */ +.codehilite .ge { + color: transparent; + background-color: transparent; +} +/* Generic Error */ +.codehilite .gr { + color: transparent; + background-color: transparent; +} +/* Generic Heading */ +.codehilite .gh { + color: transparent; + background-color: transparent; +} +/* Generic Inserted */ +.codehilite .gi { + color: transparent; + background-color: transparent; +} +/* Generic Output */ +.codehilite .go { + color: transparent; + background-color: transparent; +} +/* Generic Prompt */ +.codehilite .gp { + color: transparent; + background-color: transparent; +} +/* Generic Strong */ +.codehilite .gs { + color: transparent; + background-color: transparent; +} +/* Generic Subheading */ +.codehilite .gu { + color: transparent; + background-color: transparent; +} +/* Generic Traceback */ +.codehilite .gt { + color: transparent; + background-color: transparent; +} +/* Other */ +.codehilite .x { + color: transparent; + background-color: transparent; +} +/* Text Whitespace */ +.codehilite .w { + color: transparent; + background-color: transparent; +} diff --git a/docs/assets/manup.min.js b/docs/assets/manup.min.js new file mode 100644 index 0000000..20bc3a1 --- /dev/null +++ b/docs/assets/manup.min.js @@ -0,0 +1 @@ +var 
manUpObject,tagArray=[],linkArray=[],validMetaValues=[{name:"mobile-web-app-capable",manifestName:"display"},{name:"apple-mobile-web-app-capable",manifestName:"display"},{name:"apple-mobile-web-app-title",manifestName:"short_name"},{name:"application-name",manifestName:"short_name"},{name:"msapplication-TileColor",manifestName:"ms_TileColor"},{name:"msapplication-square70x70logo",manifestName:"icons",imageSize:"70x70"},{name:"msapplication-square150x150logo",manifestName:"icons",imageSize:"150x150"},{name:"msapplication-wide310x150logo",manifestName:"icons",imageSize:"310x150"},{name:"msapplication-square310x310logo",manifestName:"icons",imageSize:"310x310"}],validLinkValues=[{name:"apple-touch-icon",manifestName:"icons",imageSize:"152x152"},{name:"apple-touch-icon",manifestName:"icons",imageSize:"120x120"},{name:"apple-touch-icon",manifestName:"icons",imageSize:"76x76"},{name:"apple-touch-icon",manifestName:"icons",imageSize:"60x60"},{name:"apple-touch-icon",manifestName:"icons",imageSize:"57x57"},{name:"apple-touch-icon",manifestName:"icons",imageSize:"72x72"},{name:"apple-touch-icon",manifestName:"icons",imageSize:"114x114"},{name:"icon",manifestName:"icons",imageSize:"128x128"},{name:"icon",manifestName:"icons",imageSize:"192x192"}],generateFullMetaData=function(){for(var e=0;e 1 && arguments[1] !== undefined ? 
arguments[1] : 100000; + + if (firstImage.naturalWidth) { + this._buidAfterFirstImageLoad(firstImage); + } else { + if (maxCounter > 0) { + maxCounter--; + setTimeout(function () { + _this._load(firstImage, maxCounter); + }, 100); + } else { + console.error('error loading images'); + } + } +}; +/** + * + * @private + */ + + +Dics.prototype._buidAfterFirstImageLoad = function (firstImage) { + this._setContainerWidth(firstImage); + + this._build(); + + this._setEvents(); +}; +/** + * + * @private + */ + + +Dics.prototype._setContainerWidth = function (firstImage) { + this.options.container.style.height = "".concat(this._calcContainerHeight(firstImage), "px"); +}; +/** + * + * @private + */ + + +Dics.prototype._setOpacityContainerForLoading = function (opacity) { + this.options.container.style.opacity = opacity; +}; +/** + * Build HTML + * @private + */ + + +Dics.prototype._build = function () { + var dics = this; + + dics._applyGlobalClass(dics.options); + + var imagesLength = dics.images.length; + var initialImagesContainerWidth = dics.container.getBoundingClientRect()[dics.config.sizeField] / imagesLength; + + for (var i = 0; i < imagesLength; i++) { + var image = dics.images[i]; + + var section = dics._createElement('div', 'b-dics__section'); + + var imageContainer = dics._createElement('div', 'b-dics__image-container'); + + var slider = dics._createSlider(i, initialImagesContainerWidth); + + dics._createAltText(image, i, imageContainer); + + dics._applyFilter(image, i, dics.options.filters); + + dics._rotate(image, imageContainer); + + section.setAttribute('data-function', 'b-dics__section'); + section.style.flex = "0 0 ".concat(initialImagesContainerWidth, "px"); + image.classList.add('b-dics__image'); + section.appendChild(imageContainer); + imageContainer.appendChild(image); + + if (i < imagesLength - 1) { + section.appendChild(slider); + } + + dics.container.appendChild(section); + image.style[this.config.positionField] = "".concat(i * 
-initialImagesContainerWidth, "px"); + } + + this.sections = this._getSections(); + + this._setOpacityContainerForLoading(1); +}; +/** + * + * @returns {NodeListOf | NodeListOf | NodeListOf} + * @private + */ + + +Dics.prototype._getImages = function () { + return this.container.querySelectorAll('img'); +}; +/** + * + * @returns {NodeListOf | NodeListOf | NodeListOf} + * @private + */ + + +Dics.prototype._getSections = function () { + return this.container.querySelectorAll('[data-function="b-dics__section"]'); +}; +/** + * + * @param elementClass + * @param className + * @returns {HTMLElement | HTMLSelectElement | HTMLLegendElement | HTMLTableCaptionElement | HTMLTextAreaElement | HTMLModElement | HTMLHRElement | HTMLOutputElement | HTMLPreElement | HTMLEmbedElement | HTMLCanvasElement | HTMLFrameSetElement | HTMLMarqueeElement | HTMLScriptElement | HTMLInputElement | HTMLUnknownElement | HTMLMetaElement | HTMLStyleElement | HTMLObjectElement | HTMLTemplateElement | HTMLBRElement | HTMLAudioElement | HTMLIFrameElement | HTMLMapElement | HTMLTableElement | HTMLAnchorElement | HTMLMenuElement | HTMLPictureElement | HTMLParagraphElement | HTMLTableDataCellElement | HTMLTableSectionElement | HTMLQuoteElement | HTMLTableHeaderCellElement | HTMLProgressElement | HTMLLIElement | HTMLTableRowElement | HTMLFontElement | HTMLSpanElement | HTMLTableColElement | HTMLOptGroupElement | HTMLDataElement | HTMLDListElement | HTMLFieldSetElement | HTMLSourceElement | HTMLBodyElement | HTMLDirectoryElement | HTMLDivElement | HTMLUListElement | HTMLHtmlElement | HTMLAreaElement | HTMLMeterElement | HTMLAppletElement | HTMLFrameElement | HTMLOptionElement | HTMLImageElement | HTMLLinkElement | HTMLHeadingElement | HTMLSlotElement | HTMLVideoElement | HTMLBaseFontElement | HTMLTitleElement | HTMLButtonElement | HTMLHeadElement | HTMLParamElement | HTMLTrackElement | HTMLOListElement | HTMLDataListElement | HTMLLabelElement | HTMLFormElement | HTMLTimeElement | HTMLBaseElement} + * 
@private + */ + + +Dics.prototype._createElement = function (elementClass, className) { + var newElement = document.createElement(elementClass); + newElement.classList.add(className); + return newElement; +}; +/** + * Set need DOM events + * @private + */ + + +Dics.prototype._setEvents = function () { + var dics = this; + + dics._disableImageDrag(); + + dics._isGoingRight = null; + var oldx = 0; + + var listener = function listener(event) { + var xPageCoord = event.pageX ? event.pageX : event.touches[0].pageX; + + if (xPageCoord < oldx) { + dics._isGoingRight = false; + } else if (xPageCoord > oldx) { + dics._isGoingRight = true; + } + + oldx = xPageCoord; + + var position = dics._calcPosition(event); + + var beforeSectionsWidth = dics._beforeSectionsWidth(dics.sections, dics.images, dics._activeSlider); + + var calcMovePixels = position - beforeSectionsWidth; + dics.sliders[dics._activeSlider].style[dics.config.positionField] = "".concat(position, "px"); + + dics._pushSections(calcMovePixels, position); + }; + + dics.container.addEventListener('click', listener); + + var _loop = function _loop(i) { + var slider = dics.sliders[i]; + utils.setMultiEvents(slider, ['mousedown', 'touchstart'], function (event) { + dics._activeSlider = i; + dics._clickPosition = dics._calcPosition(event); + slider.classList.add('b-dics__slider--active'); + utils.setMultiEvents(dics.container, ['mousemove', 'touchmove'], listener); + }); + }; + + for (var i = 0; i < dics.sliders.length; i++) { + _loop(i); + } + + var listener2 = function listener2() { + var activeElements = dics.container.querySelectorAll('.b-dics__slider--active'); + var _iteratorNormalCompletion = true; + var _didIteratorError = false; + var _iteratorError = undefined; + + try { + for (var _iterator = activeElements[Symbol.iterator](), _step; !(_iteratorNormalCompletion = (_step = _iterator.next()).done); _iteratorNormalCompletion = true) { + var activeElement = _step.value; + 
activeElement.classList.remove('b-dics__slider--active'); + utils.removeMultiEvents(dics.container, ['mousemove', 'touchmove'], listener); + } + } catch (err) { + _didIteratorError = true; + _iteratorError = err; + } finally { + try { + if (!_iteratorNormalCompletion && _iterator.return != null) { + _iterator.return(); + } + } finally { + if (_didIteratorError) { + throw _iteratorError; + } + } + } + }; + + utils.setMultiEvents(document.body, ['mouseup', 'touchend'], listener2); +}; +/** + * + * @param sections + * @param images + * @param activeSlider + * @returns {number} + * @private + */ + + +Dics.prototype._beforeSectionsWidth = function (sections, images, activeSlider) { + var width = 0; + + for (var i = 0; i < sections.length; i++) { + var section = sections[i]; + + if (i !== activeSlider) { + width += section.getBoundingClientRect()[this.config.sizeField]; + } else { + return width; + } + } +}; +/** + * + * @returns {number} + * @private + */ + + +Dics.prototype._calcContainerHeight = function (firstImage) { + var imgHeight = firstImage.naturalHeight; + var imgWidth = firstImage.naturalWidth; + var containerWidth = this.options.container.getBoundingClientRect().width; + return containerWidth / imgWidth * imgHeight; +}; +/** + * + * @param sections + * @param images + * @private + */ + + +Dics.prototype._setLeftToImages = function (sections, images) { + var size = 0; + + for (var i = 0; i < images.length; i++) { + var image = images[i]; + image.style[this.config.positionField] = "-".concat(size, "px"); + size += sections[i].getBoundingClientRect()[this.config.sizeField]; + this.sliders[i].style[this.config.positionField] = "".concat(size, "px"); + } +}; +/** + * + * @private + */ + + +Dics.prototype._disableImageDrag = function () { + for (var i = 0; i < this.images.length; i++) { + this.sliders[i].addEventListener('dragstart', function (e) { + e.preventDefault(); + }); + this.images[i].addEventListener('dragstart', function (e) { + e.preventDefault(); + }); 
+ } +}; +/** + * + * @param image + * @param index + * @param filters + * @private + */ + + +Dics.prototype._applyFilter = function (image, index, filters) { + if (filters) { + image.style.filter = filters[index]; + } +}; +/** + * + * @param options + * @private + */ + + +Dics.prototype._applyGlobalClass = function (options) { + var container = options.container; + + if (options.hideTexts) { + container.classList.add('b-dics--hide-texts'); + } + + if (options.linesOrientation === 'vertical') { + container.classList.add('b-dics--vertical'); + } + + if (options.textPosition === 'center') { + container.classList.add('b-dics--tp-center'); + } else if (options.textPosition === 'bottom') { + container.classList.add('b-dics--tp-bottom'); + } else if (options.textPosition === 'left') { + container.classList.add('b-dics--tp-left'); + } else if (options.textPosition === 'right') { + container.classList.add('b-dics--tp-right'); + } +}; + +Dics.prototype._createSlider = function (i, initialImagesContainerWidth) { + var slider = this._createElement('div', 'b-dics__slider'); + + if (this.options.linesColor) { + slider.style.color = this.options.linesColor; + } + + slider.style[this.config.positionField] = "".concat(initialImagesContainerWidth * (i + 1), "px"); + this.sliders.push(slider); + return slider; +}; +/** + * + * @param image + * @param i + * @param imageContainer + * @private + */ + + +Dics.prototype._createAltText = function (image, i, imageContainer) { + var textContent = image.getAttribute('alt'); + + if (textContent) { + var text = this._createElement('p', 'b-dics__text'); + + if (this.options.arrayBackgroundColorText) { + text.style.backgroundColor = this.options.arrayBackgroundColorText[i]; + } + + if (this.options.arrayColorText) { + text.style.color = this.options.arrayColorText[i]; + } + + text.appendChild(document.createTextNode(textContent)); + imageContainer.appendChild(text); + } +}; +/** + * + * @param image + * @param imageContainer + * @private + */ + + 
+Dics.prototype._rotate = function (image, imageContainer) { + image.style.rotate = "-".concat(this.options.rotate); + imageContainer.style.rotate = this.options.rotate; +}; +/** + * + * @private + */ + + +Dics.prototype._removeActiveElements = function () { + var activeElements = Dics.container.querySelectorAll('.b-dics__slider--active'); + var _iteratorNormalCompletion2 = true; + var _didIteratorError2 = false; + var _iteratorError2 = undefined; + + try { + for (var _iterator2 = activeElements[Symbol.iterator](), _step2; !(_iteratorNormalCompletion2 = (_step2 = _iterator2.next()).done); _iteratorNormalCompletion2 = true) { + var activeElement = _step2.value; + activeElement.classList.remove('b-dics__slider--active'); + utils.removeMultiEvents(Dics.container, ['mousemove', 'touchmove'], Dics.prototype._removeActiveElements); + } + } catch (err) { + _didIteratorError2 = true; + _iteratorError2 = err; + } finally { + try { + if (!_iteratorNormalCompletion2 && _iterator2.return != null) { + _iterator2.return(); + } + } finally { + if (_didIteratorError2) { + throw _iteratorError2; + } + } + } +}; +/** + * + * @param linesOrientation + * @private + */ + + +Dics.prototype._setOrientation = function (linesOrientation) { + this.config = {}; + + if (linesOrientation === 'vertical') { + this.config.offsetSizeField = 'offsetHeight'; + this.config.offsetPositionField = 'offsetTop'; + this.config.sizeField = 'height'; + this.config.positionField = 'top'; + this.config.clientField = 'clientY'; + this.config.pageField = 'pageY'; + } else { + this.config.offsetSizeField = 'offsetWidth'; + this.config.offsetPositionField = 'offsetLeft'; + this.config.sizeField = 'width'; + this.config.positionField = 'left'; + this.config.clientField = 'clientX'; + this.config.pageField = 'pageX'; + } +}; +/** + * + * @param event + * @returns {number} + * @private + */ + + +Dics.prototype._calcPosition = function (event) { + var containerCoords = this.container.getBoundingClientRect(); + var 
pixel = !isNaN(event[this.config.clientField]) ? event[this.config.clientField] : event.touches[0][this.config.clientField]; + return containerCoords[this.config.positionField] < pixel ? pixel - containerCoords[this.config.positionField] : 0; +}; +/** + * + * @private + */ + + +Dics.prototype._pushSections = function (calcMovePixels, position) { + // if (this._rePosUnderActualSections(position)) { + this._setFlex(position, this._isGoingRight); + + var section = this.sections[this._activeSlider]; + var postActualSection = this.sections[this._activeSlider + 1]; + + var sectionWidth = postActualSection.getBoundingClientRect()[this.config.sizeField] - (calcMovePixels - this.sections[this._activeSlider].getBoundingClientRect()[this.config.sizeField]); + + section.style.flex = this._isGoingRight === true ? "2 0 ".concat(calcMovePixels, "px") : "1 1 ".concat(calcMovePixels, "px"); + postActualSection.style.flex = this._isGoingRight === true ? " ".concat(sectionWidth, "px") : "2 0 ".concat(sectionWidth, "px"); + + this._setLeftToImages(this.sections, this.images); // } + +}; +/** + * + * @private + */ + + +Dics.prototype._setFlex = function (position, isGoingRight) { + var beforeSumSectionsSize = 0; + + for (var i = 0; i < this.sections.length; i++) { + var section = this.sections[i]; + var sectionSize = section.getBoundingClientRect()[this.config.sizeField]; + beforeSumSectionsSize += sectionSize; + + if (isGoingRight && position > beforeSumSectionsSize - sectionSize && i > this._activeSlider || !isGoingRight && position < beforeSumSectionsSize && i < this._activeSlider) { + section.style.flex = "1 100 ".concat(sectionSize, "px"); + } else { + section.style.flex = "0 0 ".concat(sectionSize, "px"); + } + } +}; +/** + * + * @type {{extend: (function(*=, *, *): *), setMultiEvents: setMultiEvents, removeMultiEvents: removeMultiEvents, getConstructor: (function(*=): string)}} + */ + + +var utils = { + /** + * Native extend object + * @param target + * @param objects + * @param 
options + * @returns {*} + */ + extend: function extend(target, objects, options) { + for (var object in objects) { + if (objects.hasOwnProperty(object)) { + recursiveMerge(target, objects[object]); + } + } + + function recursiveMerge(target, object) { + for (var property in object) { + if (object.hasOwnProperty(property)) { + var current = object[property]; + + if (utils.getConstructor(current) === 'Object') { + if (!target[property]) { + target[property] = {}; + } + + recursiveMerge(target[property], current); + } else { + // clearEmpty + if (options.clearEmpty) { + if (current == null) { + continue; + } + } + + target[property] = current; + } + } + } + } + + return target; + }, + + /** + * Set Multi addEventListener + * @param element + * @param events + * @param func + */ + setMultiEvents: function setMultiEvents(element, events, func) { + for (var i = 0; i < events.length; i++) { + element.addEventListener(events[i], func); + } + }, + + /** + * + * @param element + * @param events + * @param func + */ + removeMultiEvents: function removeMultiEvents(element, events, func) { + for (var i = 0; i < events.length; i++) { + element.removeEventListener(events[i], func, false); + } + }, + + /** + * Get object constructor + * @param object + * @returns {string} + */ + getConstructor: function getConstructor(object) { + return Object.prototype.toString.call(object).slice(8, -1); + } +}; \ No newline at end of file diff --git a/docs/assets/posts/open-babel/s1.png b/docs/assets/posts/open-babel/s1.png new file mode 100644 index 0000000..ec988a4 Binary files /dev/null and b/docs/assets/posts/open-babel/s1.png differ diff --git a/docs/assets/posts/open-babel/s2.jpg b/docs/assets/posts/open-babel/s2.jpg new file mode 100644 index 0000000..7ece652 Binary files /dev/null and b/docs/assets/posts/open-babel/s2.jpg differ diff --git a/docs/assets/posts/open-babel/s3.jpg b/docs/assets/posts/open-babel/s3.jpg new file mode 100644 index 0000000..5803e97 Binary files /dev/null and 
b/docs/assets/posts/open-babel/s3.jpg differ diff --git a/docs/assets/posts/open-peeps/ex-1.svg b/docs/assets/posts/open-peeps/ex-1.svg new file mode 100644 index 0000000..7831d9b --- /dev/null +++ b/docs/assets/posts/open-peeps/ex-1.svg @@ -0,0 +1 @@ +Asset 1 \ No newline at end of file diff --git "a/docs/assets/r\303\251sum\303\251.pdf" "b/docs/assets/r\303\251sum\303\251.pdf" new file mode 100644 index 0000000..8931b18 Binary files /dev/null and "b/docs/assets/r\303\251sum\303\251.pdf" differ diff --git a/docs/assets/sakura.css b/docs/assets/sakura.css new file mode 100644 index 0000000..61c929e --- /dev/null +++ b/docs/assets/sakura.css @@ -0,0 +1,186 @@ +/* Sakura.css v1.3.0 + * ================ + * Minimal css theme. + * Project: https://github.com/oxalorg/sakura/ + */ +/* Body */ +html { + font-size: 62.5%; + font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, "Noto Sans", sans-serif; } + +body { + font-size: 1.8rem; + line-height: 1.618; + max-width: 38em; + margin: auto; + color: #4a4a4a; + background-color: #f9f9f9; + padding: 13px; } + +@media (max-width: 684px) { + body { + font-size: 1.53rem; } } + +@media (max-width: 382px) { + body { + font-size: 1.35rem; } } + +h1, h2, h3, h4, h5, h6 { + line-height: 1.1; + font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, "Noto Sans", sans-serif; + font-weight: 700; + margin-top: 3rem; + margin-bottom: 1.5rem; + overflow-wrap: break-word; + word-wrap: break-word; + -ms-word-break: break-all; + word-break: break-word; + -ms-hyphens: auto; + -moz-hyphens: auto; + -webkit-hyphens: auto; + hyphens: auto; } + +h1 { + font-size: 2.35em; } + +h2 { + font-size: 2.00em; } + +h3 { + font-size: 1.75em; } + +h4 { + font-size: 1.5em; } + +h5 { + font-size: 1.25em; } + +h6 { + font-size: 1em; } + +p { + margin-top: 0px; + margin-bottom: 2.5rem; } + +small, sub, sup { + font-size: 75%; } + +hr { + border-color: #2c8898; } + +a { + text-decoration: 
none; + color: #2c8898; } + a:hover { + color: #982c61; + border-bottom: 2px solid #4a4a4a; } + +ul { + padding-left: 1.4em; + margin-top: 0px; + margin-bottom: 2.5rem; } + +li { + margin-bottom: 0.4em; } + +blockquote { + margin-left: 0px; + margin-right: 0px; + padding-left: 1em; + padding-top: 0.8em; + padding-bottom: 0.8em; + padding-right: 0.8em; + border-left: 5px solid #2c8898; + margin-bottom: 2.5rem; + background-color: #f1f1f1; } + +blockquote p { + margin-bottom: 0; } + +img { + height: auto; + max-width: 100%; + margin-top: 0px; + margin-bottom: 2.5rem; } + +/* Pre and Code */ +pre { + background-color: #f1f1f1; + display: block; + padding: 1em; + overflow-x: auto; + margin-top: 0px; + margin-bottom: 2.5rem; } + +code { + font-size: 0.9em; + padding: 0 0.5em; + background-color: #f1f1f1; + white-space: pre-wrap; } + +pre > code { + padding: 0; + background-color: transparent; + white-space: pre; } + +/* Tables */ +table { + text-align: justify; + width: 100%; + border-collapse: collapse; } + +td, th { + padding: 0.5em; + border-bottom: 1px solid #f1f1f1; } + +/* Buttons, forms and input */ +input, textarea { + border: 1px solid #4a4a4a; } + input:focus, textarea:focus { + border: 1px solid #2c8898; } + +textarea { + width: 100%; } + +.button, button, input[type="submit"], input[type="reset"], input[type="button"] { + display: inline-block; + padding: 5px 10px; + text-align: center; + text-decoration: none; + white-space: nowrap; + background-color: #2c8898; + color: #f9f9f9; + border-radius: 1px; + border: 1px solid #2c8898; + cursor: pointer; + box-sizing: border-box; } + .button[disabled], button[disabled], input[type="submit"][disabled], input[type="reset"][disabled], input[type="button"][disabled] { + cursor: default; + opacity: .5; } + .button:focus:enabled, .button:hover:enabled, button:focus:enabled, button:hover:enabled, input[type="submit"]:focus:enabled, input[type="submit"]:hover:enabled, input[type="reset"]:focus:enabled, 
input[type="reset"]:hover:enabled, input[type="button"]:focus:enabled, input[type="button"]:hover:enabled { + background-color: #982c61; + border-color: #982c61; + color: #f9f9f9; + outline: 0; } + +textarea, select, input[type] { + color: #4a4a4a; + padding: 6px 10px; + /* The 6px vertically centers text on FF, ignored by Webkit */ + margin-bottom: 10px; + background-color: #f1f1f1; + border: 1px solid #f1f1f1; + border-radius: 4px; + box-shadow: none; + box-sizing: border-box; } + textarea:focus, select:focus, input[type]:focus { + border: 1px solid #2c8898; + outline: 0; } + +input[type="checkbox"]:focus { + outline: 1px dotted #2c8898; } + +label, legend, fieldset { + display: block; + margin-bottom: .5rem; + font-weight: 600; } diff --git a/docs/feed.rss b/docs/feed.rss new file mode 100644 index 0000000..9620b24 --- /dev/null +++ b/docs/feed.rss @@ -0,0 +1,3337 @@ + + + + Navan's Archive + Rare Tips, Tricks and Posts + https://web.navan.dev/en + Wed, 26 May 2021 23:59:03 -0000 + Wed, 26 May 2021 23:59:03 -0000 + 250 + + + + + https://web.navan.dev/posts/2020-03-08-Making-Vaporwave-Track.html + + + Making My First Vaporwave Track (Remix) + + + I made my first vaporwave remix + + https://web.navan.dev/posts/2020-03-08-Making-Vaporwave-Track.html + Sun, 08 Mar 2020 23:17:00 -0000 + Making My First Vaporwave Track (Remix) + +

I finally completed my first quick and dirty vaporwave remix of "I Want It That Way" by the Backstreet Boys

+ +

V A P O R W A V E

+ +

Vaporwave is all about A E S T H E T I C S. Vaporwave is a type of music genre that emerged as a parody of Chillwave, shared more as a meme rather than a proper musical genre. Of course this changed as the genre become mature

+ +

How to Vaporwave

+ +

The first track which is considered to be actual Vaporwave is Ramona Xavier's Macintosh Plus, this set the the guidelines for making Vaporwave

+ +
    +
  • Take a 1980s RnB song
  • +
  • Slow it down
  • +
  • Add Bass and Treble
  • +
  • Add again
  • +
  • Add Reverb ( make sure its wet )
  • +
+ +

There you have your very own Vaporwave track.

+ +

( Now, there are some tracks being produced which are not remixes and are original )

+ +

My Remix

+ + + +

Where is the Programming?

+ +

The fact that there are steps on producing Vaporwave, this gave me the idea that Vaporwave can actually be made using programming, stay tuned for when I publish the program which I am working on ( Generating A E S T H E T I C artwork and remixes)

+]]>
+
+ + + + https://web.navan.dev/posts/hello-world.html + + + Hello World + + + My first post. + + https://web.navan.dev/posts/hello-world.html + Tue, 16 Apr 2019 17:39:00 -0000 + Hello World + +

Why a Hello World post?

+ +

Just re-did the entire website using Publish (Publish by John Sundell). So, a new hello world post :)

+]]>
+
+ + + + https://web.navan.dev/posts/2010-01-24-experiments.html + + + Experiments + + + Just a markdown file for all experiments related to the website + + https://web.navan.dev/posts/2010-01-24-experiments.html + Sun, 24 Jan 2010 23:43:00 -0000 + Experiments + +

https://s3-us-west-2.amazonaws.com/s.cdpn.io/148866/img-original.jpg

+ + +]]>
+
+ + + + https://web.navan.dev/posts/2020-01-15-Setting-up-Kaggle-to-use-with-Colab.html + + + Setting up Kaggle to use with Google Colab + + + Tutorial on setting up kaggle, to use with Google Colab + + https://web.navan.dev/posts/2020-01-15-Setting-up-Kaggle-to-use-with-Colab.html + Wed, 15 Jan 2020 23:36:00 -0000 + Setting up Kaggle to use with Google Colab + +

In order to be able to access Kaggle Datasets, you will need to have an account on Kaggle (which is Free)

+ +

Grabbing Our Tokens

+ +

Go to Kaggle

+ +

"Homepage"

+ +

Click on your User Profile and Click on My Account

+ +

"Account"

+ +

Scroll Down until you see Create New API Token

+ +

+ +

This will download your token as a JSON file

+ +

+ +

Copy the File to the root folder of your Google Drive

+ +

Setting up Colab

+ +

Mounting Google Drive

+ +
import os
+from google.colab import drive
+drive.mount('/content/drive')
+
+ +

After this click on the URL in the output section, login and then paste the Auth Code

+ +

Configuring Kaggle

+ +
os.environ['KAGGLE_CONFIG_DIR'] = "/content/drive/My Drive/"
+
+ +

Voila! You can now download Kaggle datasets

+]]>
+
+ + + + https://web.navan.dev/posts/2019-12-08-Image-Classifier-Tensorflow.html + + + Creating a Custom Image Classifier using Tensorflow 2.x and Keras for Detecting Malaria + + + Tutorial on creating an image classifier model using TensorFlow which detects malaria + + https://web.navan.dev/posts/2019-12-08-Image-Classifier-Tensorflow.html + Sun, 08 Dec 2019 14:16:00 -0000 + Creating a Custom Image Classifier using Tensorflow 2.x and Keras for Detecting Malaria + +

Done during Google Code-In. Org: Tensorflow.

+ +

Imports

+ +
%tensorflow_version 2.x #This is for telling Colab that you want to use TF 2.0, ignore if running on local machine
+
+from PIL import Image # We use the PIL Library to resize images
+import numpy as np
+import os
+import cv2
+import tensorflow as tf
+from tensorflow.keras import datasets, layers, models
+import pandas as pd
+import matplotlib.pyplot as plt
+from keras.models import Sequential
+from keras.layers import Conv2D,MaxPooling2D,Dense,Flatten,Dropout
+
+ +

Dataset

+ +

Fetching the Data

+ +
!wget ftp://lhcftp.nlm.nih.gov/Open-Access-Datasets/Malaria/cell_images.zip
+!unzip cell_images.zip
+
+ +

Processing the Data

+ +

We resize all the images to 50x50 pixels and add the numpy array of each image, as well as its label (Infected or Not), to common arrays.

+ +
data = []
+labels = []
+
+Parasitized = os.listdir("./cell_images/Parasitized/")
+for parasite in Parasitized:
+    try:
+        image=cv2.imread("./cell_images/Parasitized/"+parasite)
+        image_from_array = Image.fromarray(image, 'RGB')
+        size_image = image_from_array.resize((50, 50))
+        data.append(np.array(size_image))
+        labels.append(0)
+    except AttributeError:
+        print("")
+
+Uninfected = os.listdir("./cell_images/Uninfected/")
+for uninfect in Uninfected:
+    try:
+        image=cv2.imread("./cell_images/Uninfected/"+uninfect)
+        image_from_array = Image.fromarray(image, 'RGB')
+        size_image = image_from_array.resize((50, 50))
+        data.append(np.array(size_image))
+        labels.append(1)
+    except AttributeError:
+        print("")
+
+ +

Splitting Data

+ +
df = np.array(data)
+labels = np.array(labels)
+(X_train, X_test) = df[(int)(0.1*len(df)):],df[:(int)(0.1*len(df))]
+(y_train, y_test) = labels[(int)(0.1*len(labels)):],labels[:(int)(0.1*len(labels))]
+
+ +
s=np.arange(X_train.shape[0])
+np.random.shuffle(s)
+X_train=X_train[s]
+y_train=y_train[s]
+X_train = X_train/255.0
+
+ +

Model

+ +

Creating Model

+ +

By creating a sequential model, we create a linear stack of layers.

+ +

Note: The input shape for the first layer is 50,50 which corresponds with the sizes of the resized images

+ +
model = models.Sequential()
+model.add(layers.Conv2D(filters=16, kernel_size=2, padding='same', activation='relu', input_shape=(50,50,3)))
+model.add(layers.MaxPooling2D(pool_size=2))
+model.add(layers.Conv2D(filters=32,kernel_size=2,padding='same',activation='relu'))
+model.add(layers.MaxPooling2D(pool_size=2))
+model.add(layers.Conv2D(filters=64,kernel_size=2,padding="same",activation="relu"))
+model.add(layers.MaxPooling2D(pool_size=2))
+model.add(layers.Dropout(0.2))
+model.add(layers.Flatten())
+model.add(layers.Dense(500,activation="relu"))
+model.add(layers.Dropout(0.2))
+model.add(layers.Dense(2,activation="softmax"))#2 represent output layer neurons 
+model.summary()
+
+ +

Compiling Model

+ +

We use the Adam optimiser as it is an adaptive learning rate optimisation algorithm that's been designed specifically for training deep neural networks, which means it changes its learning rate automatically to get the best results

+ +
model.compile(optimizer="adam",
+              loss="sparse_categorical_crossentropy", 
+             metrics=["accuracy"])
+
+ +

Training Model

+ +

We train the model for 10 epochs on the training data and then validate it using the testing data

+ +
history = model.fit(X_train,y_train, epochs=10, validation_data=(X_test,y_test))
+
+ +
Train on 24803 samples, validate on 2755 samples
+Epoch 1/10
+24803/24803 [==============================] - 57s 2ms/sample - loss: 0.0786 - accuracy: 0.9729 - val_loss: 0.0000e+00 - val_accuracy: 1.0000
+Epoch 2/10
+24803/24803 [==============================] - 58s 2ms/sample - loss: 0.0746 - accuracy: 0.9731 - val_loss: 0.0290 - val_accuracy: 0.9996
+Epoch 3/10
+24803/24803 [==============================] - 58s 2ms/sample - loss: 0.0672 - accuracy: 0.9764 - val_loss: 0.0000e+00 - val_accuracy: 1.0000
+Epoch 4/10
+24803/24803 [==============================] - 58s 2ms/sample - loss: 0.0601 - accuracy: 0.9789 - val_loss: 0.0000e+00 - val_accuracy: 1.0000
+Epoch 5/10
+24803/24803 [==============================] - 58s 2ms/sample - loss: 0.0558 - accuracy: 0.9804 - val_loss: 0.0000e+00 - val_accuracy: 1.0000
+Epoch 6/10
+24803/24803 [==============================] - 57s 2ms/sample - loss: 0.0513 - accuracy: 0.9819 - val_loss: 0.0000e+00 - val_accuracy: 1.0000
+Epoch 7/10
+24803/24803 [==============================] - 58s 2ms/sample - loss: 0.0452 - accuracy: 0.9849 - val_loss: 0.3190 - val_accuracy: 0.9985
+Epoch 8/10
+24803/24803 [==============================] - 58s 2ms/sample - loss: 0.0404 - accuracy: 0.9858 - val_loss: 0.0000e+00 - val_accuracy: 1.0000
+Epoch 9/10
+24803/24803 [==============================] - 58s 2ms/sample - loss: 0.0352 - accuracy: 0.9878 - val_loss: 0.0000e+00 - val_accuracy: 1.0000
+Epoch 10/10
+24803/24803 [==============================] - 58s 2ms/sample - loss: 0.0373 - accuracy: 0.9865 - val_loss: 0.0000e+00 - val_accuracy: 1.0000
+
+ +

Results

+ +
accuracy = history.history['accuracy'][-1]*100
+loss = history.history['loss'][-1]*100
+val_accuracy = history.history['val_accuracy'][-1]*100
+val_loss = history.history['val_loss'][-1]*100
+
+print(
+    'Accuracy:', accuracy,
+    '\nLoss:', loss,
+    '\nValidation Accuracy:', val_accuracy,
+    '\nValidation Loss:', val_loss
+)
+
+ +
Accuracy: 98.64532351493835 
+Loss: 3.732407123270176 
+Validation Accuracy: 100.0 
+Validation Loss: 0.0
+
+ +

We have achieved 98% Accuracy!

+ +

Link to Colab Notebook

+]]>
+
+ + + + https://web.navan.dev/posts/2020-05-31-compiling-open-babel-on-ios.html + + + Compiling Open Babel on iOS + + + Compiling Open Babel on iOS + + https://web.navan.dev/posts/2020-05-31-compiling-open-babel-on-ios.html + Sun, 31 May 2020 23:30:00 -0000 + Compiling Open Babel on iOS + +

Due to the fact that my summer vacations started today, +I had the brilliant idea of trying to run open babel on my iPad. +To give a little background, I had tried to compile AutoDock Vina using a cross-compiler but I had miserably failed.

+ +

I am running the Checkr1n jailbreak on my iPad and the Unc0ver jailbreak on my phone.

+ +

But Why?

+ +

Well, just because I can. This is literally the only reason I tried compiling it and also partially because in the long run I want to compile AutoDock Vina so I can do Molecular Docking on the go.

+ +

Let's Go!

+ +

How hard can it be to compile open babel right? It is just a simple piece of software with clear and concise build instructions. I just need to use cmake to build and then make to install.

+ +

It was 11 in the morning. I installed clang, cmake and make from Sam Bingner's repository, fired up ssh, downloaded the source code and ran the build command.

+ +

Fail No. 1

+ +

I couldn't even get cmake to run. I did a little digging around StackOverflow and found that I needed the iOS SDK — sure, no problem. I waited for Xcode to update and transferred the SDKs to my iPad

+ +
scp -r /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk root@192.168.1.8:/var/sdks/
+
+ +

Then I told cmake that this is the location of my SDK 😠. Success! Now I just needed to run make.

+ +

Fail No. 2

+ +

It was giving the error that thread-local-storage was not supported on this device.

+ +
[  0%] Building CXX object src/CMakeFiles/openbabel.dir/alias.cpp.o
+[  1%] Building CXX object src/CMakeFiles/openbabel.dir/atom.cpp.o
+In file included from /var/root/obabel/ob-src/src/atom.cpp:28:
+In file included from /var/root/obabel/ob-src/include/openbabel/ring.h:29:
+/var/root/obabel/ob-src/include/openbabel/typer.h:70:1: error: thread-local storage is not supported for the current target
+THREAD_LOCAL OB_EXTERN OBAtomTyper      atomtyper;
+^
+/var/root/obabel/ob-src/include/openbabel/mol.h:35:24: note: expanded from macro 'THREAD_LOCAL'
+#  define THREAD_LOCAL thread_local
+                       ^
+In file included from /var/root/obabel/ob-src/src/atom.cpp:28:
+In file included from /var/root/obabel/ob-src/include/openbabel/ring.h:29:
+/var/root/obabel/ob-src/include/openbabel/typer.h:84:1: error: thread-local storage is not supported for the current target
+THREAD_LOCAL OB_EXTERN OBAromaticTyper  aromtyper;
+^
+/var/root/obabel/ob-src/include/openbabel/mol.h:35:24: note: expanded from macro 'THREAD_LOCAL'
+#  define THREAD_LOCAL thread_local
+                       ^
+/var/root/obabel/ob-src/src/atom.cpp:107:10: error: thread-local storage is not supported for the current target
+  extern THREAD_LOCAL OBAromaticTyper  aromtyper;
+         ^
+/var/root/obabel/ob-src/include/openbabel/mol.h:35:24: note: expanded from macro 'THREAD_LOCAL'
+#  define THREAD_LOCAL thread_local
+                       ^
+/var/root/obabel/ob-src/src/atom.cpp:108:10: error: thread-local storage is not supported for the current target
+  extern THREAD_LOCAL OBAtomTyper      atomtyper;
+         ^
+/var/root/obabel/ob-src/include/openbabel/mol.h:35:24: note: expanded from macro 'THREAD_LOCAL'
+#  define THREAD_LOCAL thread_local
+                       ^
+/var/root/obabel/ob-src/src/atom.cpp:109:10: error: thread-local storage is not supported for the current target
+  extern THREAD_LOCAL OBPhModel        phmodel;
+         ^
+/var/root/obabel/ob-src/include/openbabel/mol.h:35:24: note: expanded from macro 'THREAD_LOCAL'
+#  define THREAD_LOCAL thread_local
+                       ^
+5 errors generated.
+make[2]: *** [src/CMakeFiles/openbabel.dir/build.make:76: src/CMakeFiles/openbabel.dir/atom.cpp.o] Error 1
+make[1]: *** [CMakeFiles/Makefile2:1085: src/CMakeFiles/openbabel.dir/all] Error 2
+make: *** [Makefile:129: all] Error 2
+
+ +

Strange but it is alright, there is nothing that hasn't been answered on the internet.

+ +

I did a little digging around and could not find a solution 😔

+ +

As a temporary fix, I disabled multithreading by going and commenting the lines in the source code.

+ +

"Open-Babel running on my iPad"

+ +

Packaging as a deb

+ +

This was pretty straightforward. I tried installing it on my iPad and it was working pretty smoothly.

+ +

Moment of Truth

+ +

So I airdropped the .deb to my phone and tried installing it, the installation was successful but when I tried obabel it just aborted.

+ +

"Open Babel crashing"

+ +

Turns out because I had created an install target of a separate folder while compiling, the binaries were referencing a non-existing dylib rather than those in the /usr/lib folder. As a quick workaround I transferred the deb folder to my laptop and used otool and install_name_tool: install_name_tool -change /var/root/obabel/ob-build/lib/libopenbabel.7.dylib /usr/lib/libopenbabel.7.dylib for all the executables and then signed them using jtool

+ +

I then installed it and everything went smoothly, I even ran obabel and it executed perfectly, showing the version number 3.1.0 ✌️ Ahh, smooth victory.

+ +

Nope. When I tried converting from SMILES to pdbqt, it gave an error saying plugin not found. This was weird.

+ +

"Open Babel Plugin Error"

+ +

So I just copied the entire build folder from my iPad to my phone and tried running it. Oops, Apple Sandbox Error, Oh no!

+ +

I spent 2 hours on this problem, only to check the documentation and realise I hadn't set up the environment variable 🤦‍♂️

+ +

The Final Fix ( For Now )

+ +
export BABEL_DATADIR="/usr/share/openbabel/3.1.0"
+export BABEL_LIBDIR="/usr/lib/openbabel/3.1.0"
+
+ +

This was the tragedy of trying to compile something without knowing enough about compiling. It is 11:30 as of writing this. Something as trivial as this should not have taken me so long. Am I going to try to compile AutoDock Vina next? 🤔 Maybe.

+ +

Also, if you want to try Open Babel on your jailbroken iDevice, install the package from my repository ( you need to run the above-mentioned final fix :p ). This was tested on iOS 13.5; I cannot tell whether it will work on other versions.

+ +

Hopefully, I add some more screenshots to this post.

+ +

Edit 1: Added Screenshots, had to replicate the errors.

+]]>
+
+ + + + https://web.navan.dev/posts/2020-08-01-Natural-Feature-Tracking-ARJS.html + + + Introduction to AR.js and Natural Feature Tracking + + + An introduction to AR.js and NFT + + https://web.navan.dev/posts/2020-08-01-Natural-Feature-Tracking-ARJS.html + Sat, 01 Aug 2020 15:43:00 -0000 + Introduction to AR.js and Natural Feature Tracking + +

AR.js

+ +

AR.js is a lightweight library for Augmented Reality on the Web, coming with features like Image Tracking, Location based AR and Marker tracking. It is the easiest option for cross-browser augmented reality.

+ +

The same code works for iOS, Android, Desktops and even VR Browsers!

+ +

It was initially created by Jerome Etienne and is now maintained by Nicolo Carpignoli and the AR-js Organisation

+ +

NFT

+ +

Usually for augmented reality you need specialised markers, like this Hiro marker (notice the thick non-aesthetic borders 🤢)

+ +

+ +

This is called marker based tracking, where the code knows what to look for. NFT, or Natural Feature Tracking, converts normal images into markers by extracting 'features' from them — this way you can use any image of your liking!

+ +

I'll be using my GitHub profile picture

+ +

+ +

Creating the Marker!

+ +

First we need to create the marker files required by AR.js for NFT. For this we use Carnaux's repository 'NFT-Marker-Creator'.

+ +
$ git clone https://github.com/Carnaux/NFT-Marker-Creator
+
+Cloning into 'NFT-Marker-Creator'...
+remote: Enumerating objects: 79, done.
+remote: Counting objects: 100% (79/79), done.
+remote: Compressing objects: 100% (72/72), done.
+remote: Total 580 (delta 10), reused 59 (delta 7), pack-reused 501
+Receiving objects: 100% (580/580), 9.88 MiB | 282.00 KiB/s, done.
+Resolving deltas: 100% (262/262), done.
+
+$ cd NFT-Marker-Creator
+
+ +

Install the dependencies

+ +
$ npm install
+
+npm WARN nodegenerator@1.0.0 No repository field.
+
+added 67 packages from 56 contributors and audited 67 packages in 2.96s
+
+1 package is looking for funding
+  run `npm fund` for details
+
+found 0 vulnerabilities
+
+
+
+   ╭────────────────────────────────────────────────────────────────╮
+   │                                                                │
+   │      New patch version of npm available! 6.14.5 → 6.14.7       │
+   │   Changelog: https://github.com/npm/cli/releases/tag/v6.14.7   │
+   │               Run npm install -g npm to update!                │
+   │                                                                │
+   ╰────────────────────────────────────────────────────────────────╯
+
+
+
+ +

Copy the target marker to the folder

+ +
$ cp ~/CodingAndStuff/ARjs/me.png .
+
+ +

Generate Marker

+ +
$ node app.js -i me.png
+
+Confidence level: [ * * * * * ] 5/5 || Entropy: 5.24 || Current max: 5.17 min: 4.6
+
+Do you want to continue? (Y/N)
+y
+writeStringToMemory is deprecated and should not be called! Use stringToUTF8() instead!
+[info] 
+Commands: 
+[info] --
+Generator started at 2020-08-01 16:01:41 +0580
+[info] Tracking Extraction Level = 2
+[info] MAX_THRESH  = 0.900000
+[info] MIN_THRESH  = 0.550000
+[info] SD_THRESH   = 8.000000
+[info] Initialization Extraction Level = 1
+[info] SURF_FEATURE = 100
+[info]  min allow 3.699000.
+[info] Image DPI (1): 3.699000
+[info] Image DPI (2): 4.660448
+[info] Image DPI (3): 5.871797
+[info] Image DPI (4): 7.398000
+[info] Image DPI (5): 9.320896
+[info] Image DPI (6): 11.743593
+[info] Image DPI (7): 14.796000
+[info] Image DPI (8): 18.641792
+[info] Image DPI (9): 23.487186
+[info] Image DPI (10): 29.592001
+[info] Image DPI (11): 37.283585
+[info] Image DPI (12): 46.974373
+[info] Image DPI (13): 59.184002
+[info] Image DPI (14): 72.000000
+[info] Generating ImageSet...
+[info]    (Source image xsize=568, ysize=545, channels=3, dpi=72.0).
+[info]   Done.
+[info] Saving to asa.iset...
+[info]   Done.
+[info] Generating FeatureList...
+
+...
+
+[info] (46, 44) 5.871797[dpi]
+[info] Freak features - 23[info] ========= 23 ===========
+[info] (37, 35) 4.660448[dpi]
+[info] Freak features - 19[info] ========= 19 ===========
+[info] (29, 28) 3.699000[dpi]
+[info] Freak features - 9[info] ========= 9 ===========
+[info]   Done.
+[info] Saving FeatureSet3...
+[info]   Done.
+[info] Generator finished at 2020-08-01 16:02:02 +0580
+--
+
+Finished marker creation!
+Now configuring demo! 
+
+Finished!
+To run demo use: 'npm run demo'
+
+ +

Now we have the required files in the output folder

+ +
$ ls output
+
+me.fset  me.fset3 me.iset
+
+ +

Creating the HTML Page

+ +

Create a new file called index.html in your project folder. This is the basic template we are going to use. Replace me with the root filename of your image, for example NeverGonnaGiveYouUp.png will become NeverGonnaGiveYouUp. Make sure you have copied all three files from the output folder in the previous step to the root of your project folder.

+ +
<script src="https://cdn.jsdelivr.net/gh/aframevr/aframe@1c2407b26c61958baa93967b5412487cd94b290b/dist/aframe-master.min.js"></script>
+<script src="https://raw.githack.com/AR-js-org/AR.js/master/aframe/build/aframe-ar-nft.js"></script>
+
+<style>
+  .arjs-loader {
+    height: 100%;
+    width: 100%;
+    position: absolute;
+    top: 0;
+    left: 0;
+    background-color: rgba(0, 0, 0, 0.8);
+    z-index: 9999;
+    display: flex;
+    justify-content: center;
+    align-items: center;
+  }
+
+  .arjs-loader div {
+    text-align: center;
+    font-size: 1.25em;
+    color: white;
+  }
+</style>
+
+<body style="margin : 0px; overflow: hidden;">
+  <div class="arjs-loader">
+    <div>Calculating Image Descriptors....</div>
+  </div>
+  <a-scene
+    vr-mode-ui="enabled: false;"
+    renderer="logarithmicDepthBuffer: true;"
+    embedded
+    arjs="trackingMethod: best; sourceType: webcam;debugUIEnabled: false;"
+  >
+    <a-nft
+      type="nft"
+      url="./me"
+      smooth="true"
+      smoothCount="10"
+      smoothTolerance=".01"
+      smoothThreshold="5"
+    >
+
+    </a-nft>
+    <a-entity camera></a-entity>
+  </a-scene>
+</body>
+
+ +

In this we are creating an AFrame scene and we are telling it that we want to use NFT Tracking. The amazing part about using AFrame is that we are able to use all AFrame objects!

+ +

Adding a simple box

+ +

Let us add a simple box!

+ +
<a-nft .....>
+    <a-box position='100 0.5 -180' material='opacity: 0.5; side: double' scale="100 100 100"></a-box>
+</a-nft>
+
+ +

Now to test it out we will need to create a simple server, I use Python's inbuilt SimpleHTTPServer alongside ngrok

+ +

In one terminal window, cd to the project directory. Currently your project folder should have 4 files, index.html, me.fset3, me.fset and me.iset

+ +

Open up two terminal windows and cd into your project folder then run the following commands to start up your server.

+ +

In the first terminal window start the Python Server

+ +
$ cd ~/CodingAndStuff/ARjs
+$ python2 -m SimpleHTTPServer
+
+Serving HTTP on 0.0.0.0 port 8000 ...
+
+
+ +

In the other window run ngrok ( Make sure you have installed it prior to running this step )

+ +
$ ngrok http 8000
+
+
+ +

+ +

Now copy the url to your phone and try running the example

+ +

+ +

👏 Congratulations! You just built an Augmented Reality experience using AR.js and AFrame

+ +

Adding a Torus-Knot in the box

+ +

Edit your index.html

+ +
<a-nft ..>
+    <a-box ..>
+        <a-torus-knot radius='0.26' radius-tubular='0.05' ></a-torus-knot>
+    </ a-box>
+</ a-nft>
+
+ +

+ +

Where are the GIFs?

+ +

Now that we know how to place a box in the scene and add a torus knot in it, what do we do next? We bring the classic internet back!

+ +

AFrame GIF Shader is a gif shader for A-Frame created by mayognaise.

+ +

First things first

+ +

Add <script src="https://rawgit.com/mayognaise/aframe-gif-shader/master/dist/aframe-gif-shader.min.js"></script> to <head>

+ +

Change the box's material to add the GIF shader

+ +
...
+<a-box position='100 0.5 -180' material="shader:gif;src:url(https://media.tenor.com/images/412b1aa9149d98d561df62db221e0789/tenor.gif);opacity:.5" .....>
+
+ +

+ +

Bonus Idea: Integrate it with GitHub's new profile Readme Feature!

+ +

1) Host the code using GitHub Pages

+ +

2) Create a new repository ( the name should be your GitHub username )

+ +

3) Add QR Code to the page and tell the users to scan your profile picture

+ +

??) Profit 💸

+ +

Here is a screenshot of me scanning a rounded version of my profile picture ( It still works! Even though the image is cropped and I haven't changed any line of code )

+ +

+]]>
+
+ + + + https://web.navan.dev/posts/2020-03-03-Playing-With-Android-TV.html + + + Tinkering with an Android TV + + + Tinkering with an Android TV + + https://web.navan.dev/posts/2020-03-03-Playing-With-Android-TV.html + Tue, 03 Mar 2020 18:37:00 -0000 + Tinkering with an Android TV + +

So I have an Android TV; this post covers everything I have tried on it

+ +

Contents

+ +
    +
  1. Getting TV's IP Address
  2. +
  3. Enable Developer Settings
  4. +
  5. Enable ADB
  6. +
  7. Connect ADB
  8. +
  9. Manipulating Packages
  10. +
+ +

IP-Address

+ +

These steps should be similar for all Android-TVs

+ +
    +
  • Go To Settings
  • +
  • Go to Network
  • +
  • Advanced Settings
  • +
  • Network Status
  • +
  • Note Down IP-Address
  • +
+ +

The other option is to go to your router's server page and get connected devices

+ +

Developer-Settings

+ +
    +
  • Go To Settings
  • +
  • About
  • +
  • Continuously click on the "Build" option until it says "You are a Developer"
  • +
+ +

Enable-ADB

+ +
    +
  • Go to Settings
  • +
  • Go to Developer Options
  • +
  • Scroll until you find ADB Debugging and enable that option
  • +
+ +

Connect-ADB

+ +
    +
  • Open Terminal (Make sure you have ADB installed)
  • +
  • Enter the following command adb connect <IP_ADDRESS>
  • +
  • To test the connection run adb logcat
  • +
+ +

Manipulating Apps / Packages

+ +

Listing Packages

+ +
    +
  • adb shell
  • +
  • pm list packages
  • +
+ +

Installing Packages

+ +
    +
  • adb install -r package.apk
  • +
+ +

Uninstalling Packages

+ +
    +
  • adb uninstall com.company.yourpackagename
  • +
+]]>
+
+ + + + https://web.navan.dev/posts/2019-12-16-TensorFlow-Polynomial-Regression.html + + + Polynomial Regression Using TensorFlow + + + Polynomial regression using TensorFlow + + https://web.navan.dev/posts/2019-12-16-TensorFlow-Polynomial-Regression.html + Mon, 16 Dec 2019 14:16:00 -0000 + Polynomial Regression Using TensorFlow + +

In this tutorial you will learn about polynomial regression and how you can implement it in Tensorflow.

+ +

In this, we will be performing polynomial regression using 5 types of equations -

+ +
    +
  • Linear
  • +
  • Quadratic
  • +
  • Cubic
  • +
  • Quartic
  • +
  • Quintic
  • +
+ +

Regression

+ +

What is Regression?

+ +

Regression is a statistical measurement that is used to try to determine the relationship between a
+dependent variable (often denoted by Y) and a series of varying variables (called independent variables, often denoted by X).

+ +

What is Polynomial Regression

+ +

This is a form of Regression Analysis where the relationship between Y and X is denoted as the nth degree/power of X. +Polynomial regression even fits a non-linear relationship (e.g when the points don't form a straight line).

+ +

Imports

+ +
import tensorflow.compat.v1 as tf
+tf.disable_v2_behavior()
+import matplotlib.pyplot as plt
+import numpy as np
+import pandas as pd
+
+ +

Dataset

+ +

Creating Random Data

+ +

Even though in this tutorial we will use a Position Vs Salary dataset, it is important to know how to create synthetic data

+ +

To create 50 values spaced evenly between 0 and 50, we use NumPy's linspace function

+ +

linspace(lower_limit, upper_limit, no_of_observations)

+ +
x = np.linspace(0, 50, 50)
+y = np.linspace(0, 50, 50)
+
+ +

We use the following function to add noise to the data, so that our values are more spread out and look realistic rather than lying on a perfect straight line

+ +
x += np.random.uniform(-4, 4, 50)
+y += np.random.uniform(-4, 4, 50)
+
+ +

Position vs Salary Dataset

+ +

We will be using https://drive.google.com/file/d/1tNL4jxZEfpaP4oflfSn6pIHJX7Pachm9/view (Salary vs Position Dataset)

+ +
!wget --no-check-certificate 'https://docs.google.com/uc?export=download&id=1tNL4jxZEfpaP4oflfSn6pIHJX7Pachm9' -O data.csv
+
+ +
df = pd.read_csv("data.csv")
+
+ +
df # this gives us a preview of the dataset we are working with
+
+ +
| Position          | Level | Salary  |
+|-------------------|-------|---------|
+| Business Analyst  | 1     | 45000   |
+| Junior Consultant | 2     | 50000   |
+| Senior Consultant | 3     | 60000   |
+| Manager           | 4     | 80000   |
+| Country Manager   | 5     | 110000  |
+| Region Manager    | 6     | 150000  |
+| Partner           | 7     | 200000  |
+| Senior Partner    | 8     | 300000  |
+| C-level           | 9     | 500000  |
+| CEO               | 10    | 1000000 |
+
+ +

We convert the salary column as the ordinate (y-coordinate) and level column as the abscissa

+ +
abscissa = df["Level"].to_list() # abscissa = [1,2,3,4,5,6,7,8,9,10]
+ordinate = df["Salary"].to_list() # ordinate = [45000,50000,60000,80000,110000,150000,200000,300000,500000,1000000]
+
+ +
n = len(abscissa) # no of observations
+plt.scatter(abscissa, ordinate)
+plt.ylabel('Salary')
+plt.xlabel('Position')
+plt.title("Salary vs Position")
+plt.show()
+
+ +

+ +

Defining Stuff

+ +
X = tf.placeholder("float")
+Y = tf.placeholder("float")
+
+ +

Defining Variables

+ +

We first define all the coefficients and constant as tensorflow variables having a random initial value

+ +
a = tf.Variable(np.random.randn(), name = "a")
+b = tf.Variable(np.random.randn(), name = "b")
+c = tf.Variable(np.random.randn(), name = "c")
+d = tf.Variable(np.random.randn(), name = "d")
+e = tf.Variable(np.random.randn(), name = "e")
+f = tf.Variable(np.random.randn(), name = "f")
+
+ +

Model Configuration

+ +
learning_rate = 0.2
+no_of_epochs = 25000
+
+ +

Equations

+ +
deg1 = a*X + b
+deg2 = a*tf.pow(X,2) + b*X + c
+deg3 = a*tf.pow(X,3) + b*tf.pow(X,2) + c*X + d
+deg4 = a*tf.pow(X,4) + b*tf.pow(X,3) + c*tf.pow(X,2) + d*X + e
+deg5 = a*tf.pow(X,5) + b*tf.pow(X,4) + c*tf.pow(X,3) + d*tf.pow(X,2) + e*X + f
+
+ +

Cost Function

+ +

We use the Mean Squared Error Function

+ +
mse1 = tf.reduce_sum(tf.pow(deg1-Y,2))/(2*n)
+mse2 = tf.reduce_sum(tf.pow(deg2-Y,2))/(2*n)
+mse3 = tf.reduce_sum(tf.pow(deg3-Y,2))/(2*n)
+mse4 = tf.reduce_sum(tf.pow(deg4-Y,2))/(2*n)
+mse5 = tf.reduce_sum(tf.pow(deg5-Y,2))/(2*n)
+
+ +

Optimizer

+ +

We use the AdamOptimizer for the polynomial functions and GradientDescentOptimizer for the linear function

+ +
optimizer1 = tf.train.GradientDescentOptimizer(learning_rate).minimize(mse1)
+optimizer2 = tf.train.AdamOptimizer(learning_rate).minimize(mse2)
+optimizer3 = tf.train.AdamOptimizer(learning_rate).minimize(mse3)
+optimizer4 = tf.train.AdamOptimizer(learning_rate).minimize(mse4)
+optimizer5 = tf.train.AdamOptimizer(learning_rate).minimize(mse5)
+
+ +
init=tf.global_variables_initializer()
+
+ +

Model Predictions

+ +

For each type of equation first we make the model predict the values of the coefficient(s) and constant, once we get these values we use it to predict the Y +values using the X values. We then plot it to compare the actual data and predicted line.

+ +

Linear Equation

+ +
with tf.Session() as sess:
+    sess.run(init)
+    for epoch in range(no_of_epochs):
+      for (x,y) in zip(abscissa, ordinate):
+        sess.run(optimizer1, feed_dict={X:x, Y:y})
+      if (epoch+1)%1000==0:
+        cost = sess.run(mse1,feed_dict={X:abscissa,Y:ordinate})
+        print("Epoch",(epoch+1), ": Training Cost:", cost," a,b:",sess.run(a),sess.run(b))
+
+        training_cost = sess.run(mse1,feed_dict={X:abscissa,Y:ordinate})
+        coefficient1 = sess.run(a)
+        constant = sess.run(b)
+
+print(training_cost, coefficient1, constant)
+
+ +
Epoch 1000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 2000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 3000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 4000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 5000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 6000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 7000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 8000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 9000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 10000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 11000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 12000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 13000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 14000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 15000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 16000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 17000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 18000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 19000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 20000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 21000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 22000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 23000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 24000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 25000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+88999125000.0 180396.42 -478869.12
+
+ +
predictions = []
+for x in abscissa:
+  predictions.append((coefficient1*x + constant))
+plt.plot(abscissa , ordinate, 'ro', label ='Original data')
+plt.plot(abscissa, predictions, label ='Fitted line')
+plt.title('Linear Regression Result')
+plt.legend()
+plt.show()
+
+ +

+ +

Quadratic Equation

+ +
with tf.Session() as sess:
+    sess.run(init)
+    for epoch in range(no_of_epochs):
+      for (x,y) in zip(abscissa, ordinate):
+        sess.run(optimizer2, feed_dict={X:x, Y:y})
+      if (epoch+1)%1000==0:
+        cost = sess.run(mse2,feed_dict={X:abscissa,Y:ordinate})
+        print("Epoch",(epoch+1), ": Training Cost:", cost," a,b,c:",sess.run(a),sess.run(b),sess.run(c))
+
+        training_cost = sess.run(mse2,feed_dict={X:abscissa,Y:ordinate})
+        coefficient1 = sess.run(a)
+        coefficient2 = sess.run(b)
+        constant = sess.run(c)
+
+print(training_cost, coefficient1, coefficient2, constant)
+
+ +
Epoch 1000 : Training Cost: 52571360000.0  a,b,c: 1002.4456 1097.0197 1276.6921
+Epoch 2000 : Training Cost: 37798890000.0  a,b,c: 1952.4263 2130.2825 2469.7756
+Epoch 3000 : Training Cost: 26751185000.0  a,b,c: 2839.5825 3081.6118 3554.351
+Epoch 4000 : Training Cost: 19020106000.0  a,b,c: 3644.56 3922.9563 4486.3135
+Epoch 5000 : Training Cost: 14060446000.0  a,b,c: 4345.042 4621.4233 5212.693
+Epoch 6000 : Training Cost: 11201084000.0  a,b,c: 4921.1855 5148.1504 5689.0713
+Epoch 7000 : Training Cost: 9732740000.0  a,b,c: 5364.764 5493.0156 5906.754
+Epoch 8000 : Training Cost: 9050918000.0  a,b,c: 5685.4067 5673.182 5902.0728
+Epoch 9000 : Training Cost: 8750394000.0  a,b,c: 5906.9814 5724.8906 5734.746
+Epoch 10000 : Training Cost: 8613128000.0  a,b,c: 6057.3677 5687.3364 5461.167
+Epoch 11000 : Training Cost: 8540034600.0  a,b,c: 6160.547 5592.3022 5122.8633
+Epoch 12000 : Training Cost: 8490983000.0  a,b,c: 6233.9175 5462.025 4747.111
+Epoch 13000 : Training Cost: 8450816500.0  a,b,c: 6289.048 5310.7583 4350.6997
+Epoch 14000 : Training Cost: 8414082000.0  a,b,c: 6333.199 5147.394 3943.9294
+Epoch 15000 : Training Cost: 8378841600.0  a,b,c: 6370.7944 4977.1704 3532.476
+Epoch 16000 : Training Cost: 8344471000.0  a,b,c: 6404.468 4803.542 3120.2087
+Epoch 17000 : Training Cost: 8310785500.0  a,b,c: 6435.365 4628.1523 2709.1445
+Epoch 18000 : Training Cost: 8277482000.0  a,b,c: 6465.5493 4451.833 2300.2783
+Epoch 19000 : Training Cost: 8244650000.0  a,b,c: 6494.609 4274.826 1894.3738
+Epoch 20000 : Training Cost: 8212349000.0  a,b,c: 6522.8247 4098.1733 1491.9915
+Epoch 21000 : Training Cost: 8180598300.0  a,b,c: 6550.6567 3922.7405 1093.3868
+Epoch 22000 : Training Cost: 8149257700.0  a,b,c: 6578.489 3747.8362 698.53357
+Epoch 23000 : Training Cost: 8118325000.0  a,b,c: 6606.1973 3573.2742 307.3541
+Epoch 24000 : Training Cost: 8088001000.0  a,b,c: 6632.96 3399.878 -79.89219
+Epoch 25000 : Training Cost: 8058094600.0  a,b,c: 6659.793 3227.2517 -463.03156
+8058094600.0 6659.793 3227.2517 -463.03156
+
+ +
predictions = []
+for x in abscissa:
+  predictions.append((coefficient1*pow(x,2) + coefficient2*x + constant))
+plt.plot(abscissa , ordinate, 'ro', label ='Original data')
+plt.plot(abscissa, predictions, label ='Fitted line')
+plt.title('Quadratic Regression Result')
+plt.legend()
+plt.show()
+
+ +

+ +

Cubic

+ +
with tf.Session() as sess:
+    sess.run(init)
+    for epoch in range(no_of_epochs):
+      for (x,y) in zip(abscissa, ordinate):
+        sess.run(optimizer3, feed_dict={X:x, Y:y})
+      if (epoch+1)%1000==0:
+        cost = sess.run(mse3,feed_dict={X:abscissa,Y:ordinate})
+        print("Epoch",(epoch+1), ": Training Cost:", cost," a,b,c,d:",sess.run(a),sess.run(b),sess.run(c),sess.run(d))
+
+        training_cost = sess.run(mse3,feed_dict={X:abscissa,Y:ordinate})
+        coefficient1 = sess.run(a)
+        coefficient2 = sess.run(b)
+        coefficient3 = sess.run(c)
+        constant = sess.run(d)
+
+print(training_cost, coefficient1, coefficient2, coefficient3, constant)
+
+ +
Epoch 1000 : Training Cost: 4279814000.0  a,b,c,d: 670.1527 694.4212 751.4653 903.9527
+Epoch 2000 : Training Cost: 3770950400.0  a,b,c,d: 742.6414 666.3489 636.94525 859.2088
+Epoch 3000 : Training Cost: 3717708300.0  a,b,c,d: 756.2582 569.3339 448.105 748.23956
+Epoch 4000 : Training Cost: 3667464000.0  a,b,c,d: 769.4476 474.0318 265.5761 654.75525
+Epoch 5000 : Training Cost: 3620040700.0  a,b,c,d: 782.32324 380.54272 89.39888 578.5136
+Epoch 6000 : Training Cost: 3575265800.0  a,b,c,d: 794.8898 288.83356 -80.5215 519.13654
+Epoch 7000 : Training Cost: 3532972000.0  a,b,c,d: 807.1608 198.87044 -244.31102 476.2061
+Epoch 8000 : Training Cost: 3493009200.0  a,b,c,d: 819.13513 110.64169 -402.0677 449.3291
+Epoch 9000 : Training Cost: 3455228400.0  a,b,c,d: 830.80255 24.0964 -553.92804 438.0652
+Epoch 10000 : Training Cost: 3419475500.0  a,b,c,d: 842.21594 -60.797424 -700.0123 441.983
+Epoch 11000 : Training Cost: 3385625300.0  a,b,c,d: 853.3363 -144.08699 -840.467 460.6356
+Epoch 12000 : Training Cost: 3353544700.0  a,b,c,d: 864.19135 -225.8125 -975.4196 493.57703
+Epoch 13000 : Training Cost: 3323125000.0  a,b,c,d: 874.778 -305.98932 -1104.9867 540.39465
+Epoch 14000 : Training Cost: 3294257000.0  a,b,c,d: 885.1007 -384.63474 -1229.277 600.65607
+Epoch 15000 : Training Cost: 3266820000.0  a,b,c,d: 895.18823 -461.819 -1348.4417 673.9051
+Epoch 16000 : Training Cost: 3240736000.0  a,b,c,d: 905.0128 -537.541 -1462.6171 759.7118
+Epoch 17000 : Training Cost: 3215895000.0  a,b,c,d: 914.60065 -611.8676 -1571.9058 857.6638
+Epoch 18000 : Training Cost: 3192216800.0  a,b,c,d: 923.9603 -684.8093 -1676.4642 967.30475
+Epoch 19000 : Training Cost: 3169632300.0  a,b,c,d: 933.08594 -756.3582 -1776.4275 1088.2198
+Epoch 20000 : Training Cost: 3148046300.0  a,b,c,d: 941.9928 -826.6257 -1871.9355 1219.9702
+Epoch 21000 : Training Cost: 3127394800.0  a,b,c,d: 950.67896 -895.6205 -1963.0989 1362.1665
+Epoch 22000 : Training Cost: 3107608600.0  a,b,c,d: 959.1487 -963.38116 -2050.0586 1514.4026
+Epoch 23000 : Training Cost: 3088618200.0  a,b,c,d: 967.4355 -1029.9625 -2132.961 1676.2717
+Epoch 24000 : Training Cost: 3070361300.0  a,b,c,d: 975.52875 -1095.4292 -2211.854 1847.4485
+Epoch 25000 : Training Cost: 3052791300.0  a,b,c,d: 983.4346 -1159.7922 -2286.9412 2027.4857
+3052791300.0 983.4346 -1159.7922 -2286.9412 2027.4857
+
+ +
predictions = []
+for x in abscissa:
+  predictions.append((coefficient1*pow(x,3) + coefficient2*pow(x,2) + coefficient3*x + constant))
+plt.plot(abscissa , ordinate, 'ro', label ='Original data')
+plt.plot(abscissa, predictions, label ='Fitted line')
+plt.title('Cubic Regression Result')
+plt.legend()
+plt.show()
+
+ +

+ +

Quartic

+ +
with tf.Session() as sess:
+    sess.run(init)
+    for epoch in range(no_of_epochs):
+      for (x,y) in zip(abscissa, ordinate):
+        sess.run(optimizer4, feed_dict={X:x, Y:y})
+      if (epoch+1)%1000==0:
+        cost = sess.run(mse4,feed_dict={X:abscissa,Y:ordinate})
+        print("Epoch",(epoch+1), ": Training Cost:", cost," a,b,c,d:",sess.run(a),sess.run(b),sess.run(c),sess.run(d),sess.run(e))
+
+        training_cost = sess.run(mse4,feed_dict={X:abscissa,Y:ordinate})
+        coefficient1 = sess.run(a)
+        coefficient2 = sess.run(b)
+        coefficient3 = sess.run(c)
+        coefficient4 = sess.run(d)
+        constant = sess.run(e)
+
+print(training_cost, coefficient1, coefficient2, coefficient3, coefficient4, constant)
+
+ +
Epoch 1000 : Training Cost: 1902632600.0  a,b,c,d: 84.48304 52.210594 54.791424 142.51952 512.0343
+Epoch 2000 : Training Cost: 1854316200.0  a,b,c,d: 88.998955 13.073557 14.276088 223.55667 1056.4655
+Epoch 3000 : Training Cost: 1812812400.0  a,b,c,d: 92.9462 -22.331177 -15.262934 327.41858 1634.9054
+Epoch 4000 : Training Cost: 1775716000.0  a,b,c,d: 96.42522 -54.64535 -35.829437 449.5028 2239.1392
+Epoch 5000 : Training Cost: 1741494100.0  a,b,c,d: 99.524734 -84.43976 -49.181057 585.85876 2862.4915
+Epoch 6000 : Training Cost: 1709199600.0  a,b,c,d: 102.31984 -112.19895 -56.808075 733.1876 3499.6199
+Epoch 7000 : Training Cost: 1678261800.0  a,b,c,d: 104.87324 -138.32709 -59.9442 888.79626 4146.2944
+Epoch 8000 : Training Cost: 1648340600.0  a,b,c,d: 107.23536 -163.15173 -59.58964 1050.524 4798.979
+Epoch 9000 : Training Cost: 1619243400.0  a,b,c,d: 109.44742 -186.9409 -56.53944 1216.6432 5454.9463
+Epoch 10000 : Training Cost: 1590821900.0  a,b,c,d: 111.54233 -209.91287 -51.423084 1385.8513 6113.5137
+Epoch 11000 : Training Cost: 1563042200.0  a,b,c,d: 113.54405 -232.21953 -44.73371 1557.1084 6771.7046
+Epoch 12000 : Training Cost: 1535855600.0  a,b,c,d: 115.471565 -253.9838 -36.851135 1729.535 7429.069
+Epoch 13000 : Training Cost: 1509255300.0  a,b,c,d: 117.33939 -275.29697 -28.0714 1902.5308 8083.9634
+Epoch 14000 : Training Cost: 1483227000.0  a,b,c,d: 119.1605 -296.2472 -18.618649 2075.6094 8735.381
+Epoch 15000 : Training Cost: 1457726700.0  a,b,c,d: 120.94584 -316.915 -8.650095 2248.3247 9384.197
+Epoch 16000 : Training Cost: 1432777300.0  a,b,c,d: 122.69806 -337.30704 1.7027153 2420.5771 10028.871
+Epoch 17000 : Training Cost: 1408365000.0  a,b,c,d: 124.42179 -357.45245 12.33499 2592.2983 10669.157
+Epoch 18000 : Training Cost: 1384480000.0  a,b,c,d: 126.12332 -377.39734 23.168756 2763.0933 11305.027
+Epoch 19000 : Training Cost: 1361116800.0  a,b,c,d: 127.80568 -397.16415 34.160156 2933.0452 11935.669
+Epoch 20000 : Training Cost: 1338288100.0  a,b,c,d: 129.4674 -416.72803 45.259155 3101.7727 12561.179
+Epoch 21000 : Training Cost: 1315959700.0  a,b,c,d: 131.11403 -436.14285 56.4436 3269.3142 13182.058
+Epoch 22000 : Training Cost: 1294164700.0  a,b,c,d: 132.74377 -455.3779 67.6757 3435.3833 13796.807
+Epoch 23000 : Training Cost: 1272863600.0  a,b,c,d: 134.35779 -474.45316 78.96117 3600.264 14406.58
+Epoch 24000 : Training Cost: 1252052600.0  a,b,c,d: 135.9583 -493.38254 90.268616 3764.0078 15010.481
+Epoch 25000 : Training Cost: 1231713700.0  a,b,c,d: 137.54753 -512.1876 101.59372 3926.4897 15609.368
+1231713700.0 137.54753 -512.1876 101.59372 3926.4897 15609.368
+
+ +
predictions = []
+for x in abscissa:
+  predictions.append((coefficient1*pow(x,4) + coefficient2*pow(x,3) + coefficient3*pow(x,2) + coefficient4*x + constant))
+plt.plot(abscissa , ordinate, 'ro', label ='Original data')
+plt.plot(abscissa, predictions, label ='Fitted line')
+plt.title('Quartic Regression Result')
+plt.legend()
+plt.show()
+
+ +

+ +

Quintic

+ +
with tf.Session() as sess:
+    sess.run(init)
+    for epoch in range(no_of_epochs):
+      for (x,y) in zip(abscissa, ordinate):
+        sess.run(optimizer5, feed_dict={X:x, Y:y})
+      if (epoch+1)%1000==0:
+        cost = sess.run(mse5,feed_dict={X:abscissa,Y:ordinate})
+        print("Epoch",(epoch+1), ": Training Cost:", cost," a,b,c,d,e,f:",sess.run(a),sess.run(b),sess.run(c),sess.run(d),sess.run(e),sess.run(f))
+
+        training_cost = sess.run(mse5,feed_dict={X:abscissa,Y:ordinate})
+        coefficient1 = sess.run(a)
+        coefficient2 = sess.run(b)
+        coefficient3 = sess.run(c)
+        coefficient4 = sess.run(d)
+        coefficient5 = sess.run(e)
+        constant = sess.run(f)
+
+ +
Epoch 1000 : Training Cost: 1409200100.0  a,b,c,d,e,f: 7.949472 7.46219 55.626034 184.29028 484.00223 1024.0083
+Epoch 2000 : Training Cost: 1306882400.0  a,b,c,d,e,f: 8.732181 -4.0085897 73.25298 315.90103 904.08887 2004.9749
+Epoch 3000 : Training Cost: 1212606000.0  a,b,c,d,e,f: 9.732249 -16.90125 86.28379 437.06552 1305.055 2966.2188
+Epoch 4000 : Training Cost: 1123640400.0  a,b,c,d,e,f: 10.74851 -29.82692 98.59997 555.331 1698.4631 3917.9155
+Epoch 5000 : Training Cost: 1039694300.0  a,b,c,d,e,f: 11.75426 -42.598194 110.698326 671.64355 2085.5513 4860.8535
+Epoch 6000 : Training Cost: 960663550.0  a,b,c,d,e,f: 12.745439 -55.18337 122.644936 786.00214 2466.1638 5794.3735
+Epoch 7000 : Training Cost: 886438340.0  a,b,c,d,e,f: 13.721028 -67.57168 134.43822 898.3691 2839.9958 6717.659
+Epoch 8000 : Training Cost: 816913100.0  a,b,c,d,e,f: 14.679965 -79.75113 146.07385 1008.66895 3206.6692 7629.812
+Epoch 9000 : Training Cost: 751971500.0  a,b,c,d,e,f: 15.62181 -91.71608 157.55713 1116.7715 3565.8323 8529.976
+Epoch 10000 : Training Cost: 691508740.0  a,b,c,d,e,f: 16.545347 -103.4531 168.88321 1222.6348 3916.9785 9416.236
+Epoch 11000 : Training Cost: 635382000.0  a,b,c,d,e,f: 17.450052 -114.954254 180.03932 1326.1565 4259.842 10287.99
+Epoch 12000 : Training Cost: 583477250.0  a,b,c,d,e,f: 18.334944 -126.20821 191.02948 1427.2095 4593.8 11143.449
+Epoch 13000 : Training Cost: 535640400.0  a,b,c,d,e,f: 19.198917 -137.20206 201.84718 1525.6926 4918.5327 11981.633
+Epoch 14000 : Training Cost: 491722240.0  a,b,c,d,e,f: 20.041153 -147.92719 212.49709 1621.5496 5233.627 12800.468
+Epoch 15000 : Training Cost: 451559520.0  a,b,c,d,e,f: 20.860966 -158.37456 222.97133 1714.7141 5538.676 13598.337
+Epoch 16000 : Training Cost: 414988960.0  a,b,c,d,e,f: 21.657421 -168.53406 233.27422 1805.0874 5833.1978 14373.658
+Epoch 17000 : Training Cost: 381837920.0  a,b,c,d,e,f: 22.429693 -178.39536 243.39914 1892.5883 6116.847 15124.394
+Epoch 18000 : Training Cost: 351931300.0  a,b,c,d,e,f: 23.176882 -187.94789 253.3445 1977.137 6389.117 15848.417
+Epoch 19000 : Training Cost: 325074400.0  a,b,c,d,e,f: 23.898485 -197.18741 263.12512 2058.6716 6649.8037 16543.95
+Epoch 20000 : Training Cost: 301073570.0  a,b,c,d,e,f: 24.593851 -206.10497 272.72385 2137.1797 6898.544 17209.367
+Epoch 21000 : Training Cost: 279727000.0  a,b,c,d,e,f: 25.262104 -214.69217 282.14642 2212.6372 7135.217 17842.854
+Epoch 22000 : Training Cost: 260845550.0  a,b,c,d,e,f: 25.903376 -222.94969 291.4003 2284.9844 7359.4644 18442.408
+Epoch 23000 : Training Cost: 244218030.0  a,b,c,d,e,f: 26.517094 -230.8697 300.45532 2354.3003 7571.261 19007.49
+Epoch 24000 : Training Cost: 229660080.0  a,b,c,d,e,f: 27.102589 -238.44817 309.35342 2420.4185 7770.5728 19536.19
+Epoch 25000 : Training Cost: 216972400.0  a,b,c,d,e,f: 27.660324 -245.69016 318.10062 2483.3608 7957.354 20027.707
+216972400.0 27.660324 -245.69016 318.10062 2483.3608 7957.354 20027.707
+
+ +
predictions = []
+for x in abscissa:
+  predictions.append((coefficient1*pow(x,5) + coefficient2*pow(x,4) + coefficient3*pow(x,3) + coefficient4*pow(x,2) + coefficient5*x + constant))
+plt.plot(abscissa , ordinate, 'ro', label ='Original data')
+plt.plot(abscissa, predictions, label ='Fitted line')
+plt.title('Quintic Regression Result')
+plt.legend()
+plt.show()
+
+ +

+ +

Results and Conclusion

+ +

You just learnt Polynomial Regression using TensorFlow!

+ +

Notes

+ +

Overfitting

+ +
+
+

Overfitting refers to a model that models the training data too well. + Overfitting happens when a model learns the detail and noise in the training data to the extent that it negatively impacts the performance of the model on new data. This means that the noise or random fluctuations in the training data is picked up and learned as concepts by the model. The problem is that these concepts do not apply to new data and negatively impact the model's ability to generalise.

+
+
+ +
+

Source: Machine Learning Mastery

+
+ +

Basically if you train your machine learning model on a small dataset for a really large number of epochs, the model will learn all the deformities/noise in the data and will actually think that it is a normal part. Therefore when it will see some new data, it will discard that new data as noise and will impact the accuracy of the model in a negative manner

+]]>
+
+ + + + https://web.navan.dev/posts/2019-12-04-Google-Teachable-Machines.html + + + Image Classifier With Teachable Machines + + + Tutorial on creating a custom image classifier quickly with Google Teachable Machines + + https://web.navan.dev/posts/2019-12-04-Google-Teachable-Machines.html + Wed, 04 Dec 2019 18:23:00 -0000 + Image Classifier With Teachable Machines + +

Made for Google Code-In

+ +

Task Description

+ +

Using Glitch and the Teachable Machines, build a Book Detector with Tensorflow.js. When a book is recognized, the code would randomly suggest a book/tell a famous quote from a book. Here is an example Project to get you started: https://glitch.com/~voltaic-acorn

+ +

Details

+ +

1) Collecting Data

+ +

Teachable Machine allows you to create your dataset just by using your webcam! I created a database consisting of three classes ( Three Books ) and approximately grabbed 100 pictures for each book/class

+ +

+ +

2) Training

+ +

Training on teachable machines is as simple as clicking the train button. I did not even have to modify any configurations.

+ +

+ +

3) Finding Labels

+ +

Because I originally entered the entire name of the book and its author's name as the label, the class name got truncated (Note to self, use shorter class names :p ). I then modified the code to print the modified label names in an alert box.

+ +

+ +

+ +

4) Adding a suggestions function

+ +

I first added a text field on the main page and then modified the JavaScript file to suggest a similar book whenever the model predicted with an accuracy >= 98%

+ +

+ +

+ +

5) Running!

+ +

Here it is running!

+ +

+ +

+ +

Remix this project:-

+ +

https://luminous-opinion.glitch.me

+]]>
+
+ + + + https://web.navan.dev/posts/2019-05-05-Custom-Snowboard-Anemone-Theme.html + + + Creating your own custom theme for Snowboard or Anemone + + + Tutorial on creating your own custom theme for Snowboard or Anemone + + https://web.navan.dev/posts/2019-05-05-Custom-Snowboard-Anemone-Theme.html + Sun, 05 May 2019 12:34:00 -0000 + Creating your own custom theme for Snowboard or Anemone + +

Contents

+ +
    +
  • Getting Started
  • +
  • Theme Configuration
  • +
  • Creating Icons
  • +
  • Exporting Icons
  • +
  • Icon Masks
  • +
  • Packaging
  • +
  • Building the DEB
  • +
+ +

Getting Started

+ +

Note: Without the proper folder structure, your theme may not show up!

+ +
    +
  • Create a new folder called themeName.theme (Replace themeName with your desired theme name)
  • +
  • Within themeName.theme folder, create another folder called IconBundles (You cannot change this name)
  • +
+ +

Theme Configuration

+ +
    +
  • Now, inside the themeName.theme folder, create a file called Info.plist and paste the following
  • +
+ +
<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+  <plist version="1.0">
+  <dict>
+    <key>PackageName</key>
+    <string>ThemeName</string>
+    <key>ThemeType</key>
+    <string>Icons</string>
+  </dict>
+</plist>
+
+ +
    +
  • Replace PackageName with the name of the Package and replace ThemeName with the Theme Name
  • +
+ +

Now, you might ask what is the difference between PackageName and ThemeName?

+ +

Well, if for example you want to publish two variants of your icons, one dark and one white but you do not want the user to separately install them. +Then, you would name the package MyTheme and include two themes Blackie and White thus creating two entries. More about this in the end

+ +

Creating Icons

+ +
    +
  • Open up the Image Editor of your choice and create a new file having a resolution of 512x512
  • +
+ +

Note: Due to IconBundles, we just need to create the icons in one size and they get resized automatically :ghost:

+ +

Want to create rounded icons? +Create them squared only, we will learn how to apply masks!

+ +

Exporting Icons

+ +

Note: All icons must be saved as *.png (Tip: This means you can even create partially transparent icons!)

+ +
    +
  • All Icons must be saved in themeName.theme>IconBundles as bundleID-large.png
  • +
+ +
Finding BundleIDs
+ +

Stock Application BundleIDs

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameBundleID
App Storecom.apple.AppStore
Apple Watchcom.apple.Bridge
Calculatorcom.apple.calculator
Calendarcom.apple.mobilecal
Cameracom.apple.camera
Classroomcom.apple.classroom
Clockcom.apple.mobiletimer
Compasscom.apple.compass
FaceTimecom.apple.facetime
Filescom.apple.DocumentsApp
Game Centercom.apple.gamecenter
Healthcom.apple.Health
Homecom.apple.Home
iBookscom.apple.iBooks
iTunes Storecom.apple.MobileStore
Mailcom.apple.mobilemail
Mapscom.apple.Maps
Measurecom.apple.measure
Messagescom.apple.MobileSMS
Musiccom.apple.Music
Newscom.apple.news
Notescom.apple.mobilenotes
Phonecom.apple.mobilephone
Photo Boothcom.apple.Photo-Booth
Photoscom.apple.mobileslideshow
Playgroundscom.apple.Playgrounds
Podcastscom.apple.podcasts
Reminderscom.apple.reminders
Safaricom.apple.mobilesafari
Settingscom.apple.Preferences
Stockscom.apple.stocks
Tipscom.apple.tips
TVcom.apple.tv
Videoscom.apple.videos
Voice Memoscom.apple.VoiceMemos
Walletcom.apple.Passbook
Weathercom.apple.weather
+ +

3rd Party Applications BundleID +Click here

+ +

Icon Masks

+ +
    +
  • Getting the Classic Rounded Rectangle Masks
  • +
+ +

In your Info.plist file add the following value between <dict> and

+ +
<key>IB-MaskIcons</key>
+    <true/>
+
+ +
    +
  • Custom Icon Masks
  • +
+ +

NOTE: This is an optional step, if you do not want Icon Masks, skip this step

+ +
    +
  • Inside your themeName.theme folder, create another folder called 'Bundles' +
      +
    • Inside Bundles create another folder called com.apple.mobileicons.framework
    • +
  • +
+ +

Designing Masks

+ +

Masking does not support IconBundles, therefore you need to save the masks for each of the following

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FileResolution
AppIconMask@2x~ipad.png152x152
AppIconMask@2x~iphone.png120x120
AppIconMask@3x~ipad.png180x180
AppIconMask@3x~iphone.png180x180
AppIconMask~ipad.png76x76
DocumentBadgeMask-20@2x.png40x40
DocumentBadgeMask-145@2x.png145x145
GameAppIconMask@2x.png84x84
NotificationAppIconMask@2x.png40x40
NotificationAppIconMask@3x.png60x60
SpotlightAppIconMask@2x.png80x80
SpotlightAppIconMask@3x.png120x120
TableIconMask@2x.png58x58
TableIconOutline@2x.png58x58
+ +
    +
  • While creating the mask, make sure that the background is not a solid colour and is transparent
  • +
  • Whichever area you want to make visible, it should be coloured in black
  • +
+ +

Example (Credits: Pinpal):

+ +

Credit: Pinpal

+ +

would result in

+ +

Credit: Pinpal

+ +

Packaging

+ +
    +
  • Create a new folder outside themeName.theme with the name you want to be shown on Cydia, e.g themeNameForCydia
  • +
  • Create another folder called DEBIAN in themeNameForCydia (It needs to be uppercase)
  • +
  • In DEBIAN create an extension-less file called control and edit it using your favourite text editor
  • +
+ +

Paste the following in it, replacing yourname, themename, Theme Name, A theme with beautiful icons! and Your Name with your details:

+ +
Package: com.yourname.themename
+Name: Theme Name
+Version: 1.0
+Architecture: iphoneos-arm
+Description: A theme with beautiful icons!
+Author: Your Name
+Maintainer: Your Name
+Section: Themes
+
+ +
    +
  • Important Notes:

    + +
      +
    • The package field MUST be lower case!
    • +
    • The version field MUST be changed every-time you update your theme!
    • +
    • The control file MUST have an extra blank line at the bottom!
    • +
  • +
  • Now, Create another folder called Library in themeNameForCydia

  • +
  • In Library create another folder called Themes
  • +
  • Finally, copy themeName.theme to the Themes folder (Copy the entire folder, not just the contents)
  • +
+ +

Building the DEB

+ +

For building the deb you need a *nix system, otherwise you can build it using your iPhones

+ +
Pre-Requisite for MacOS users
+ +

1) Install Homebrew /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)" (Run this in the terminal) +2) Install dpkg, by running brew install dpkg

+ +

There is a terrible thing called .DS_Store which if not removed, will cause a problem during either build or installation

+ +
    +
  • To remove this we first need to open the folder in the terminal

  • +
  • Launch the Terminal and then drag-and-drop the 'themeNameForCydia' folder on the Terminal icon in the dock

  • +
  • Now, run find . -name "*.DS_Store" -type f -delete
  • +
+ +
Pre-Requisite for Windows Users
+ +
    +
  • SSH into your iPhone and drag and drop the themeNameForCydia folder on the terminal
  • +
+ +
Common Instructions
+ +
    +
  • You should be at the root of the folder in the terminal, i.e Inside themeNameForCydia
  • +
  • running ls should show the following output
  • +
+ +
DEBIAN  Library
+
+ +
    +
  • Now, in the terminal enter the following cd .. && dpkg -b themeNameForCydia
  • +
+ +

Now you will have the themeNameForCydia.deb in the same directory

+ +

You can share this with your friends :+1:

+]]>
+
+ + + + https://web.navan.dev/posts/2020-04-13-Fixing-X11-Error-AmberTools-macOS.html + + + Fixing X11 Error on macOS Catalina for AmberTools 18/19 + + + Fixing Could not find the X11 libraries; you may need to edit config.h, AmberTools macOS Catalina + + https://web.navan.dev/posts/2020-04-13-Fixing-X11-Error-AmberTools-macOS.html + Mon, 13 Apr 2020 11:41:00 -0000 + Fixing X11 Error on macOS Catalina for AmberTools 18/19 + +

I was trying to install AmberTools on my macOS Catalina Installation. Running ./configure -macAccelerate clang gave me an error that it could not find X11 libraries, even though locate libXt showed that my installation was correct.

+ +

Error:

+ +
Could not find the X11 libraries; you may need to edit config.h
+   to set the XHOME and XLIBS variables.
+Error: The X11 libraries are not in the usual location !
+       To search for them try the command: locate libXt
+       On new Fedora OS's install the libXt-devel libXext-devel
+       libX11-devel libICE-devel libSM-devel packages.
+       On old Fedora OS's install the xorg-x11-devel package.
+       On RedHat OS's install the XFree86-devel package.
+       On Ubuntu OS's install the xorg-dev and xserver-xorg packages.
+
+          ...more info for various linuxes at ambermd.org/ubuntu.html
+
+       To build Amber without XLEaP, re-run configure with '-noX11:
+            ./configure -noX11 --with-python /usr/local/bin/python3 -macAccelerate clang
+Configure failed due to the errors above!
+
+ +

I searched on Google for a solution. Sadly, there was not even a single thread which had a solution about this error.

+ +

The Fix

+ +

Simply reinstalling XQuartz using homebrew fixed the error brew cask reinstall xquartz

+ +

If you do not have XQuartz installed, you need to run brew cask install xquartz

+]]>
+
+ + + + https://web.navan.dev/posts/2019-12-10-TensorFlow-Model-Prediction.html + + + Making Predictions using Image Classifier (TensorFlow) + + + Making predictions for image classification models built using TensorFlow + + https://web.navan.dev/posts/2019-12-10-TensorFlow-Model-Prediction.html + Tue, 10 Dec 2019 11:10:00 -0000 + Making Predictions using Image Classifier (TensorFlow) + +

This was tested on TF 2.x and works as of 2019-12-10

+ +

If you want to understand how to make your own custom image classifier, please refer to my previous post.

+ +

If you followed my last post, then you created a model which took an image of dimensions 50x50 as an input.

+ +

First we import the following if we have not imported these before

+ +
import cv2
+import os
+
+ +

Then we read the file using OpenCV.

+ +
image=cv2.imread(imagePath)
+
+ +

The cv2. imread() function returns a NumPy array representing the image. Therefore, we need to convert it before we can use it.

+ +
image_from_array = Image.fromarray(image, 'RGB')
+
+ +

Then we resize the image

+ +
size_image = image_from_array.resize((50,50))
+
+ +

After this we create a batch consisting of only one image

+ +
p = np.expand_dims(size_image, 0)
+
+ +

We then convert this uint8 datatype to a float32 datatype

+ +
img = tf.cast(p, tf.float32)
+
+ +

Finally we make the prediction

+ +
print(['Infected','Uninfected'][np.argmax(model.predict(img))])
+
+ +

Infected

+]]>
+
+ + + + https://web.navan.dev/posts/2020-07-01-Install-rdkit-colab.html + + + Installing RDKit on Google Colab + + + Install RDKit on Google Colab with one code snippet. + + https://web.navan.dev/posts/2020-07-01-Install-rdkit-colab.html + Wed, 01 Jul 2020 14:23:00 -0000 + Installing RDKit on Google Colab + +

RDKit is one of the most integral part of any Cheminfomatic specialist's toolkit but it is notoriously difficult to install unless you already have conda installed. I originally found this in a GitHub Gist but I have not been able to find that gist again :/

+ +

Just copy and paste this in a Colab cell and it will install it 👍

+ +
import sys
+import os
+import requests
+import subprocess
+import shutil
+from logging import getLogger, StreamHandler, INFO
+
+
+logger = getLogger(__name__)
+logger.addHandler(StreamHandler())
+logger.setLevel(INFO)
+
+
+def install(
+        chunk_size=4096,
+        file_name="Miniconda3-latest-Linux-x86_64.sh",
+        url_base="https://repo.continuum.io/miniconda/",
+        conda_path=os.path.expanduser(os.path.join("~", "miniconda")),
+        rdkit_version=None,
+        add_python_path=True,
+        force=False):
+    """install rdkit from miniconda
+    ```
+    import rdkit_installer
+    rdkit_installer.install()
+    ```
+    """
+
+    python_path = os.path.join(
+        conda_path,
+        "lib",
+        "python{0}.{1}".format(*sys.version_info),
+        "site-packages",
+    )
+
+    if add_python_path and python_path not in sys.path:
+        logger.info("add {} to PYTHONPATH".format(python_path))
+        sys.path.append(python_path)
+
+    if os.path.isdir(os.path.join(python_path, "rdkit")):
+        logger.info("rdkit is already installed")
+        if not force:
+            return
+
+        logger.info("force re-install")
+
+    url = url_base + file_name
+    python_version = "{0}.{1}.{2}".format(*sys.version_info)
+
+    logger.info("python version: {}".format(python_version))
+
+    if os.path.isdir(conda_path):
+        logger.warning("remove current miniconda")
+        shutil.rmtree(conda_path)
+    elif os.path.isfile(conda_path):
+        logger.warning("remove {}".format(conda_path))
+        os.remove(conda_path)
+
+    logger.info('fetching installer from {}'.format(url))
+    res = requests.get(url, stream=True)
+    res.raise_for_status()
+    with open(file_name, 'wb') as f:
+        for chunk in res.iter_content(chunk_size):
+            f.write(chunk)
+    logger.info('done')
+
+    logger.info('installing miniconda to {}'.format(conda_path))
+    subprocess.check_call(["bash", file_name, "-b", "-p", conda_path])
+    logger.info('done')
+
+    logger.info("installing rdkit")
+    subprocess.check_call([
+        os.path.join(conda_path, "bin", "conda"),
+        "install",
+        "--yes",
+        "-c", "rdkit",
+        "python=={}".format(python_version),
+        "rdkit" if rdkit_version is None else "rdkit=={}".format(rdkit_version)])
+    logger.info("done")
+
+    import rdkit
+    logger.info("rdkit-{} installation finished!".format(rdkit.__version__))
+
+
+if __name__ == "__main__":
+    install()
+
+]]>
+
+ + + + https://web.navan.dev/posts/2020-11-17-Lets-Encrypt-DuckDns.html + + + Generating HTTPS Certificate using DNS a Challenge through Let's Encrypt + + + Short code-snippet to generate HTTPS certificates using the DNS Challenge through Lets Encrypt for a web-server using DuckDNS. + + https://web.navan.dev/posts/2020-11-17-Lets-Encrypt-DuckDns.html + Tue, 17 Nov 2020 15:04:00 -0000 + Generating HTTPS Certificate using DNS a Challenge through Let's Encrypt + +

I have a Raspberry-Pi running a Flask app through Gunicorn (Ubuntu 20.04 LTS). I am exposing it to the internet using DuckDNS.

+ +

Dependencies

+ +
sudo apt update && sudo apt install certbot -y
+
+ +

Get the Certificate

+ +
sudo certbot certonly --manual --preferred-challenges dns-01 --email senpai@email.com -d mydomain.duckdns.org
+
+ +

After you accept that you are okay with you IP address being logged, it will prompt you with updating your dns record. You need to create a new TXT record in the DNS settings for your domain.

+ +

For DuckDNS users it is as simple as entering this URL in their browser:

+ +
http://duckdns.org/update?domains=mydomain&token=duckdnstoken&txt=certbotdnstxt
+
+ +

Where mydomain is your DuckDNS domain, duckdnstoken is your DuckDNS Token ( Found on the dashboard when you login) and certbotdnstxt is the TXT record value given by the prompt.

+ +

You can check if the TXT records have been updated by using the dig command:

+ +
dig navanspi.duckdns.org TXT
+; <<>> DiG 9.16.1-Ubuntu <<>> navanspi.duckdns.org TXT
+;; global options: +cmd
+;; Got answer:
+;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 27592
+;; flags: qr rd ra; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 1
+
+;; OPT PSEUDOSECTION:
+; EDNS: version: 0, flags:; udp: 65494
+;; QUESTION SECTION:
+;navanspi.duckdns.org.        IN    TXT
+
+;; ANSWER SECTION:
+navanspi.duckdns.org.    60    IN    TXT    "4OKbijIJmc82Yv2NiGVm1RmaBHSCZ_230qNtj9YA-qk"
+
+;; Query time: 275 msec
+;; SERVER: 127.0.0.53#53(127.0.0.53)
+;; WHEN: Tue Nov 17 15:23:15 IST 2020
+;; MSG SIZE  rcvd: 105
+
+ +

DuckDNS almost instantly propagates the changes but for other domain hosts, it could take a while.

+ +

Once you can ensure that the TXT record changes has been successfully applied and is visible through the dig command, press enter on the Certbot prompt and your certificate should be generated.

+ +

Renewing

+ +

As we manually generated the certificate certbot renew will fail, to renew the certificate you need to simply re-generate the certificate using the above steps.

+ +

Using the Certificate with Gunicorn

+ +

Example Gunicorn command for running a web-app:

+ +
gunicorn api:app -k uvicorn.workers.UvicornWorker -b 0.0.0.0:7589
+
+ +

To use the certificate with it, simply copy the cert.pem and privkey.pem to your working directory ( change the appropriate permissions ) and include them in the command

+ +
gunicorn api:app -k uvicorn.workers.UvicornWorker -b 0.0.0.0:7589 --certfile=cert.pem --keyfile=privkey.pem
+
+ +

Caveats with copying the certificate: If you renew the certificate you will have to re-copy the files

+]]>
+
+ + + + https://web.navan.dev/posts/2019-12-22-Fake-News-Detector.html + + + Building a Fake News Detector with Turicreate + + + In this tutorial we will build a fake news detecting app from scratch, using Turicreate for the machine learning model and SwiftUI for building the app + + https://web.navan.dev/posts/2019-12-22-Fake-News-Detector.html + Sun, 22 Dec 2019 11:10:00 -0000 + Building a Fake News Detector with Turicreate + +

In this tutorial we will build a fake news detecting app from scratch, using Turicreate for the machine learning model and SwiftUI for building the app

+ +

Note: These commands are written as if you are running a jupyter notebook.

+ +

Building the Machine Learning Model

+ +

Data Gathering

+ +

To build a classifier, you need a lot of data. George McIntire (GH: @joolsa) has created a wonderful dataset containing the headline, body and whether it is fake or real. +Whenever you are looking for a dataset, always try searching on Kaggle and GitHub before you start building your own

+ +

Dependencies

+ +

I used a Google Colab instance for training my model. If you also plan on using Google Colab then I recommend choosing a GPU Instance (It is Free) +This allows you to train the model on the GPU. Turicreate is built on top of Apache's MXNet Framework, for us to use GPU we need to install +a CUDA compatible MXNet package.

+ +
!pip install turicreate
+!pip uninstall -y mxnet
+!pip install mxnet-cu100==1.4.0.post0
+
+ +

If you do not wish to train on GPU or are running it on your computer, you can ignore the last two lines

+ +

Downloading the Dataset

+ +
!wget -q "https://github.com/joolsa/fake_real_news_dataset/raw/master/fake_or_real_news.csv.zip"
+!unzip fake_or_real_news.csv.zip
+
+ +

Model Creation

+ +
import turicreate as tc
+tc.config.set_num_gpus(-1) # If you do not wish to use GPUs, set it to 0
+
+ +
dataSFrame = tc.SFrame('fake_or_real_news.csv')
+
+ +

The dataset contains a column named "X1", which is of no use to us. Therefore, we simply drop it

+ +
dataSFrame.remove_column('X1')
+
+ +

Splitting Dataset

+ +
train, test = dataSFrame.random_split(.9)
+
+ +

Training

+ +
model = tc.text_classifier.create(
+    dataset=train,
+    target='label',
+    features=['title','text']
+)
+
+ +
+-----------+----------+-----------+--------------+-------------------+---------------------+
+| Iteration | Passes   | Step size | Elapsed Time | Training Accuracy | Validation Accuracy |
++-----------+----------+-----------+--------------+-------------------+---------------------+
+| 0         | 2        | 1.000000  | 1.156349     | 0.889680          | 0.790036            |
+| 1         | 4        | 1.000000  | 1.359196     | 0.985952          | 0.918149            |
+| 2         | 6        | 0.820091  | 1.557205     | 0.990260          | 0.914591            |
+| 3         | 7        | 1.000000  | 1.684872     | 0.998689          | 0.925267            |
+| 4         | 8        | 1.000000  | 1.814194     | 0.999063          | 0.925267            |
+| 9         | 14       | 1.000000  | 2.507072     | 1.000000          | 0.911032            |
++-----------+----------+-----------+--------------+-------------------+---------------------+
+
+ +

Testing the Model

+ +
test_predictions = model.predict(test)
+accuracy = tc.evaluation.accuracy(test['label'], test_predictions)
+print(f'Topic classifier model has a testing accuracy of {accuracy*100}% ', flush=True)
+
+ +
Topic classifier model has a testing accuracy of 92.3076923076923%
+
+ +

We have just created our own Fake News Detection Model which has an accuracy of 92%!

+ +
example_text = {"title": ["Middling ‘Rise Of Skywalker’ Review Leaves Fan On Fence About Whether To Threaten To Kill Critic"], "text": ["Expressing ambivalence toward the relatively balanced appraisal of the film, Star Wars fan Miles Ariely admitted Thursday that an online publication’s middling review of The Rise Of Skywalker had left him on the fence about whether he would still threaten to kill the critic who wrote it. “I’m really of two minds about this, because on the one hand, he said the new movie fails to live up to the original trilogy, which makes me at least want to throw a brick through his window with a note telling him to watch his back,” said Ariely, confirming he had already drafted an eight-page-long death threat to Stan Corimer of the website Screen-On Time, but had not yet decided whether to post it to the reviewer’s Facebook page. “On the other hand, though, he commended J.J. Abrams’ skillful pacing and faithfulness to George Lucas’ vision, which makes me wonder if I should just call the whole thing off. Now, I really don’t feel like camping outside his house for hours. Maybe I could go with a response that’s somewhere in between, like, threatening to kill his dog but not everyone in his whole family? I don’t know. This is a tough one.” At press time, sources reported that Ariely had resolved to wear his Ewok costume while he murdered the critic in his sleep."]}
+example_prediction = model.classify(tc.SFrame(example_text))
+print(example_prediction, flush=True)
+
+ +
+-------+--------------------+
+| class |    probability     |
++-------+--------------------+
+|  FAKE | 0.9245648658345308 |
++-------+--------------------+
+[1 rows x 2 columns]
+
+ +

Exporting the Model

+ +
model_name = 'FakeNews'
+coreml_model_name = model_name + '.mlmodel'
+exportedModel = model.export_coreml(coreml_model_name)
+
+ +

Note: To download files from Google Colab, simply click on the files section in the sidebar, right click on filename and then click on download

+ +

Link to Colab Notebook

+ +

Building the App using SwiftUI

+ +

Initial Setup

+ +

First we create a single view app (make sure you check the use SwiftUI button)

+ +

Then we copy our .mlmodel file to our project (Just drag and drop the file in the XCode Files Sidebar)

+ +

Our ML Model does not take a string directly as an input, rather it takes bag of words as an input. +Description: The bag-of-words model is a simplifying representation used in NLP, in which a text is represented as a bag of words, without any regard for grammar or order, but noting multiplicity

+ +

We define our bag of words function

+ +
func bow(text: String) -> [String: Double] {
+        var bagOfWords = [String: Double]()
+
+        let tagger = NSLinguisticTagger(tagSchemes: [.tokenType], options: 0)
+        let range = NSRange(location: 0, length: text.utf16.count)
+        let options: NSLinguisticTagger.Options = [.omitPunctuation, .omitWhitespace]
+        tagger.string = text
+
+        tagger.enumerateTags(in: range, unit: .word, scheme: .tokenType, options: options) { _, tokenRange, _ in
+            let word = (text as NSString).substring(with: tokenRange)
+            if bagOfWords[word] != nil {
+                bagOfWords[word]! += 1
+            } else {
+                bagOfWords[word] = 1
+            }
+        }
+
+        return bagOfWords
+    }
+
+ +

We also declare our variables

+ +
@State private var title: String = ""
+@State private var headline: String = ""
+@State private var alertTitle = ""
+@State private var alertText = ""
+@State private var showingAlert = false
+
+ +

Finally, we implement a simple function which reads the two text fields, creates their bag of words representation and displays an alert with the appropriate result

+ +

Complete Code

+ +
import SwiftUI
+
+struct ContentView: View {
+    @State private var title: String = ""
+    @State private var headline: String = ""
+
+    @State private var alertTitle = ""
+    @State private var alertText = ""
+    @State private var showingAlert = false
+
+    var body: some View {
+        NavigationView {
+            VStack(alignment: .leading) {
+                Text("Headline").font(.headline)
+                TextField("Please Enter Headline", text: $title)
+                    .lineLimit(nil)
+                Text("Body").font(.headline)
+                TextField("Please Enter the content", text: $headline)
+                .lineLimit(nil)
+            }
+                .navigationBarTitle("Fake News Checker")
+            .navigationBarItems(trailing:
+                Button(action: classifyFakeNews) {
+                    Text("Check")
+                })
+            .padding()
+                .alert(isPresented: $showingAlert){
+                    Alert(title: Text(alertTitle), message: Text(alertText), dismissButton: .default(Text("OK")))
+            }
+        }
+
+    }
+
+    func classifyFakeNews(){
+        let model = FakeNews()
+        let myTitle = bow(text: title)
+        let myText = bow(text: headline)
+        do {
+            let prediction = try model.prediction(title: myTitle, text: myText)
+            alertTitle = prediction.label
+            alertText = "It is likely that this piece of news is \(prediction.label.lowercased())."
+            print(alertText)
+        } catch {
+            alertTitle = "Error"
+            alertText = "Sorry, could not classify if the input news was fake or not."
+        }
+
+        showingAlert = true
+    }
+    func bow(text: String) -> [String: Double] {
+        var bagOfWords = [String: Double]()
+
+        let tagger = NSLinguisticTagger(tagSchemes: [.tokenType], options: 0)
+        let range = NSRange(location: 0, length: text.utf16.count)
+        let options: NSLinguisticTagger.Options = [.omitPunctuation, .omitWhitespace]
+        tagger.string = text
+
+        tagger.enumerateTags(in: range, unit: .word, scheme: .tokenType, options: options) { _, tokenRange, _ in
+            let word = (text as NSString).substring(with: tokenRange)
+            if bagOfWords[word] != nil {
+                bagOfWords[word]! += 1
+            } else {
+                bagOfWords[word] = 1
+            }
+        }
+
+        return bagOfWords
+    }
+}
+
+struct ContentView_Previews: PreviewProvider {
+    static var previews: some View {
+        ContentView()
+    }
+}
+
+]]>
+
+ + + + https://web.navan.dev/posts/2020-12-1-HTML-JS-RSS-Feed.html + + + RSS Feed written in HTML + JavaScript + + + Short code-snippet for an RSS feed, written in HTML and JavaScript + + https://web.navan.dev/posts/2020-12-1-HTML-JS-RSS-Feed.html + Tue, 01 Dec 2020 20:52:00 -0000 + RSS Feed written in HTML + JavaScript + +

If you want to directly open the HTML file in your browser after saving, don't forget to set CORS_PROXY=""

+ +
<!doctype html>
+<html lang="en">
+<head>
+  <meta charset="utf-8">
+  <meta name="viewport" content="width=device-width, initial-scale=1">
+    <title>
+        RSS Feed
+    </title>
+    <link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/css/bootstrap.min.css" integrity="sha384-MCw98/SFnGE8fJT3GXwEOngsV7Zt27NXFoaoApmYm81iuXoPkFOJwJ8ERdknLPMO" crossorigin="anonymous">
+</head>
+<body>
+
+<h1 align="center" class="display-1">RSS Feed</h1>
+<main>
+    <div class="container">
+    <div class="list-group pb-4" id="contents"></div>
+<div id="feed">
+</div></div>
+</main>
+
+<script src="https://gitcdn.xyz/repo/rbren/rss-parser/master/dist/rss-parser.js"></script>
+<script>
+
+const feeds = {
+    "BuzzFeed - India": {
+      "link":"https://www.buzzfeed.com/in.xml",
+      "summary":true
+    },
+    "New Yorker": {
+      "link":"http://www.newyorker.com/feed/news",
+    },
+    "Vox":{
+      "link":"https://www.vox.com/rss/index.xml",
+      "limit": 3
+    },
+    "r/Jokes":{
+      "link":"https://reddit.com/r/Jokes/hot/.rss?sort=hot",
+      "ignore": ["repost","discord"]
+    }
+}
+
+const config_extra = {
+"Responsive-Images": true,
+"direct-link": false,
+"show-date":false,
+"left-column":false,
+"defaults": {
+  "limit": 5,
+  "summary": true
+}
+}
+
+const CORS_PROXY = "https://cors-anywhere.herokuapp.com/"
+
+var contents_title = document.createElement("h2")
+contents_title.textContent = "Contents"
+contents_title.classList.add("pb-1")
+document.getElementById("contents").appendChild(contents_title)
+
+async function myfunc(key){
+
+  var count_lim = feeds[key]["limit"]
+  var count_lim = (count_lim === undefined) ? config_extra["defaults"]["limit"] : count_lim
+
+  var show_summary = feeds[key]["summary"]
+  var show_summary = (show_summary === undefined) ? config_extra["defaults"]["summary"] : show_summary
+
+  var ignore_tags = feeds[key]["ignore"]
+  var ignore_tags = (ignore_tags === undefined) ? [] : ignore_tags
+
+  var contents = document.createElement("a")
+  contents.href = "#" + key
+  contents.classList.add("list-group-item","list-group-item-action")
+  contents.textContent = key
+  document.getElementById("contents").appendChild(contents)
+  var feed_div = document.createElement("div")
+  feed_div.id = key
+  feed_div.setAttribute("id", key);
+  var title = document.createElement("h2");
+  title.textContent = "From " + key;
+  title.classList.add("pb-1")
+  feed_div.appendChild(title)
+  document.getElementById("feed").appendChild(feed_div)
+  var parser = new RSSParser();
+  var countPosts = 0
+  parser.parseURL(CORS_PROXY + feeds[key]["link"], function(err, feed) {
+    if (err) throw err;
+    feed.items.forEach(function(entry) {
+      if (countPosts < count_lim) {
+
+      var skip = false
+      for(var i = 0; i < ignore_tags.length; i++) {
+        if (entry.title.includes(ignore_tags[i])){
+          var skip = true
+        } else if (entry.content.includes(ignore_tags[i])){
+          var skip = true
+        }
+      }
+
+      if (!skip) {
+
+      var node = document.createElement("div");
+      node.classList.add("card","mb-3");
+      var row = document.createElement("div")
+      row.classList.add("row","no-gutters")
+
+      if (config_extra["left-column"]){
+      var left_col = document.createElement("div")
+      left_col.classList.add("col-md-2")
+      var left_col_body = document.createElement("div")
+      left_col_body.classList.add("card-body")
+      }
+
+      var right_col = document.createElement("div")
+      if (config_extra["left-column"]){
+        right_col.classList.add("col-md-10")
+      }
+      var node_title = document.createElement("h5")
+
+      node_title.classList.add("card-header")
+      node_title.innerHTML = entry.title
+
+      node_body = document.createElement("div")
+      node_body.classList.add("card-body")
+
+      node_content = document.createElement("p")
+
+      if (show_summary){
+        node_content.innerHTML = entry.content
+      }
+      node_content.classList.add("card-text")
+
+      if (config_extra["direct-link"]){
+      node_link = document.createElement("p")
+      node_link.classList.add("card-text")
+      node_link.innerHTML = "<b>Link:</b> <a href='" + entry.link +"'>Direct Link</a>"
+      if (config_extra["left-column"]){
+      left_col_body.appendChild(node_link)
+        } else {
+          node_content.appendChild(node_link)
+        }
+      }
+
+      if (config_extra["show-date"]){
+        node_date = document.createElement("p")
+        node_date.classList.add("card-text")
+        node_date.innerHTML = "<p><b>Date: </b>" + entry.pubDate + "</p>"
+        if (config_extra["left-column"]){
+        left_col_body.appendChild(node_date)
+          } else {
+            node_content.appendChild(node_date)
+
+        }
+      }
+
+      node.appendChild(node_title)
+
+      node_body.appendChild(node_content)
+
+      right_col.appendChild(node_body)
+
+      if (config_extra["left-column"]){
+        left_col.appendChild(left_col_body)
+        row.appendChild(left_col)
+      }
+
+      row.appendChild(right_col)
+
+      node.appendChild(row)
+
+      document.getElementById(key).appendChild(node)
+      countPosts+=1
+    }
+    }
+  })
+
+  if (config_extra["Responsive-Images"]){
+  var inputs = document.getElementsByTagName('img')
+      for(var i = 0; i < inputs.length; i++) {
+        inputs[i].classList.add("img-fluid")
+      }
+  }
+
+  })
+
+  return true
+}
+(async () => {
+for(var key in feeds) {
+  let result = await myfunc(key);
+}})();
+
+</script>
+<noscript>Uh Oh! Your browser does not support JavaScript or JavaScript is currently disabled. Please enable JavaScript or switch to a different browser.</noscript>
+</body></html>
+
+]]>
+
+ + + + https://web.navan.dev/posts/2020-01-19-Connect-To-Bluetooth-Devices-Linux-Terminal.html + + + How to setup Bluetooth on a Raspberry Pi + + + Connecting to Bluetooth Devices using terminal, tested on Raspberry Pi Zero W + + https://web.navan.dev/posts/2020-01-19-Connect-To-Bluetooth-Devices-Linux-Terminal.html + Sun, 19 Jan 2020 15:27:00 -0000 + How to setup Bluetooth on a Raspberry Pi + +

This was tested on a Raspberry Pi Zero W

+ +

Enter in the Bluetooth Mode

+ +

pi@raspberrypi:~ $ bluetoothctl

+ +

[bluetooth]# agent on

+ +

[bluetooth]# default-agent

+ +

[bluetooth]# scan on

+ +

To Pair

+ +

While being in bluetooth mode

+ +

[bluetooth]# pair XX:XX:XX:XX:XX:XX

+ +

To Exit out of bluetoothctl anytime, just type exit

+]]>
+
+ + + + https://web.navan.dev/posts/2020-01-14-Converting-between-PIL-NumPy.html + + + Converting between image and NumPy array + + + Short code snippet for converting between PIL image and NumPy arrays. + + https://web.navan.dev/posts/2020-01-14-Converting-between-PIL-NumPy.html + Tue, 14 Jan 2020 00:10:00 -0000 + Converting between image and NumPy array + +
import numpy
+import PIL
+
+# Convert PIL Image to NumPy array
+img = PIL.Image.open("foo.jpg")
+arr = numpy.array(img)
+
+# Convert array to Image
+img = PIL.Image.fromarray(arr)
+
+ +

Saving an Image

+ +
try:
+    img.save(destination, "JPEG", quality=80, optimize=True, progressive=True)
+except IOError:
+    PIL.ImageFile.MAXBLOCK = img.size[0] * img.size[1]
+    img.save(destination, "JPEG", quality=80, optimize=True, progressive=True)
+
+]]>
+
+ + + + https://web.navan.dev/posts/2019-12-08-Splitting-Zips.html + + + Splitting ZIPs into Multiple Parts + + + Short code snippet for splitting zips. + + https://web.navan.dev/posts/2019-12-08-Splitting-Zips.html + Sun, 08 Dec 2019 13:27:00 -0000 + Splitting ZIPs into Multiple Parts + +

Tested on macOS

+ +

Creating the archive:

+ +
zip -r -s 5 oodlesofnoodles.zip website/
+
+ +

5 stands for each split file's size (in mb; kb and gb can also be specified)

+ +

For encrypting the zip:

+ +
zip -er -s 5 oodlesofnoodles.zip website
+
+ +

Extracting Files

+ +

First we need to collect all parts, then

+ +
zip -F oodlesofnoodles.zip --out merged.zip
+
+]]>
+
+ + + + https://web.navan.dev/posts/2020-06-02-Compiling-AutoDock-Vina-on-iOS.html + + + Compiling AutoDock Vina on iOS + + + Compiling AutoDock Vina on iOS + + https://web.navan.dev/posts/2020-06-02-Compiling-AutoDock-Vina-on-iOS.html + Tue, 02 Jun 2020 23:23:00 -0000 + Compiling AutoDock Vina on iOS + +

Why? Because I can.

+ +

Installing makedepend

+ +

makedepend is a Unix tool used to generate dependencies of C source files. Most modern programs do not use this anymore, but then again AutoDock Vina's source code hasn't been changed since 2011. The first hurdle came when I saw that there was no makedepend command, neither was there any package on any development repository for iOS. So, I tracked down the original source code for makedepend (https://github.com/DerellLicht/makedepend). According to the repository this is actually the source code for the makedepend utility that came with some XWindows distribution back around Y2K. I am pretty sure there is a problem with my current compiler configuration because I had to manually edit the Makefile to provide the path to the iOS SDKs using the -isysroot flag.

+ +

Editing the Makefile

+ +

Original Makefile ( I used the provided mac Makefile base )

+ +
BASE=/usr/local
+BOOST_VERSION=1_41
+BOOST_INCLUDE = $(BASE)/include
+C_PLATFORM=-arch i386 -arch ppc -isysroot /Developer/SDKs/MacOSX10.5.sdk -mmacosx-version-min=10.4
+GPP=/usr/bin/g++
+C_OPTIONS= -O3 -DNDEBUG
+BOOST_LIB_VERSION=
+
+include ../../makefile_common
+
+ +

I installed Boost 1.68.0-1 from Sam Bingner's repository. ( Otherwise I would have had to compile boost too 😫 )

+ +

Edited Makefile

+ +
BASE=/usr
+BOOST_VERSION=1_68
+BOOST_INCLUDE = $(BASE)/include
+C_PLATFORM=-arch arm64 -isysroot /var/sdks/Latest.sdk
+GPP=/usr/bin/g++
+C_OPTIONS= -O3 -DNDEBUG
+BOOST_LIB_VERSION=
+
+include ../../makefile_common
+
+
+ +

Updating the Source Code

+ +

Of course since Boost 1.41 many things have been added and deprecated, that is why I had to edit the source code to make it work with version 1.68

+ +

Error 1 - No Matching Constructor

+ +
../../../src/main/main.cpp:50:9: error: no matching constructor for initialization of 'path' (aka 'boost::filesystem::path')
+return path(str, boost::filesystem::native);
+
+ +

This was an easy fix, I just commented this and added a return statement to return the path

+ +
return path(str)
+
+ +

Error 2 - No Member Named 'native_file_string'

+ +
../../../src/main/main.cpp:665:57: error: no member named 'native_file_string' in 'boost::filesystem::path'
+                std::cerr << "\n\nError: could not open \"" << e.name.native_file_string() << "\" for " << (e.in ? "reading" : "writing") << ".\n";
+                                                               ~~~~~~ ^
+../../../src/main/main.cpp:677:80: error: no member named 'native_file_string' in 'boost::filesystem::path'
+                std::cerr << "\n\nParse error on line " << e.line << " in file \"" << e.file.native_file_string() << "\": " << e.reason << '\n';
+                                                                                      ~~~~~~ ^
+2 errors generated.
+
+ +

Turns out native_file_string was deprecated in Boost 1.57 and replaced with just string

+ +

Error 3 - Library Not Found

+ +

This one still boggles me because there was no reason for it to not work, as a workaround I downloaded the DEB, extracted it and used that path for compiling.

+ +

Error 4 - No Member Named 'native_file_string' Again.

+ +

But, this time in another file and I quickly fixed it

+ +

Moment of Truth

+ +

Obviously it was working on my iPad, but would it work on another device? I transferred the compiled binary and

+ +

"AutoDock Vina running on my iPhone"

+ +

The package is available on my repository and only depends on boost. ( Both, Vina and Vina-Split are part of the package)

+]]>
+
+ + + + https://web.navan.dev/posts/2020-01-16-Image-Classifier-Using-Turicreate.html + + + Creating a Custom Image Classifier using Turicreate to detect Smoke and Fire + + + Tutorial on creating a custom Image Classifier using Turicreate and a dataset from Kaggle + + https://web.navan.dev/posts/2020-01-16-Image-Classifier-Using-Turicreate.html + Thu, 16 Jan 2020 10:36:00 -0000 + Creating a Custom Image Classifier using Turicreate to detect Smoke and Fire + +

For setting up Kaggle with Google Colab, please refer to my previous post

+ +

Dataset

+ +

Mounting Google Drive

+ +
import os
+from google.colab import drive
+drive.mount('/content/drive')
+
+ +

Downloading Dataset from Kaggle

+ +
os.environ['KAGGLE_CONFIG_DIR'] = "/content/drive/My Drive/"
+!kaggle datasets download ashutosh69/fire-and-smoke-dataset
+!unzip "fire-and-smoke-dataset.zip"
+
+ +

Pre-Processing

+ +
!mkdir default smoke fire
+
+ +

\

+ +
!ls data/data/img_data/train/default/*.jpg
+
+ +

\

+ +
img_1002.jpg   img_20.jpg     img_519.jpg     img_604.jpg       img_80.jpg
+img_1003.jpg   img_21.jpg     img_51.jpg     img_60.jpg       img_8.jpg
+img_1007.jpg   img_22.jpg     img_520.jpg     img_61.jpg       img_900.jpg
+img_100.jpg    img_23.jpg     img_521.jpg    'img_62 (2).jpg'   img_920.jpg
+img_1014.jpg   img_24.jpg    'img_52 (2).jpg'     img_62.jpg       img_921.jpg
+img_1018.jpg   img_29.jpg     img_522.jpg    'img_63 (2).jpg'   img_922.jpg
+img_101.jpg    img_3000.jpg   img_523.jpg     img_63.jpg       img_923.jpg
+img_1027.jpg   img_335.jpg    img_524.jpg     img_66.jpg       img_924.jpg
+img_102.jpg    img_336.jpg    img_52.jpg     img_67.jpg       img_925.jpg
+img_1042.jpg   img_337.jpg    img_530.jpg     img_68.jpg       img_926.jpg
+img_1043.jpg   img_338.jpg    img_531.jpg     img_700.jpg       img_927.jpg
+img_1046.jpg   img_339.jpg   'img_53 (2).jpg'     img_701.jpg       img_928.jpg
+img_1052.jpg   img_340.jpg    img_532.jpg     img_702.jpg       img_929.jpg
+img_107.jpg    img_341.jpg    img_533.jpg     img_703.jpg       img_930.jpg
+img_108.jpg    img_3.jpg      img_537.jpg     img_704.jpg       img_931.jpg
+img_109.jpg    img_400.jpg    img_538.jpg     img_705.jpg       img_932.jpg
+img_10.jpg     img_471.jpg    img_539.jpg     img_706.jpg       img_933.jpg
+img_118.jpg    img_472.jpg    img_53.jpg     img_707.jpg       img_934.jpg
+img_12.jpg     img_473.jpg    img_540.jpg     img_708.jpg       img_935.jpg
+img_14.jpg     img_488.jpg    img_541.jpg     img_709.jpg       img_938.jpg
+img_15.jpg     img_489.jpg   'img_54 (2).jpg'     img_70.jpg       img_958.jpg
+img_16.jpg     img_490.jpg    img_542.jpg     img_710.jpg       img_971.jpg
+img_17.jpg     img_491.jpg    img_543.jpg    'img_71 (2).jpg'   img_972.jpg
+img_18.jpg     img_492.jpg    img_54.jpg     img_71.jpg       img_973.jpg
+img_19.jpg     img_493.jpg   'img_55 (2).jpg'     img_72.jpg       img_974.jpg
+img_1.jpg      img_494.jpg    img_55.jpg     img_73.jpg       img_975.jpg
+img_200.jpg    img_495.jpg    img_56.jpg     img_74.jpg       img_980.jpg
+img_201.jpg    img_496.jpg    img_57.jpg     img_75.jpg       img_988.jpg
+img_202.jpg    img_497.jpg    img_58.jpg     img_76.jpg       img_9.jpg
+img_203.jpg    img_4.jpg      img_59.jpg     img_77.jpg
+img_204.jpg    img_501.jpg    img_601.jpg     img_78.jpg
+img_205.jpg    img_502.jpg    img_602.jpg     img_79.jpg
+img_206.jpg    img_50.jpg     img_603.jpg     img_7.jpg
+
+ +

The image files are not actually JPEG, thus we first need to save them in the correct format for Turicreate

+ +
from PIL import Image
+import glob
+
+
+folders = ["default","smoke","fire"]
+for folder in folders:
+  n = 1
+  for file in glob.glob("./data/data/img_data/train/" + folder + "/*.jpg"):
+    im = Image.open(file)
+    rgb_im = im.convert('RGB')
+    rgb_im.save((folder + "/" + str(n) + ".jpg"), quality=100)
+    n +=1 
+  for file in glob.glob("./data/data/img_data/train/" + folder + "/*.jpg"):
+    im = Image.open(file)
+    rgb_im = im.convert('RGB')
+    rgb_im.save((folder + "/" + str(n) + ".jpg"), quality=100)
+    n +=1
+
+ +

\

+ +
!mkdir train
+!mv default ./train
+!mv smoke ./train
+!mv fire ./train
+
+ +

Making the Image Classifier

+ +

Making an SFrame

+ +
!pip install turicreate
+
+ +

\

+ +
import turicreate as tc
+import os
+
+data = tc.image_analysis.load_images("./train", with_path=True)
+
+data["label"] = data["path"].apply(lambda path: os.path.basename(os.path.dirname(path)))
+
+print(data)
+
+data.save('fire-smoke.sframe')
+
+ +

\

+ +
+-------------------------+------------------------+
+|           path          |         image          |
++-------------------------+------------------------+
+|  ./train/default/1.jpg  | Height: 224 Width: 224 |
+|  ./train/default/10.jpg | Height: 224 Width: 224 |
+| ./train/default/100.jpg | Height: 224 Width: 224 |
+| ./train/default/101.jpg | Height: 224 Width: 224 |
+| ./train/default/102.jpg | Height: 224 Width: 224 |
+| ./train/default/103.jpg | Height: 224 Width: 224 |
+| ./train/default/104.jpg | Height: 224 Width: 224 |
+| ./train/default/105.jpg | Height: 224 Width: 224 |
+| ./train/default/106.jpg | Height: 224 Width: 224 |
+| ./train/default/107.jpg | Height: 224 Width: 224 |
++-------------------------+------------------------+
+[2028 rows x 2 columns]
+Note: Only the head of the SFrame is printed.
+You can use print_rows(num_rows=m, num_columns=n) to print more rows and columns.
++-------------------------+------------------------+---------+
+|           path          |         image          |  label  |
++-------------------------+------------------------+---------+
+|  ./train/default/1.jpg  | Height: 224 Width: 224 | default |
+|  ./train/default/10.jpg | Height: 224 Width: 224 | default |
+| ./train/default/100.jpg | Height: 224 Width: 224 | default |
+| ./train/default/101.jpg | Height: 224 Width: 224 | default |
+| ./train/default/102.jpg | Height: 224 Width: 224 | default |
+| ./train/default/103.jpg | Height: 224 Width: 224 | default |
+| ./train/default/104.jpg | Height: 224 Width: 224 | default |
+| ./train/default/105.jpg | Height: 224 Width: 224 | default |
+| ./train/default/106.jpg | Height: 224 Width: 224 | default |
+| ./train/default/107.jpg | Height: 224 Width: 224 | default |
++-------------------------+------------------------+---------+
+[2028 rows x 3 columns]
+Note: Only the head of the SFrame is printed.
+You can use print_rows(num_rows=m, num_columns=n) to print more rows and columns.
+
+ +

Making the Model

+ +
import turicreate as tc
+
+# Load the data
+data =  tc.SFrame('fire-smoke.sframe')
+
+# Make a train-test split
+train_data, test_data = data.random_split(0.8)
+
+# Create the model
+model = tc.image_classifier.create(train_data, target='label')
+
+# Save predictions to an SArray
+predictions = model.predict(test_data)
+
+# Evaluate the model and print the results
+metrics = model.evaluate(test_data)
+print(metrics['accuracy'])
+
+# Save the model for later use in Turi Create
+model.save('fire-smoke.model')
+
+# Export for use in Core ML
+model.export_coreml('fire-smoke.mlmodel')
+
+ +

\

+ +
Performing feature extraction on resized images...
+Completed   64/1633
+Completed  128/1633
+Completed  192/1633
+Completed  256/1633
+Completed  320/1633
+Completed  384/1633
+Completed  448/1633
+Completed  512/1633
+Completed  576/1633
+Completed  640/1633
+Completed  704/1633
+Completed  768/1633
+Completed  832/1633
+Completed  896/1633
+Completed  960/1633
+Completed 1024/1633
+Completed 1088/1633
+Completed 1152/1633
+Completed 1216/1633
+Completed 1280/1633
+Completed 1344/1633
+Completed 1408/1633
+Completed 1472/1633
+Completed 1536/1633
+Completed 1600/1633
+Completed 1633/1633
+PROGRESS: Creating a validation set from 5 percent of training data. This may take a while.
+          You can set ``validation_set=None`` to disable validation tracking.
+
+Logistic regression:
+--------------------------------------------------------
+Number of examples          : 1551
+Number of classes           : 3
+Number of feature columns   : 1
+Number of unpacked features : 2048
+Number of coefficients      : 4098
+Starting L-BFGS
+--------------------------------------------------------
++-----------+----------+-----------+--------------+-------------------+---------------------+
+| Iteration | Passes   | Step size | Elapsed Time | Training Accuracy | Validation Accuracy |
++-----------+----------+-----------+--------------+-------------------+---------------------+
+| 0         | 6        | 0.018611  | 0.891830     | 0.553836          | 0.560976            |
+| 1         | 10       | 0.390832  | 1.622383     | 0.744681          | 0.792683            |
+| 2         | 11       | 0.488541  | 1.943987     | 0.733075          | 0.804878            |
+| 3         | 14       | 2.442703  | 2.512545     | 0.727917          | 0.841463            |
+| 4         | 15       | 2.442703  | 2.826964     | 0.861380          | 0.853659            |
+| 9         | 28       | 2.340435  | 5.492035     | 0.941328          | 0.975610            |
++-----------+----------+-----------+--------------+-------------------+---------------------+
+Performing feature extraction on resized images...
+Completed  64/395
+Completed 128/395
+Completed 192/395
+Completed 256/395
+Completed 320/395
+Completed 384/395
+Completed 395/395
+0.9316455696202531
+
+ +

We just got an accuracy of 94% on Training Data and 97% on Validation Data!

+]]>
+
+ + + + https://web.navan.dev/posts/2020-10-11-macOS-Virtual-Cam-OBS.html + + + Trying Different Camera Setups + + + Comparison of different cameras setups for using as a webcam and tutorials for the same. + + https://web.navan.dev/posts/2020-10-11-macOS-Virtual-Cam-OBS.html + Sun, 11 Oct 2020 16:12:00 -0000 + Trying Different Camera Setups + +
    +
  1. Animated Overlays
  2. +
  3. Using a modern camera as your webcam
  4. +
  5. Using your phone's camera as your webcam
  6. +
  7. Using a USB Camera
  8. +
+ +

Comparison

+ +

Here are the results before you begin reading.

+ +
+ Normal Webcam + USB Webcam + Camo iPhone 5S + Camo iPhone 11 + Mirrorless Camera +
+ +

Prerequisites

+ +

I am running macOS and iOS but I will try to link the same steps for Windows as well. If you are running Arch, I assume you already know what you are doing and are using this post as an inspiration and not a how-to guide.

+ +

I assume that you have Homebrew installed.

+ +

OBS and OBS-Virtual-Cam

+ +

Description

+ +
brew cask install obs
+brew cask install obs-virtualcam
+
+ +

Windows users can install the latest version of the plugin from OBS-Forums

+ +

0. Animated Overlays

+ +

I have always liked PewDiePie's animated border he uses in his videos

+ +

Still grab from PewDiePie's video showing border

+ +

The border was apparently made by a YouTuber Sleepy Tanooki. He posted a link to a Google Drive folder containing the video file. (I will be using the video overlay for the example)

+ +

It is pretty simple to use overlays in OBS:

+ +

First, Create a new scene by clicking on the plus button on the bottom right corner.

+ +

Bottom Panel of OBS

+ +

Now, in the Sources section click on the add button -> Video Capture Device -> Create New -> Choose your webcam from the Device section.

+ +

You may, resize if you want

+ +

After this, again click on the add button, but this time choose the Media Source option

+ +

Media Source Option

+ +

and, locate and choose the downloaded overlay.

+ +

1. Using a Modern Camera (Without using a Capture Card)

+ +

I have a Sony mirrorless camera. Using Sony's Imaging Edge Desktop, you can use your laptop as a remote viewfinder and capture or record media.

+ +

After installing Image Edge Desktop or your Camera's equivalent, open the Remote application.

+ +

Remote showing available cameras

+ +

Once you are able to see the output of the camera on the application, switch to OBS. Create a new scene, and this time choose Window Capture in the Sources menu. After you have chosen the appropriate window, you may transform/crop the output using the properties/filters options.

+ +

2.1 Using your iPhone using Quicktime

+ +

Connect your iPhone via a USB cable, then Open Quicktime -> File -> New Movie Recording

+ +

In the Sources choose your device (No need to press record). You may open the camera app now.

+ +

Choose Source

+ +

Now, in OBS create a new scene, and in the sources choose the Window Capture option. You will need to rotate the source:

+ +

Rotation

+ +

2.2 Using your iPhone using an application like Camo

+ +

Install the Camo app on your phone through the app store -> connect to Mac using USB cable, install the companion app and you are done.

+ +

I tried both my current iPhone and an old iPhone 5S

+ +

3. A USB Webcam

+ +

The simplest solution, is to use a USB webcam. I used an old Logitech C310 that was collecting dust. I was surprised to find that Logitech is still selling it after years and proudly advertising it! (5MP)

+ +

It did not sit well on my laptop, so I placed it on my definitely-not-Joby Gorilla Pod I had bought on Amazon for ~₹500

+ +

USB Webcam + +

+ + + + +]]>
+
+ + + + https://web.navan.dev/posts/2020-06-01-Speeding-Up-Molecular-Docking-Workflow-AutoDock-Vina-and-PyMOL.html + + + Workflow for Lightning Fast Molecular Docking Part One + + + This is my workflow for lightning fast molecular docking. + + https://web.navan.dev/posts/2020-06-01-Speeding-Up-Molecular-Docking-Workflow-AutoDock-Vina-and-PyMOL.html + Mon, 01 Jun 2020 13:10:00 -0000 + Workflow for Lightning Fast Molecular Docking Part One + +

My Setup

+ +
    +
  • macOS Catalina ( RIP 32bit app)
  • +
  • PyMOL
  • +
  • AutoDock Vina
  • +
  • Open Babel
  • +
+ +

One Command Docking

+ +
obabel -:"$(pbpaste)" --gen3d -opdbqt -Otest.pdbqt && vina --receptor lu.pdbqt --center_x -9.7 --center_y 11.4 --center_z 68.9 --size_x 19.3 --size_y 29.9 --size_z 21.3  --ligand test.pdbqt
+
+ +

To run this command you simply copy the SMILES structure of the ligand you want and it automatically takes it from your clipboard, generates the 3D structure in the AutoDock PDBQT format using Open Babel and then docks it with your receptor using AutoDock Vina, all with just one command.

+ +

Let me break down the commands

+ +
obabel -:"$(pbpaste)" --gen3d -opdbqt -Otest.pdbqt
+
+ +

pbpaste and pbcopy are macOS commands for pasting and copying from and to the clipboard. Linux users may install the xclip and xsel packages from their respective package managers and then insert these aliases into their bash_profile, zshrc, etc.

+ +
alias pbcopy='xclip -selection clipboard'
+alias pbpaste='xclip -selection clipboard -o'
+
+ +
$(pbpaste)
+
+ +

This is used in bash to evaluate the results of a command. In this scenario we are using it to get the contents of the clipboard.

+ +

The rest of the command is a normal Open Babel command to generate a 3D structure in PDBQT format and then save it as test.pdbqt

+ +
&&
+
+ +

This tells the terminal to only run the next part if the previous command runs successfully without any errors.

+ +
vina --receptor lu.pdbqt --center_x -9.7 --center_y 11.4 --center_z 68.9 --size_x 19.3 --size_y 29.9 --size_z 21.3  --ligand test.pdbqt
+
+ +

This is just the docking command for AutoDock Vina. In the next part I will tell how to use PyMOL and a plugin to directly generate the coordinates in Vina format --center_x -9.7 --center_y 11.4 --center_z 68.9 --size_x 19.3 --size_y 29.9 --size_z 21.3 without needing to type them manually.

+]]>
+
+ + + + https://web.navan.dev/publications/2019-05-14-Detecting-Driver-Fatigue-Over-Speeding-and-Speeding-up-Post-Accident-Response.html + + + Detecting Driver Fatigue, Over-Speeding, and Speeding up Post-Accident Response + + + This paper is about Detecting Driver Fatigue, Over-Speeding, and Speeding up Post-Accident Response. + + https://web.navan.dev/publications/2019-05-14-Detecting-Driver-Fatigue-Over-Speeding-and-Speeding-up-Post-Accident-Response.html + Tue, 14 May 2019 02:42:00 -0000 + Detecting Driver Fatigue, Over-Speeding, and Speeding up Post-Accident Response + +
+

Based on the project showcased at Toyota Hackathon, IITD - 17/18th December 2018

+
+ +

Edit: It seems like I haven't mentioned Adrian Rosebrock of PyImageSearch anywhere. I apologize for this mistake.

+ +

Download paper here

+ +

Recommended citation:

+ +

APA

+ +
Chauhan, N. (2019). &quot;Detecting Driver Fatigue, Over-Speeding, and Speeding up Post-Accident Response.&quot; <i>International Research Journal of Engineering and Technology (IRJET), 6(5)</i>.
+
+ +

BibTeX

+ +
@article{chauhan_2019, title={Detecting Driver Fatigue, Over-Speeding, and Speeding up Post-Accident Response}, volume={6}, url={https://www.irjet.net/archives/V6/i5/IRJET-V6I5318.pdf}, number={5}, journal={International Research Journal of Engineering and Technology (IRJET)}, author={Chauhan, Navan}, year={2019}}
+
+]]>
+
+ + + + https://web.navan.dev/publications/2020-03-14-generating-vaporwave.html + + + Is it possible to programmatically generate Vaporwave? + + + This paper is about programmaticaly generating Vaporwave. + + https://web.navan.dev/publications/2020-03-14-generating-vaporwave.html + Sat, 14 Mar 2020 22:23:00 -0000 + Is it possible to programmatically generate Vaporwave? + +

This is still a pre-print.

+ +

Download paper here

+ +

Recommended citation:

+ +

APA

+ +
Chauhan, N. (2020, March 15). Is it possible to programmatically generate Vaporwave?. https://doi.org/10.35543/osf.io/9um2r
+
+ +

MLA

+ +
Chauhan, Navan. “Is It Possible to Programmatically Generate Vaporwave?.” IndiaRxiv, 15 Mar. 2020. Web.
+
+ +

Chicago

+ +
Chauhan, Navan. 2020. “Is It Possible to Programmatically Generate Vaporwave?.” IndiaRxiv. March 15. doi:10.35543/osf.io/9um2r.
+
+ +

Bibtex

+ +
@misc{chauhan_2020,
+ title={Is it possible to programmatically generate Vaporwave?},
+ url={indiarxiv.org/9um2r},
+ DOI={10.35543/osf.io/9um2r},
+ publisher={IndiaRxiv},
+ author={Chauhan, Navan},
+ year={2020},
+ month={Mar}
+}
+
+]]>
+
+ + + + https://web.navan.dev/publications/2020-03-17-Possible-Drug-Candidates-COVID-19.html + + + Possible Drug Candidates for COVID-19 + + + COVID-19, has been officially labeled as a pandemic by the World Health Organisation. This paper presents cloperastine and vigabatrin as two possible drug candidates for combatting the disease along with the process by which they were discovered. + + https://web.navan.dev/publications/2020-03-17-Possible-Drug-Candidates-COVID-19.html + Tue, 17 Mar 2020 17:40:00 -0000 + Possible Drug Candidates for COVID-19 + +

This is still a pre-print.

+ +

Download paper here

+]]>
+
+ +
\ No newline at end of file diff --git a/docs/googlecb0897d479c87d97.html b/docs/googlecb0897d479c87d97.html new file mode 100644 index 0000000..5907da5 --- /dev/null +++ b/docs/googlecb0897d479c87d97.html @@ -0,0 +1 @@ +google-site-verification: googlecb0897d479c87d97.html \ No newline at end of file diff --git a/docs/images/04d0580b-d347-476a-232d-9568839851cd.webPlatform.png b/docs/images/04d0580b-d347-476a-232d-9568839851cd.webPlatform.png new file mode 100644 index 0000000..c277bbc Binary files /dev/null and b/docs/images/04d0580b-d347-476a-232d-9568839851cd.webPlatform.png differ diff --git a/docs/images/14a6e126-4866-93de-8df5-e0e6a3c65da1.webPlatform.png b/docs/images/14a6e126-4866-93de-8df5-e0e6a3c65da1.webPlatform.png new file mode 100644 index 0000000..643a2bd Binary files /dev/null and b/docs/images/14a6e126-4866-93de-8df5-e0e6a3c65da1.webPlatform.png differ diff --git a/docs/images/15294abc-6c7c-ffb8-df8d-d2fad23f50b0.webPlatform.png b/docs/images/15294abc-6c7c-ffb8-df8d-d2fad23f50b0.webPlatform.png new file mode 100644 index 0000000..1dcb03d Binary files /dev/null and b/docs/images/15294abc-6c7c-ffb8-df8d-d2fad23f50b0.webPlatform.png differ diff --git a/docs/images/6b5f7f70-557f-0e4b-3d76-127534525db9.webPlatform.png b/docs/images/6b5f7f70-557f-0e4b-3d76-127534525db9.webPlatform.png new file mode 100644 index 0000000..02e58c4 Binary files /dev/null and b/docs/images/6b5f7f70-557f-0e4b-3d76-127534525db9.webPlatform.png differ diff --git a/docs/images/82e24f17-2e71-90d8-67a7-587163282ebf.webPlatform.png b/docs/images/82e24f17-2e71-90d8-67a7-587163282ebf.webPlatform.png new file mode 100644 index 0000000..cc36571 Binary files /dev/null and b/docs/images/82e24f17-2e71-90d8-67a7-587163282ebf.webPlatform.png differ diff --git a/docs/images/8c0ffe9e-b615-96cd-3e18-ab4307c859a0.webPlatform.png b/docs/images/8c0ffe9e-b615-96cd-3e18-ab4307c859a0.webPlatform.png new file mode 100644 index 0000000..76e34bc Binary files /dev/null and 
b/docs/images/8c0ffe9e-b615-96cd-3e18-ab4307c859a0.webPlatform.png differ diff --git a/docs/images/9384518b-2a6c-0abc-136c-8c8faf49c71b.webPlatform.png b/docs/images/9384518b-2a6c-0abc-136c-8c8faf49c71b.webPlatform.png new file mode 100644 index 0000000..f659b0b Binary files /dev/null and b/docs/images/9384518b-2a6c-0abc-136c-8c8faf49c71b.webPlatform.png differ diff --git a/docs/images/9bf4aee8-92e3-932f-5388-7731928b5692.webPlatform.png b/docs/images/9bf4aee8-92e3-932f-5388-7731928b5692.webPlatform.png new file mode 100644 index 0000000..31c4507 Binary files /dev/null and b/docs/images/9bf4aee8-92e3-932f-5388-7731928b5692.webPlatform.png differ diff --git a/docs/images/9dc22996-fd1b-b2d3-3627-cef4fa224e25.webPlatform.png b/docs/images/9dc22996-fd1b-b2d3-3627-cef4fa224e25.webPlatform.png new file mode 100644 index 0000000..baf1814 Binary files /dev/null and b/docs/images/9dc22996-fd1b-b2d3-3627-cef4fa224e25.webPlatform.png differ diff --git a/docs/images/afd91c53-cfd0-b52e-ca49-1db0cc292b7d.webPlatform.png b/docs/images/afd91c53-cfd0-b52e-ca49-1db0cc292b7d.webPlatform.png new file mode 100644 index 0000000..adbf1a3 Binary files /dev/null and b/docs/images/afd91c53-cfd0-b52e-ca49-1db0cc292b7d.webPlatform.png differ diff --git a/docs/images/b0cac729-56cb-2a63-7e8b-ac62a038a023.webPlatform.png b/docs/images/b0cac729-56cb-2a63-7e8b-ac62a038a023.webPlatform.png new file mode 100644 index 0000000..7cfe2a7 Binary files /dev/null and b/docs/images/b0cac729-56cb-2a63-7e8b-ac62a038a023.webPlatform.png differ diff --git a/docs/images/bb0aca46-4612-c284-055f-58850c0730bd.webPlatform.png b/docs/images/bb0aca46-4612-c284-055f-58850c0730bd.webPlatform.png new file mode 100644 index 0000000..a5be51a Binary files /dev/null and b/docs/images/bb0aca46-4612-c284-055f-58850c0730bd.webPlatform.png differ diff --git a/docs/images/c5840a63-85f5-62b0-c68f-2faa4aaea42b.webPlatform.png b/docs/images/c5840a63-85f5-62b0-c68f-2faa4aaea42b.webPlatform.png new file mode 100644 index 
0000000..bbc54fc Binary files /dev/null and b/docs/images/c5840a63-85f5-62b0-c68f-2faa4aaea42b.webPlatform.png differ diff --git a/docs/images/cbac5b1d-0299-9db6-3747-c7aeaaaa37d0.webPlatform.png b/docs/images/cbac5b1d-0299-9db6-3747-c7aeaaaa37d0.webPlatform.png new file mode 100644 index 0000000..80c3c4f Binary files /dev/null and b/docs/images/cbac5b1d-0299-9db6-3747-c7aeaaaa37d0.webPlatform.png differ diff --git a/docs/images/e429a798-7e86-1f02-565e-39dfab41fe36.webPlatform.png b/docs/images/e429a798-7e86-1f02-565e-39dfab41fe36.webPlatform.png new file mode 100644 index 0000000..f1bc144 Binary files /dev/null and b/docs/images/e429a798-7e86-1f02-565e-39dfab41fe36.webPlatform.png differ diff --git a/docs/images/f1579c61-f17f-ff49-3f97-e942f202bebf.webPlatform.png b/docs/images/f1579c61-f17f-ff49-3f97-e942f202bebf.webPlatform.png new file mode 100644 index 0000000..aad0184 Binary files /dev/null and b/docs/images/f1579c61-f17f-ff49-3f97-e942f202bebf.webPlatform.png differ diff --git a/docs/images/f178697f-630b-bafd-7c7d-e1287b98a969.webPlatform.png b/docs/images/f178697f-630b-bafd-7c7d-e1287b98a969.webPlatform.png new file mode 100644 index 0000000..d4c320d Binary files /dev/null and b/docs/images/f178697f-630b-bafd-7c7d-e1287b98a969.webPlatform.png differ diff --git a/docs/images/f400aaaa-861c-78c0-0919-07a886e57304.webPlatform.png b/docs/images/f400aaaa-861c-78c0-0919-07a886e57304.webPlatform.png new file mode 100644 index 0000000..bb5762e Binary files /dev/null and b/docs/images/f400aaaa-861c-78c0-0919-07a886e57304.webPlatform.png differ diff --git a/docs/images/f7842765-fff5-aa39-9f7f-fdd3024d4056.webPlatform.png b/docs/images/f7842765-fff5-aa39-9f7f-fdd3024d4056.webPlatform.png new file mode 100644 index 0000000..ea79c57 Binary files /dev/null and b/docs/images/f7842765-fff5-aa39-9f7f-fdd3024d4056.webPlatform.png differ diff --git a/docs/images/favicon.png b/docs/images/favicon.png new file mode 100644 index 0000000..ce3a263 Binary files /dev/null and 
b/docs/images/favicon.png differ diff --git a/docs/images/logo.png b/docs/images/logo.png new file mode 100644 index 0000000..caaf43c Binary files /dev/null and b/docs/images/logo.png differ diff --git a/docs/images/me.jpeg b/docs/images/me.jpeg new file mode 100644 index 0000000..cf70e23 Binary files /dev/null and b/docs/images/me.jpeg differ diff --git a/docs/index.html b/docs/index.html new file mode 100644 index 0000000..08209b8 --- /dev/null +++ b/docs/index.html @@ -0,0 +1,436 @@ + + + + + + + + + Hey - Home + + + + + + +
+

👋 Hi!

+ + + +
+ + + + + + \ No newline at end of file diff --git a/docs/manifest.json b/docs/manifest.json new file mode 100644 index 0000000..bb4ec5d --- /dev/null +++ b/docs/manifest.json @@ -0,0 +1,119 @@ +{ + "dir": "ltr", + "lang": "en", + "name": "Hi! | Navan Chauhan", + "scope": "/", + "display": "fullscreen", + "start_url": "https://navanchauhan.github.io/", + "short_name": "Navan Chauhan", + "theme_color": "black", + "description": "Welcome to my personal fragment of the internet.", + "orientation": "any", + "background_color": "transparent", + "related_applications": [], + "prefer_related_applications": false, + "icons": [ + { + "src": "/images/favicon.png", + "type": "image/png", + "sizes": "32x32" + }, + { + "src": "/images/14a6e126-4866-93de-8df5-e0e6a3c65da1.webPlatform.png", + "sizes": "44x44", + "type": "image/png" + }, + { + "src": "/images/6b5f7f70-557f-0e4b-3d76-127534525db9.webPlatform.png", + "sizes": "48x48", + "type": "image/png" + }, + { + "src": "/images/c5840a63-85f5-62b0-c68f-2faa4aaea42b.webPlatform.png", + "sizes": "1240x600", + "type": "image/png" + }, + { + "src": "/images/82e24f17-2e71-90d8-67a7-587163282ebf.webPlatform.png", + "sizes": "300x300", + "type": "image/png" + }, + { + "src": "/images/f7842765-fff5-aa39-9f7f-fdd3024d4056.webPlatform.png", + "sizes": "150x150", + "type": "image/png" + }, + { + "src": "/images/9384518b-2a6c-0abc-136c-8c8faf49c71b.webPlatform.png", + "sizes": "88x88", + "type": "image/png" + }, + { + "src": "/images/15294abc-6c7c-ffb8-df8d-d2fad23f50b0.webPlatform.png", + "sizes": "24x24", + "type": "image/png" + }, + { + "src": "/images/f178697f-630b-bafd-7c7d-e1287b98a969.webPlatform.png", + "sizes": "50x50", + "type": "image/png" + }, + { + "src": "/images/f400aaaa-861c-78c0-0919-07a886e57304.webPlatform.png", + "sizes": "620x300", + "type": "image/png" + }, + { + "src": "/images/8c0ffe9e-b615-96cd-3e18-ab4307c859a0.webPlatform.png", + "sizes": "192x192", + "type": "image/png" + }, + { + "src": 
"/images/f1579c61-f17f-ff49-3f97-e942f202bebf.webPlatform.png", + "sizes": "144x144", + "type": "image/png" + }, + { + "src": "/images/9bf4aee8-92e3-932f-5388-7731928b5692.webPlatform.png", + "sizes": "96x96", + "type": "image/png" + }, + { + "src": "/images/9dc22996-fd1b-b2d3-3627-cef4fa224e25.webPlatform.png", + "sizes": "72x72", + "type": "image/png" + }, + { + "src": "/images/afd91c53-cfd0-b52e-ca49-1db0cc292b7d.webPlatform.png", + "sizes": "36x36", + "type": "image/png" + }, + { + "src": "/images/e429a798-7e86-1f02-565e-39dfab41fe36.webPlatform.png", + "sizes": "1024x1024", + "type": "image/png" + }, + { + "src": "/images/04d0580b-d347-476a-232d-9568839851cd.webPlatform.png", + "sizes": "180x180", + "type": "image/png" + }, + { + "src": "/images/cbac5b1d-0299-9db6-3747-c7aeaaaa37d0.webPlatform.png", + "sizes": "152x152", + "type": "image/png" + }, + { + "src": "/images/b0cac729-56cb-2a63-7e8b-ac62a038a023.webPlatform.png", + "sizes": "120x120", + "type": "image/png" + }, + { + "src": "/images/bb0aca46-4612-c284-055f-58850c0730bd.webPlatform.png", + "sizes": "76x76", + "type": "image/png" + } + ], + "url": "https://navanchauhan.github.io", + "screenshots": [] +} diff --git a/docs/posts/2010-01-24-experiments.html b/docs/posts/2010-01-24-experiments.html new file mode 100644 index 0000000..d39cbd3 --- /dev/null +++ b/docs/posts/2010-01-24-experiments.html @@ -0,0 +1,36 @@ + + + + + + + + + Hey - Post + + + + + +
+

Experiments

+ +

https://s3-us-west-2.amazonaws.com/s.cdpn.io/148866/img-original.jpg

+ + + +
+ + + + + + \ No newline at end of file diff --git a/docs/posts/2019-05-05-Custom-Snowboard-Anemone-Theme.html b/docs/posts/2019-05-05-Custom-Snowboard-Anemone-Theme.html new file mode 100644 index 0000000..cdcb2ae --- /dev/null +++ b/docs/posts/2019-05-05-Custom-Snowboard-Anemone-Theme.html @@ -0,0 +1,448 @@ + + + + + + + + + Hey - Post + + + + + +
+

Creating your own custom theme for Snowboard or Anemone

+ +

Contents

+ +
    +
  • Getting Started
  • +
  • Theme Configuration
  • +
  • Creating Icons
  • +
  • Exporting Icons
  • +
  • Icon Masks
  • +
  • Packaging
  • +
  • Building the DEB
  • +
+ +

Getting Started

+ +

Note: Without the proper folder structure, your theme may not show up!

+ +
    +
  • Create a new folder called themeName.theme (Replace themeName with your desired theme name)
  • +
  • Within themeName.theme folder, create another folder called IconBundles (You cannot change this name)
  • +
+ +

Theme Configuration

+ +
    +
  • Now, inside the themeName.theme folder, create a file called Info.plist and paste the following
  • +
+ +
<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+  <plist version="1.0">
+  <dict>
+    <key>PackageName</key>
+    <string>ThemeName</string>
+    <key>ThemeType</key>
+    <string>Icons</string>
+  </dict>
+</plist>
+
+ +
    +
  • Replace PackageName with the name of the Package and replace ThemeName with the Theme Name
  • +
+ +

Now, you might ask what is the difference between PackageName and ThemeName?

+ +

Well, if for example you want to publish two variants of your icons, one dark and one white but you do not want the user to separately install them. +Then, you would name the package MyTheme and include two themes Blackie and White thus creating two entries. More about this in the end

+ +

Creating Icons

+ +
    +
  • Open up the Image Editor of your choice and create a new file having a resolution of 512x512
  • +
+ +

Note: Due to IconBundles, we just need to create the icons in one size and they get resized automatically :ghost:

+ +

Want to create rounded icons? +Create them squared only, we will learn how to apply masks!

+ +

Exporting Icons

+ +

Note: All icons must be saved as *.png (Tip: This means you can even create partially transparent icons!)

+ +
    +
  • All Icons must be saved in themeName.theme>IconBundles as bundleID-large.png
  • +
+ +
Finding BundleIDs
+ +

Stock Application BundleIDs

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameBundleID
App Storecom.apple.AppStore
Apple Watchcom.apple.Bridge
Calculatorcom.apple.calculator
Calendarcom.apple.mobilecal
Cameracom.apple.camera
Classroomcom.apple.classroom
Clockcom.apple.mobiletimer
Compasscom.apple.compass
FaceTimecom.apple.facetime
Filescom.apple.DocumentsApp
Game Centercom.apple.gamecenter
Healthcom.apple.Health
Homecom.apple.Home
iBookscom.apple.iBooks
iTunes Storecom.apple.MobileStore
Mailcom.apple.mobilemail
Mapscom.apple.Maps
Measurecom.apple.measure
Messagescom.apple.MobileSMS
Musiccom.apple.Music
Newscom.apple.news
Notescom.apple.mobilenotes
Phonecom.apple.mobilephone
Photo Boothcom.apple.Photo-Booth
Photoscom.apple.mobileslideshow
Playgroundscome.apple.Playgrounds
Podcastscom.apple.podcasts
Reminderscom.apple.reminders
Safaricom.apple.mobilesafari
Settingscom.apple.Preferences
Stockscom.apple.stocks
Tipscom.apple.tips
TVcom.apple.tv
Videoscom.apple.videos
Voice Memoscom.apple.VoiceMemos
Walletcom.apple.Passbook
Weathercom.apple.weather
+ +

3rd Party Applications BundleID +Click here

+ +

Icon Masks

+ +
    +
  • Getting the Classic Rounded Rectangle Masks
  • +
+ +

In your Info.plist file add the following value between <dict> and

+ +
<key>IB-MaskIcons</key>
+    <true/>
+
+ +
    +
  • Custom Icon Masks
  • +
+ +

NOTE: This is an optional step, if you do not want Icon Masks, skip this step

+ +
    +
  • Inside your themeName.theme folder, create another folder called 'Bundles' +
      +
    • Inside Bundles create another folder called com.apple.mobileicons.framework
    • +
  • +
+ +

Designing Masks

+ +

Masking does not support IconBundles, therefore you need to save the masks for each of the following

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FileResolution
AppIconMask@2x~ipad.png152x512
AppIconMask@2x~iphone.png120x120
AppIconMask@3x~ipad.png180x180
AppIconMask@3x~iphone.png180x180
AppIconMask~ipad.png76x76
DocumentBadgeMask-20@2x.png40x40
DocumentBadgeMask-145@2x.png145x145
GameAppIconMask@2x.png84x84
NotificationAppIconMask@2x.png40x40
NotificationAppIconMask@3x.png60x60
SpotlightAppIconMask@2x.png80x80
SpotlightAppIconMask@3x.png120x120
TableIconMask@2x.png58x58
TableIconOutline@2x.png58x58
+ +
    +
  • While creating the mask, make sure that the background is not a solid colour and is transparent
  • +
  • Whichever area you want to make visible, it should be coloured in black
  • +
+ +

Example (Credits: Pinpal):

+ +

Credit: Pinpal

+ +

would result in

+ +

Credit: Pinpal

+ +

Packaging

+ +
    +
  • Create a new folder outside themeName.theme with the name you want to be shown on Cydia, e.g themeNameForCydia
  • +
  • Create another folder called DEBIAN in themeNameForCydia (It needs to be uppercase)
  • +
  • In DEBIAN create an extension-less file called control and edit it using your favourite text editor
  • +
+ +

Paste the following in it, replacing yourname, themename, Theme Name, A theme with beautiful icons! and Your Name with your details:

+ +
Package: com.yourname.themename
+Name: Theme Name
+Version: 1.0
+Architecture: iphoneos-arm
+Description: A theme with beautiful icons!
+Author: Your Name
+Maintainer: Your Name
+Section: Themes
+
+ +
    +
  • Important Notes:

    + +
      +
    • The package field MUST be lower case!
    • +
    • The version field MUST be changed every-time you update your theme!
    • +
    • The control file MUST have an extra blank line at the bottom!
    • +
  • +
  • Now, Create another folder called Library in themeNameForCydia

  • +
  • In Library create another folder called Themes
  • +
  • Finally, copy themeName.theme to the Themes folder (Copy the entire folder, not just the contents)
  • +
+ +

Building the DEB

+ +

For building the deb you need a *nix system, otherwise you can build it using your iPhones

+ +
Pre-Requisite for MacOS users
+ +

1) Install Homebrew /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)" (Run this in the terminal) +2) Install dpkg, by running brew install dpkg

+ +

There is a terrible thing called .DS_Store which if not removed, will cause a problem during either build or installation

+ +
    +
  • To remove this we first need to open the folder in the terminal

  • +
  • Launch the Terminal and then drag-and-drop the 'themeNameForCydia' folder on the Terminal icon in the dock

  • +
  • Now, run find . -name "*.DS_Store" -type f -delete
  • +
+ +
Pre-Requisite for Windows Users
+ +
    +
  • SSH into your iPhone and drag and drop the themeNameForCydia folder on the terminal
  • +
+ +
Common Instructions
+ +
    +
  • You should be at the root of the folder in the terminal, i.e Inside themeNameForCydia
  • +
  • running ls should show the following output
  • +
+ +
DEBIAN  Library
+
+ +
    +
  • Now, in the terminal enter the following cd .. && dpkg -b themeNameForCydia
  • +
+ +

Now you will have the themeNameForCydia.deb in the same directory

+ +

You can share this with your friends :+1:

+ +
+ + + + + + \ No newline at end of file diff --git a/docs/posts/2019-12-04-Google-Teachable-Machines.html b/docs/posts/2019-12-04-Google-Teachable-Machines.html new file mode 100644 index 0000000..d13fd6f --- /dev/null +++ b/docs/posts/2019-12-04-Google-Teachable-Machines.html @@ -0,0 +1,80 @@ + + + + + + + + + Hey - Post + + + + + +
+

Image Classifier With Teachable Machines

+ +

Made for Google Code-In

+ +

Task Description

+ +

Using Glitch and the Teachable Machines, build a Book Detector with Tensorflow.js. When a book is recognized, the code would randomly suggest a book/tell a famous quote from a book. Here is an example Project to get you started: https://glitch.com/~voltaic-acorn

+ +

Details

+ +

1) Collecting Data

+ +

Teachable Machine allows you to create your dataset just by using your webcam! I created a database consisting of three classes ( Three Books ) and approximately grabbed 100 pictures for each book/class

+ +

+ +

2) Training

+ +

Training on teachable machines is as simple as clicking the train button. I did not even have to modify any configurations.

+ +

+ +

3) Finding Labels

+ +

Because I originally entered the entire name of the book and its author's name as the label, the class name got truncated (Note to self, use shorter class names :p ). I then modified the code to print the modified label names in an alert box.

+ +

+ +

+ +

4) Adding a suggestions function

+ +

I first added a text field on the main page and then modified the JavaScript file to suggest a similar book whenever the model predicted with an accuracy >= 98%

+ +

+ +

+ +

5) Running!

+ +

Here it is running!

+ +

+ +

+ +

Remix this project:-

+ +

https://luminous-opinion.glitch.me

+ +
+ + + + + + \ No newline at end of file diff --git a/docs/posts/2019-12-08-Image-Classifier-Tensorflow.html b/docs/posts/2019-12-08-Image-Classifier-Tensorflow.html new file mode 100644 index 0000000..e3e898c --- /dev/null +++ b/docs/posts/2019-12-08-Image-Classifier-Tensorflow.html @@ -0,0 +1,190 @@ + + + + + + + + + Hey - Post + + + + + +
+

Creating a Custom Image Classifier using Tensorflow 2.x and Keras for Detecting Malaria

+ +

Done during Google Code-In. Org: Tensorflow.

+ +

Imports

+ +
%tensorflow_version 2.x #This is for telling Colab that you want to use TF 2.0, ignore if running on local machine
+
+from PIL import Image # We use the PIL Library to resize images
+import numpy as np
+import os
+import cv2
+import tensorflow as tf
+from tensorflow.keras import datasets, layers, models
+import pandas as pd
+import matplotlib.pyplot as plt
+from keras.models import Sequential
+from keras.layers import Conv2D,MaxPooling2D,Dense,Flatten,Dropout
+
+ +

Dataset

+ +

Fetching the Data

+ +
!wget ftp://lhcftp.nlm.nih.gov/Open-Access-Datasets/Malaria/cell_images.zip
+!unzip cell_images.zip
+
+ +

Processing the Data

+ +

We resize all the images as 50x50 and add the numpy array of that image as well as their label names (Infected or Not) to common arrays.

+ +
data = []
+labels = []
+
+Parasitized = os.listdir("./cell_images/Parasitized/")
+for parasite in Parasitized:
+    try:
+        image=cv2.imread("./cell_images/Parasitized/"+parasite)
+        image_from_array = Image.fromarray(image, 'RGB')
+        size_image = image_from_array.resize((50, 50))
+        data.append(np.array(size_image))
+        labels.append(0)
+    except AttributeError:
+        print("")
+
+Uninfected = os.listdir("./cell_images/Uninfected/")
+for uninfect in Uninfected:
+    try:
+        image=cv2.imread("./cell_images/Uninfected/"+uninfect)
+        image_from_array = Image.fromarray(image, 'RGB')
+        size_image = image_from_array.resize((50, 50))
+        data.append(np.array(size_image))
+        labels.append(1)
+    except AttributeError:
+        print("")
+
+ +

Splitting Data

+ +
df = np.array(data)
+labels = np.array(labels)
+(X_train, X_test) = df[(int)(0.1*len(df)):],df[:(int)(0.1*len(df))]
+(y_train, y_test) = labels[(int)(0.1*len(labels)):],labels[:(int)(0.1*len(labels))]
+
+ +
s=np.arange(X_train.shape[0])
+np.random.shuffle(s)
+X_train=X_train[s]
+y_train=y_train[s]
+X_train = X_train/255.0
+
+ +

Model

+ +

Creating Model

+ +

By creating a sequential model, we create a linear stack of layers.

+ +

Note: The input shape for the first layer is 50,50 which corresponds with the sizes of the resized images

+ +
model = models.Sequential()
+model.add(layers.Conv2D(filters=16, kernel_size=2, padding='same', activation='relu', input_shape=(50,50,3)))
+model.add(layers.MaxPooling2D(pool_size=2))
+model.add(layers.Conv2D(filters=32,kernel_size=2,padding='same',activation='relu'))
+model.add(layers.MaxPooling2D(pool_size=2))
+model.add(layers.Conv2D(filters=64,kernel_size=2,padding="same",activation="relu"))
+model.add(layers.MaxPooling2D(pool_size=2))
+model.add(layers.Dropout(0.2))
+model.add(layers.Flatten())
+model.add(layers.Dense(500,activation="relu"))
+model.add(layers.Dropout(0.2))
+model.add(layers.Dense(2,activation="softmax"))#2 represent output layer neurons 
+model.summary()
+
+ +

Compiling Model

+ +

We use the Adam optimiser as it is an adaptive learning rate optimisation algorithm that's been designed specifically for training deep neural networks, which means it changes its learning rate automatically to get the best results

+ +
model.compile(optimizer="adam",
+              loss="sparse_categorical_crossentropy", 
+             metrics=["accuracy"])
+
+ +

Training Model

+ +

We train the model for 10 epochs on the training data and then validate it using the testing data

+ +
history = model.fit(X_train,y_train, epochs=10, validation_data=(X_test,y_test))
+
+ +
Train on 24803 samples, validate on 2755 samples
+Epoch 1/10
+24803/24803 [==============================] - 57s 2ms/sample - loss: 0.0786 - accuracy: 0.9729 - val_loss: 0.0000e+00 - val_accuracy: 1.0000
+Epoch 2/10
+24803/24803 [==============================] - 58s 2ms/sample - loss: 0.0746 - accuracy: 0.9731 - val_loss: 0.0290 - val_accuracy: 0.9996
+Epoch 3/10
+24803/24803 [==============================] - 58s 2ms/sample - loss: 0.0672 - accuracy: 0.9764 - val_loss: 0.0000e+00 - val_accuracy: 1.0000
+Epoch 4/10
+24803/24803 [==============================] - 58s 2ms/sample - loss: 0.0601 - accuracy: 0.9789 - val_loss: 0.0000e+00 - val_accuracy: 1.0000
+Epoch 5/10
+24803/24803 [==============================] - 58s 2ms/sample - loss: 0.0558 - accuracy: 0.9804 - val_loss: 0.0000e+00 - val_accuracy: 1.0000
+Epoch 6/10
+24803/24803 [==============================] - 57s 2ms/sample - loss: 0.0513 - accuracy: 0.9819 - val_loss: 0.0000e+00 - val_accuracy: 1.0000
+Epoch 7/10
+24803/24803 [==============================] - 58s 2ms/sample - loss: 0.0452 - accuracy: 0.9849 - val_loss: 0.3190 - val_accuracy: 0.9985
+Epoch 8/10
+24803/24803 [==============================] - 58s 2ms/sample - loss: 0.0404 - accuracy: 0.9858 - val_loss: 0.0000e+00 - val_accuracy: 1.0000
+Epoch 9/10
+24803/24803 [==============================] - 58s 2ms/sample - loss: 0.0352 - accuracy: 0.9878 - val_loss: 0.0000e+00 - val_accuracy: 1.0000
+Epoch 10/10
+24803/24803 [==============================] - 58s 2ms/sample - loss: 0.0373 - accuracy: 0.9865 - val_loss: 0.0000e+00 - val_accuracy: 1.0000
+
+ +

Results

+ +
accuracy = history.history['accuracy'][-1]*100
+loss = history.history['loss'][-1]*100
+val_accuracy = history.history['val_accuracy'][-1]*100
+val_loss = history.history['val_loss'][-1]*100
+
+print(
+    'Accuracy:', accuracy,
+    '\nLoss:', loss,
+    '\nValidation Accuracy:', val_accuracy,
+    '\nValidation Loss:', val_loss
+)
+
+ +
Accuracy: 98.64532351493835 
+Loss: 3.732407123270176 
+Validation Accuracy: 100.0 
+Validation Loss: 0.0
+
+ +

We have achieved 98% Accuracy!

+ +

Link to Colab Notebook

+ +
+ + + + + + \ No newline at end of file diff --git a/docs/posts/2019-12-08-Splitting-Zips.html b/docs/posts/2019-12-08-Splitting-Zips.html new file mode 100644 index 0000000..69ad52b --- /dev/null +++ b/docs/posts/2019-12-08-Splitting-Zips.html @@ -0,0 +1,53 @@ + + + + + + + + + Hey - Post + + + + + +
+

Splitting ZIPs into Multiple Parts

+ +

Tested on macOS

+ +

Creating the archive:

+ +
zip -r -s 5 oodlesofnoodles.zip website/
+
+ +

5 stands for each split files' size (in mb, kb and gb can also be specified)

+ +

For encrypting the zip:

+ +
zip -er -s 5 oodlesofnoodles.zip website
+
+ +

Extracting Files

+ +

First we need to collect all parts, then

+ +
zip -F oodlesofnoodles.zip --out merged.zip
+
+ +
+ + + + + + \ No newline at end of file diff --git a/docs/posts/2019-12-10-TensorFlow-Model-Prediction.html b/docs/posts/2019-12-10-TensorFlow-Model-Prediction.html new file mode 100644 index 0000000..46eb777 --- /dev/null +++ b/docs/posts/2019-12-10-TensorFlow-Model-Prediction.html @@ -0,0 +1,76 @@ + + + + + + + + + Hey - Post + + + + + +
+

Making Predictions using Image Classifier (TensorFlow)

+ +

This was tested on TF 2.x and works as of 2019-12-10

+ +

If you want to understand how to make your own custom image classifier, please refer to my previous post.

+ +

If you followed my last post, then you created a model which took an image of dimensions 50x50 as an input.

+ +

First we import the following if we have not imported these before

+ +
import cv2
+import os
+
+ +

Then we read the file using OpenCV.

+ +
image=cv2.imread(imagePath)
+
+ +

The cv2.imread() function returns a NumPy array representing the image. Therefore, we need to convert it before we can use it.

+ +
image_from_array = Image.fromarray(image, 'RGB')
+
+ +

Then we resize the image

+ +
size_image = image_from_array.resize((50,50))
+
+ +

After this we create a batch consisting of only one image

+ +
p = np.expand_dims(size_image, 0)
+
+ +

We then convert this uint8 datatype to a float32 datatype

+ +
img = tf.cast(p, tf.float32)
+
+ +

Finally we make the prediction

+ +
print(['Infected','Uninfected'][np.argmax(model.predict(img))])
+
+ +

Infected

+ +
+ + + + + + \ No newline at end of file diff --git a/docs/posts/2019-12-16-TensorFlow-Polynomial-Regression.html b/docs/posts/2019-12-16-TensorFlow-Polynomial-Regression.html new file mode 100644 index 0000000..7a0d95e --- /dev/null +++ b/docs/posts/2019-12-16-TensorFlow-Polynomial-Regression.html @@ -0,0 +1,510 @@ + + + + + + + + + Hey - Post + + + + + +
+

Polynomial Regression Using TensorFlow

+ +

In this tutorial you will learn about polynomial regression and how you can implement it in Tensorflow.

+ +

In this, we will be performing polynomial regression using 5 types of equations -

+ +
    +
  • Linear
  • +
  • Quadratic
  • +
  • Cubic
  • +
  • Quartic
  • +
  • Quintic
  • +
+ +

Regression

+ +

What is Regression?

+ +

Regression is a statistical measurement that is used to try to determine the relationship between a +dependent variable (often denoted by Y) and a series of varying variables (called independent variables, often denoted by X).

+ +

What is Polynomial Regression

+ +

This is a form of Regression Analysis where the relationship between Y and X is denoted as the nth degree/power of X. +Polynomial regression even fits a non-linear relationship (e.g. when the points don't form a straight line).

+ +

Imports

+ +
import tensorflow.compat.v1 as tf
+tf.disable_v2_behavior()
+import matplotlib.pyplot as plt
+import numpy as np
+import pandas as pd
+
+ +

Dataset

+ +

Creating Random Data

+ +

Even though in this tutorial we will use a Position Vs Salary dataset, it is important to know how to create synthetic data

+ +

To create 50 values spaced evenly between 0 and 50, we use NumPy's linspace function

+ +

linspace(lower_limit, upper_limit, no_of_observations)

+ +
x = np.linspace(0, 50, 50)
+y = np.linspace(0, 50, 50)
+
+ +

We use the following function to add noise to the data, so that our values do not lie perfectly on a straight line

+ +
x += np.random.uniform(-4, 4, 50)
+y += np.random.uniform(-4, 4, 50)
+
+ +

Position vs Salary Dataset

+ +

We will be using https://drive.google.com/file/d/1tNL4jxZEfpaP4oflfSn6pIHJX7Pachm9/view (Salary vs Position Dataset)

+ +
!wget --no-check-certificate 'https://docs.google.com/uc?export=download&id=1tNL4jxZEfpaP4oflfSn6pIHJX7Pachm9' -O data.csv
+
+ +
df = pd.read_csv("data.csv")
+
+ +
df # this gives us a preview of the dataset we are working with
+
+ +
| Position          | Level | Salary  |
+|-------------------|-------|---------|
+| Business Analyst  | 1     | 45000   |
+| Junior Consultant | 2     | 50000   |
+| Senior Consultant | 3     | 60000   |
+| Manager           | 4     | 80000   |
+| Country Manager   | 5     | 110000  |
+| Region Manager    | 6     | 150000  |
+| Partner           | 7     | 200000  |
+| Senior Partner    | 8     | 300000  |
+| C-level           | 9     | 500000  |
+| CEO               | 10    | 1000000 |
+
+ +

We use the salary column as the ordinate (y-coordinate) and the level column as the abscissa (x-coordinate)

+ +
abscissa = df["Level"].to_list() # abscissa = [1,2,3,4,5,6,7,8,9,10]
+ordinate = df["Salary"].to_list() # ordinate = [45000,50000,60000,80000,110000,150000,200000,300000,500000,1000000]
+
+ +
n = len(abscissa) # no of observations
+plt.scatter(abscissa, ordinate)
+plt.ylabel('Salary')
+plt.xlabel('Position')
+plt.title("Salary vs Position")
+plt.show()
+
+ +

+ +

Defining Stuff

+ +
X = tf.placeholder("float")
+Y = tf.placeholder("float")
+
+ +

Defining Variables

+ +

We first define all the coefficients and constant as tensorflow variables having a random initial value

+ +
a = tf.Variable(np.random.randn(), name = "a")
+b = tf.Variable(np.random.randn(), name = "b")
+c = tf.Variable(np.random.randn(), name = "c")
+d = tf.Variable(np.random.randn(), name = "d")
+e = tf.Variable(np.random.randn(), name = "e")
+f = tf.Variable(np.random.randn(), name = "f")
+
+ +

Model Configuration

+ +
learning_rate = 0.2
+no_of_epochs = 25000
+
+ +

Equations

+ +
deg1 = a*X + b
+deg2 = a*tf.pow(X,2) + b*X + c
+deg3 = a*tf.pow(X,3) + b*tf.pow(X,2) + c*X + d
+deg4 = a*tf.pow(X,4) + b*tf.pow(X,3) + c*tf.pow(X,2) + d*X + e
+deg5 = a*tf.pow(X,5) + b*tf.pow(X,4) + c*tf.pow(X,3) + d*tf.pow(X,2) + e*X + f
+
+ +

Cost Function

+ +

We use the Mean Squared Error Function

+ +
mse1 = tf.reduce_sum(tf.pow(deg1-Y,2))/(2*n)
+mse2 = tf.reduce_sum(tf.pow(deg2-Y,2))/(2*n)
+mse3 = tf.reduce_sum(tf.pow(deg3-Y,2))/(2*n)
+mse4 = tf.reduce_sum(tf.pow(deg4-Y,2))/(2*n)
+mse5 = tf.reduce_sum(tf.pow(deg5-Y,2))/(2*n)
+
+ +

Optimizer

+ +

We use the AdamOptimizer for the polynomial functions and GradientDescentOptimizer for the linear function

+ +
optimizer1 = tf.train.GradientDescentOptimizer(learning_rate).minimize(mse1)
+optimizer2 = tf.train.AdamOptimizer(learning_rate).minimize(mse2)
+optimizer3 = tf.train.AdamOptimizer(learning_rate).minimize(mse3)
+optimizer4 = tf.train.AdamOptimizer(learning_rate).minimize(mse4)
+optimizer5 = tf.train.AdamOptimizer(learning_rate).minimize(mse5)
+
+ +
init=tf.global_variables_initializer()
+
+ +

Model Predictions

+ +

For each type of equation, first we make the model predict the values of the coefficient(s) and constant; once we get these values, we use them to predict the Y +values using the X values. We then plot it to compare the actual data and the predicted line.

+ +

Linear Equation

+ +
with tf.Session() as sess:
+    sess.run(init)
+    for epoch in range(no_of_epochs):
+      for (x,y) in zip(abscissa, ordinate):
+        sess.run(optimizer1, feed_dict={X:x, Y:y})
+      if (epoch+1)%1000==0:
+        cost = sess.run(mse1,feed_dict={X:abscissa,Y:ordinate})
+        print("Epoch",(epoch+1), ": Training Cost:", cost," a,b:",sess.run(a),sess.run(b))
+
+        training_cost = sess.run(mse1,feed_dict={X:abscissa,Y:ordinate})
+        coefficient1 = sess.run(a)
+        constant = sess.run(b)
+
+print(training_cost, coefficient1, constant)
+
+ +
Epoch 1000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 2000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 3000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 4000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 5000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 6000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 7000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 8000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 9000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 10000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 11000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 12000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 13000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 14000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 15000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 16000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 17000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 18000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 19000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 20000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 21000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 22000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 23000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 24000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+Epoch 25000 : Training Cost: 88999125000.0  a,b: 180396.42 -478869.12
+88999125000.0 180396.42 -478869.12
+
+ +
predictions = []
+for x in abscissa:
+  predictions.append((coefficient1*x + constant))
+plt.plot(abscissa , ordinate, 'ro', label ='Original data')
+plt.plot(abscissa, predictions, label ='Fitted line')
+plt.title('Linear Regression Result')
+plt.legend()
+plt.show()
+
+ +

+ +

Quadratic Equation

+ +
with tf.Session() as sess:
+    sess.run(init)
+    for epoch in range(no_of_epochs):
+      for (x,y) in zip(abscissa, ordinate):
+        sess.run(optimizer2, feed_dict={X:x, Y:y})
+      if (epoch+1)%1000==0:
+        cost = sess.run(mse2,feed_dict={X:abscissa,Y:ordinate})
+        print("Epoch",(epoch+1), ": Training Cost:", cost," a,b,c:",sess.run(a),sess.run(b),sess.run(c))
+
+        training_cost = sess.run(mse2,feed_dict={X:abscissa,Y:ordinate})
+        coefficient1 = sess.run(a)
+        coefficient2 = sess.run(b)
+        constant = sess.run(c)
+
+print(training_cost, coefficient1, coefficient2, constant)
+
+ +
Epoch 1000 : Training Cost: 52571360000.0  a,b,c: 1002.4456 1097.0197 1276.6921
+Epoch 2000 : Training Cost: 37798890000.0  a,b,c: 1952.4263 2130.2825 2469.7756
+Epoch 3000 : Training Cost: 26751185000.0  a,b,c: 2839.5825 3081.6118 3554.351
+Epoch 4000 : Training Cost: 19020106000.0  a,b,c: 3644.56 3922.9563 4486.3135
+Epoch 5000 : Training Cost: 14060446000.0  a,b,c: 4345.042 4621.4233 5212.693
+Epoch 6000 : Training Cost: 11201084000.0  a,b,c: 4921.1855 5148.1504 5689.0713
+Epoch 7000 : Training Cost: 9732740000.0  a,b,c: 5364.764 5493.0156 5906.754
+Epoch 8000 : Training Cost: 9050918000.0  a,b,c: 5685.4067 5673.182 5902.0728
+Epoch 9000 : Training Cost: 8750394000.0  a,b,c: 5906.9814 5724.8906 5734.746
+Epoch 10000 : Training Cost: 8613128000.0  a,b,c: 6057.3677 5687.3364 5461.167
+Epoch 11000 : Training Cost: 8540034600.0  a,b,c: 6160.547 5592.3022 5122.8633
+Epoch 12000 : Training Cost: 8490983000.0  a,b,c: 6233.9175 5462.025 4747.111
+Epoch 13000 : Training Cost: 8450816500.0  a,b,c: 6289.048 5310.7583 4350.6997
+Epoch 14000 : Training Cost: 8414082000.0  a,b,c: 6333.199 5147.394 3943.9294
+Epoch 15000 : Training Cost: 8378841600.0  a,b,c: 6370.7944 4977.1704 3532.476
+Epoch 16000 : Training Cost: 8344471000.0  a,b,c: 6404.468 4803.542 3120.2087
+Epoch 17000 : Training Cost: 8310785500.0  a,b,c: 6435.365 4628.1523 2709.1445
+Epoch 18000 : Training Cost: 8277482000.0  a,b,c: 6465.5493 4451.833 2300.2783
+Epoch 19000 : Training Cost: 8244650000.0  a,b,c: 6494.609 4274.826 1894.3738
+Epoch 20000 : Training Cost: 8212349000.0  a,b,c: 6522.8247 4098.1733 1491.9915
+Epoch 21000 : Training Cost: 8180598300.0  a,b,c: 6550.6567 3922.7405 1093.3868
+Epoch 22000 : Training Cost: 8149257700.0  a,b,c: 6578.489 3747.8362 698.53357
+Epoch 23000 : Training Cost: 8118325000.0  a,b,c: 6606.1973 3573.2742 307.3541
+Epoch 24000 : Training Cost: 8088001000.0  a,b,c: 6632.96 3399.878 -79.89219
+Epoch 25000 : Training Cost: 8058094600.0  a,b,c: 6659.793 3227.2517 -463.03156
+8058094600.0 6659.793 3227.2517 -463.03156
+
+ +
predictions = []
+for x in abscissa:
+  predictions.append((coefficient1*pow(x,2) + coefficient2*x + constant))
+plt.plot(abscissa , ordinate, 'ro', label ='Original data')
+plt.plot(abscissa, predictions, label ='Fitted line')
+plt.title('Quadratic Regression Result')
+plt.legend()
+plt.show()
+
+ +

+ +

Cubic

+ +
with tf.Session() as sess:
+    sess.run(init)
+    for epoch in range(no_of_epochs):
+      for (x,y) in zip(abscissa, ordinate):
+        sess.run(optimizer3, feed_dict={X:x, Y:y})
+      if (epoch+1)%1000==0:
+        cost = sess.run(mse3,feed_dict={X:abscissa,Y:ordinate})
+        print("Epoch",(epoch+1), ": Training Cost:", cost," a,b,c,d:",sess.run(a),sess.run(b),sess.run(c),sess.run(d))
+
+        training_cost = sess.run(mse3,feed_dict={X:abscissa,Y:ordinate})
+        coefficient1 = sess.run(a)
+        coefficient2 = sess.run(b)
+        coefficient3 = sess.run(c)
+        constant = sess.run(d)
+
+print(training_cost, coefficient1, coefficient2, coefficient3, constant)
+
+ +
Epoch 1000 : Training Cost: 4279814000.0  a,b,c,d: 670.1527 694.4212 751.4653 903.9527
+Epoch 2000 : Training Cost: 3770950400.0  a,b,c,d: 742.6414 666.3489 636.94525 859.2088
+Epoch 3000 : Training Cost: 3717708300.0  a,b,c,d: 756.2582 569.3339 448.105 748.23956
+Epoch 4000 : Training Cost: 3667464000.0  a,b,c,d: 769.4476 474.0318 265.5761 654.75525
+Epoch 5000 : Training Cost: 3620040700.0  a,b,c,d: 782.32324 380.54272 89.39888 578.5136
+Epoch 6000 : Training Cost: 3575265800.0  a,b,c,d: 794.8898 288.83356 -80.5215 519.13654
+Epoch 7000 : Training Cost: 3532972000.0  a,b,c,d: 807.1608 198.87044 -244.31102 476.2061
+Epoch 8000 : Training Cost: 3493009200.0  a,b,c,d: 819.13513 110.64169 -402.0677 449.3291
+Epoch 9000 : Training Cost: 3455228400.0  a,b,c,d: 830.80255 24.0964 -553.92804 438.0652
+Epoch 10000 : Training Cost: 3419475500.0  a,b,c,d: 842.21594 -60.797424 -700.0123 441.983
+Epoch 11000 : Training Cost: 3385625300.0  a,b,c,d: 853.3363 -144.08699 -840.467 460.6356
+Epoch 12000 : Training Cost: 3353544700.0  a,b,c,d: 864.19135 -225.8125 -975.4196 493.57703
+Epoch 13000 : Training Cost: 3323125000.0  a,b,c,d: 874.778 -305.98932 -1104.9867 540.39465
+Epoch 14000 : Training Cost: 3294257000.0  a,b,c,d: 885.1007 -384.63474 -1229.277 600.65607
+Epoch 15000 : Training Cost: 3266820000.0  a,b,c,d: 895.18823 -461.819 -1348.4417 673.9051
+Epoch 16000 : Training Cost: 3240736000.0  a,b,c,d: 905.0128 -537.541 -1462.6171 759.7118
+Epoch 17000 : Training Cost: 3215895000.0  a,b,c,d: 914.60065 -611.8676 -1571.9058 857.6638
+Epoch 18000 : Training Cost: 3192216800.0  a,b,c,d: 923.9603 -684.8093 -1676.4642 967.30475
+Epoch 19000 : Training Cost: 3169632300.0  a,b,c,d: 933.08594 -756.3582 -1776.4275 1088.2198
+Epoch 20000 : Training Cost: 3148046300.0  a,b,c,d: 941.9928 -826.6257 -1871.9355 1219.9702
+Epoch 21000 : Training Cost: 3127394800.0  a,b,c,d: 950.67896 -895.6205 -1963.0989 1362.1665
+Epoch 22000 : Training Cost: 3107608600.0  a,b,c,d: 959.1487 -963.38116 -2050.0586 1514.4026
+Epoch 23000 : Training Cost: 3088618200.0  a,b,c,d: 967.4355 -1029.9625 -2132.961 1676.2717
+Epoch 24000 : Training Cost: 3070361300.0  a,b,c,d: 975.52875 -1095.4292 -2211.854 1847.4485
+Epoch 25000 : Training Cost: 3052791300.0  a,b,c,d: 983.4346 -1159.7922 -2286.9412 2027.4857
+3052791300.0 983.4346 -1159.7922 -2286.9412 2027.4857
+
+ +
predictions = []
+for x in abscissa:
+  predictions.append((coefficient1*pow(x,3) + coefficient2*pow(x,2) + coefficient3*x + constant))
+plt.plot(abscissa , ordinate, 'ro', label ='Original data')
+plt.plot(abscissa, predictions, label ='Fitted line')
+plt.title('Cubic Regression Result')
+plt.legend()
+plt.show()
+
+ +

+ +

Quartic

+ +
with tf.Session() as sess:
+    sess.run(init)
+    for epoch in range(no_of_epochs):
+      for (x,y) in zip(abscissa, ordinate):
+        sess.run(optimizer4, feed_dict={X:x, Y:y})
+      if (epoch+1)%1000==0:
+        cost = sess.run(mse4,feed_dict={X:abscissa,Y:ordinate})
+        print("Epoch",(epoch+1), ": Training Cost:", cost," a,b,c,d:",sess.run(a),sess.run(b),sess.run(c),sess.run(d),sess.run(e))
+
+        training_cost = sess.run(mse4,feed_dict={X:abscissa,Y:ordinate})
+        coefficient1 = sess.run(a)
+        coefficient2 = sess.run(b)
+        coefficient3 = sess.run(c)
+        coefficient4 = sess.run(d)
+        constant = sess.run(e)
+
+print(training_cost, coefficient1, coefficient2, coefficient3, coefficient4, constant)
+
+ +
Epoch 1000 : Training Cost: 1902632600.0  a,b,c,d: 84.48304 52.210594 54.791424 142.51952 512.0343
+Epoch 2000 : Training Cost: 1854316200.0  a,b,c,d: 88.998955 13.073557 14.276088 223.55667 1056.4655
+Epoch 3000 : Training Cost: 1812812400.0  a,b,c,d: 92.9462 -22.331177 -15.262934 327.41858 1634.9054
+Epoch 4000 : Training Cost: 1775716000.0  a,b,c,d: 96.42522 -54.64535 -35.829437 449.5028 2239.1392
+Epoch 5000 : Training Cost: 1741494100.0  a,b,c,d: 99.524734 -84.43976 -49.181057 585.85876 2862.4915
+Epoch 6000 : Training Cost: 1709199600.0  a,b,c,d: 102.31984 -112.19895 -56.808075 733.1876 3499.6199
+Epoch 7000 : Training Cost: 1678261800.0  a,b,c,d: 104.87324 -138.32709 -59.9442 888.79626 4146.2944
+Epoch 8000 : Training Cost: 1648340600.0  a,b,c,d: 107.23536 -163.15173 -59.58964 1050.524 4798.979
+Epoch 9000 : Training Cost: 1619243400.0  a,b,c,d: 109.44742 -186.9409 -56.53944 1216.6432 5454.9463
+Epoch 10000 : Training Cost: 1590821900.0  a,b,c,d: 111.54233 -209.91287 -51.423084 1385.8513 6113.5137
+Epoch 11000 : Training Cost: 1563042200.0  a,b,c,d: 113.54405 -232.21953 -44.73371 1557.1084 6771.7046
+Epoch 12000 : Training Cost: 1535855600.0  a,b,c,d: 115.471565 -253.9838 -36.851135 1729.535 7429.069
+Epoch 13000 : Training Cost: 1509255300.0  a,b,c,d: 117.33939 -275.29697 -28.0714 1902.5308 8083.9634
+Epoch 14000 : Training Cost: 1483227000.0  a,b,c,d: 119.1605 -296.2472 -18.618649 2075.6094 8735.381
+Epoch 15000 : Training Cost: 1457726700.0  a,b,c,d: 120.94584 -316.915 -8.650095 2248.3247 9384.197
+Epoch 16000 : Training Cost: 1432777300.0  a,b,c,d: 122.69806 -337.30704 1.7027153 2420.5771 10028.871
+Epoch 17000 : Training Cost: 1408365000.0  a,b,c,d: 124.42179 -357.45245 12.33499 2592.2983 10669.157
+Epoch 18000 : Training Cost: 1384480000.0  a,b,c,d: 126.12332 -377.39734 23.168756 2763.0933 11305.027
+Epoch 19000 : Training Cost: 1361116800.0  a,b,c,d: 127.80568 -397.16415 34.160156 2933.0452 11935.669
+Epoch 20000 : Training Cost: 1338288100.0  a,b,c,d: 129.4674 -416.72803 45.259155 3101.7727 12561.179
+Epoch 21000 : Training Cost: 1315959700.0  a,b,c,d: 131.11403 -436.14285 56.4436 3269.3142 13182.058
+Epoch 22000 : Training Cost: 1294164700.0  a,b,c,d: 132.74377 -455.3779 67.6757 3435.3833 13796.807
+Epoch 23000 : Training Cost: 1272863600.0  a,b,c,d: 134.35779 -474.45316 78.96117 3600.264 14406.58
+Epoch 24000 : Training Cost: 1252052600.0  a,b,c,d: 135.9583 -493.38254 90.268616 3764.0078 15010.481
+Epoch 25000 : Training Cost: 1231713700.0  a,b,c,d: 137.54753 -512.1876 101.59372 3926.4897 15609.368
+1231713700.0 137.54753 -512.1876 101.59372 3926.4897 15609.368
+
+ +
predictions = []
+for x in abscissa:
+  predictions.append((coefficient1*pow(x,4) + coefficient2*pow(x,3) + coefficient3*pow(x,2) + coefficient4*x + constant))
+plt.plot(abscissa , ordinate, 'ro', label ='Original data')
+plt.plot(abscissa, predictions, label ='Fitted line')
+plt.title('Quartic Regression Result')
+plt.legend()
+plt.show()
+
+ +

+ +

Quintic

+ +
with tf.Session() as sess:
+    sess.run(init)
+    for epoch in range(no_of_epochs):
+      for (x,y) in zip(abscissa, ordinate):
+        sess.run(optimizer5, feed_dict={X:x, Y:y})
+      if (epoch+1)%1000==0:
+        cost = sess.run(mse5,feed_dict={X:abscissa,Y:ordinate})
+        print("Epoch",(epoch+1), ": Training Cost:", cost," a,b,c,d,e,f:",sess.run(a),sess.run(b),sess.run(c),sess.run(d),sess.run(e),sess.run(f))
+
+        training_cost = sess.run(mse5,feed_dict={X:abscissa,Y:ordinate})
+        coefficient1 = sess.run(a)
+        coefficient2 = sess.run(b)
+        coefficient3 = sess.run(c)
+        coefficient4 = sess.run(d)
+        coefficient5 = sess.run(e)
+        constant = sess.run(f)
+
+ +
Epoch 1000 : Training Cost: 1409200100.0  a,b,c,d,e,f: 7.949472 7.46219 55.626034 184.29028 484.00223 1024.0083
+Epoch 2000 : Training Cost: 1306882400.0  a,b,c,d,e,f: 8.732181 -4.0085897 73.25298 315.90103 904.08887 2004.9749
+Epoch 3000 : Training Cost: 1212606000.0  a,b,c,d,e,f: 9.732249 -16.90125 86.28379 437.06552 1305.055 2966.2188
+Epoch 4000 : Training Cost: 1123640400.0  a,b,c,d,e,f: 10.74851 -29.82692 98.59997 555.331 1698.4631 3917.9155
+Epoch 5000 : Training Cost: 1039694300.0  a,b,c,d,e,f: 11.75426 -42.598194 110.698326 671.64355 2085.5513 4860.8535
+Epoch 6000 : Training Cost: 960663550.0  a,b,c,d,e,f: 12.745439 -55.18337 122.644936 786.00214 2466.1638 5794.3735
+Epoch 7000 : Training Cost: 886438340.0  a,b,c,d,e,f: 13.721028 -67.57168 134.43822 898.3691 2839.9958 6717.659
+Epoch 8000 : Training Cost: 816913100.0  a,b,c,d,e,f: 14.679965 -79.75113 146.07385 1008.66895 3206.6692 7629.812
+Epoch 9000 : Training Cost: 751971500.0  a,b,c,d,e,f: 15.62181 -91.71608 157.55713 1116.7715 3565.8323 8529.976
+Epoch 10000 : Training Cost: 691508740.0  a,b,c,d,e,f: 16.545347 -103.4531 168.88321 1222.6348 3916.9785 9416.236
+Epoch 11000 : Training Cost: 635382000.0  a,b,c,d,e,f: 17.450052 -114.954254 180.03932 1326.1565 4259.842 10287.99
+Epoch 12000 : Training Cost: 583477250.0  a,b,c,d,e,f: 18.334944 -126.20821 191.02948 1427.2095 4593.8 11143.449
+Epoch 13000 : Training Cost: 535640400.0  a,b,c,d,e,f: 19.198917 -137.20206 201.84718 1525.6926 4918.5327 11981.633
+Epoch 14000 : Training Cost: 491722240.0  a,b,c,d,e,f: 20.041153 -147.92719 212.49709 1621.5496 5233.627 12800.468
+Epoch 15000 : Training Cost: 451559520.0  a,b,c,d,e,f: 20.860966 -158.37456 222.97133 1714.7141 5538.676 13598.337
+Epoch 16000 : Training Cost: 414988960.0  a,b,c,d,e,f: 21.657421 -168.53406 233.27422 1805.0874 5833.1978 14373.658
+Epoch 17000 : Training Cost: 381837920.0  a,b,c,d,e,f: 22.429693 -178.39536 243.39914 1892.5883 6116.847 15124.394
+Epoch 18000 : Training Cost: 351931300.0  a,b,c,d,e,f: 23.176882 -187.94789 253.3445 1977.137 6389.117 15848.417
+Epoch 19000 : Training Cost: 325074400.0  a,b,c,d,e,f: 23.898485 -197.18741 263.12512 2058.6716 6649.8037 16543.95
+Epoch 20000 : Training Cost: 301073570.0  a,b,c,d,e,f: 24.593851 -206.10497 272.72385 2137.1797 6898.544 17209.367
+Epoch 21000 : Training Cost: 279727000.0  a,b,c,d,e,f: 25.262104 -214.69217 282.14642 2212.6372 7135.217 17842.854
+Epoch 22000 : Training Cost: 260845550.0  a,b,c,d,e,f: 25.903376 -222.94969 291.4003 2284.9844 7359.4644 18442.408
+Epoch 23000 : Training Cost: 244218030.0  a,b,c,d,e,f: 26.517094 -230.8697 300.45532 2354.3003 7571.261 19007.49
+Epoch 24000 : Training Cost: 229660080.0  a,b,c,d,e,f: 27.102589 -238.44817 309.35342 2420.4185 7770.5728 19536.19
+Epoch 25000 : Training Cost: 216972400.0  a,b,c,d,e,f: 27.660324 -245.69016 318.10062 2483.3608 7957.354 20027.707
+216972400.0 27.660324 -245.69016 318.10062 2483.3608 7957.354 20027.707
+
+ +
predictions = []
+for x in abscissa:
+  predictions.append((coefficient1*pow(x,5) + coefficient2*pow(x,4) + coefficient3*pow(x,3) + coefficient4*pow(x,2) + coefficient5*x + constant))
+plt.plot(abscissa , ordinate, 'ro', label ='Original data')
+plt.plot(abscissa, predictions, label ='Fitted line')
+plt.title('Quintic Regression Result')
+plt.legend()
+plt.show()
+
+ +

+ +

Results and Conclusion

+ +

You just learnt Polynomial Regression using TensorFlow!

+ +

Notes

+ +

Overfitting

+ +
+
+

Overfitting refers to a model that models the training data too well. + Overfitting happens when a model learns the detail and noise in the training data to the extent that it negatively impacts the performance of the model on new data. This means that the noise or random fluctuations in the training data are picked up and learned as concepts by the model. The problem is that these concepts do not apply to new data and negatively impact the model's ability to generalise.

+
+
+ +
+

Source: Machine Learning Mastery

+
+ +

Basically if you train your machine learning model on a small dataset for a really large number of epochs, the model will learn all the deformities/noise in the data and will actually think that it is a normal part. Therefore when it will see some new data, it will discard that new data as noise and will impact the accuracy of the model in a negative manner

+ +
+ + + + + + \ No newline at end of file diff --git a/docs/posts/2019-12-22-Fake-News-Detector.html b/docs/posts/2019-12-22-Fake-News-Detector.html new file mode 100644 index 0000000..ed86ba4 --- /dev/null +++ b/docs/posts/2019-12-22-Fake-News-Detector.html @@ -0,0 +1,262 @@ + + + + + + + + + Hey - Post + + + + + +
+

Building a Fake News Detector with Turicreate

+ +

In this tutorial we will build a fake news detecting app from scratch, using Turicreate for the machine learning model and SwiftUI for building the app

+ +

Note: These commands are written as if you are running a jupyter notebook.

+ +

Building the Machine Learning Model

+ +

Data Gathering

+ +

To build a classifier, you need a lot of data. George McIntire (GH: @joolsa) has created a wonderful dataset containing the headline, body and whether it is fake or real. +Whenever you are looking for a dataset, always try searching on Kaggle and GitHub before you start building your own

+ +

Dependencies

+ +

I used a Google Colab instance for training my model. If you also plan on using Google Colab, then I recommend choosing a GPU instance (it is free). +This allows you to train the model on the GPU. Turicreate is built on top of Apache's MXNet framework; for us to use a GPU, we need to install +a CUDA-compatible MXNet package.

+ +
!pip install turicreate
+!pip uninstall -y mxnet
+!pip install mxnet-cu100==1.4.0.post0
+
+ +

If you do not wish to train on GPU or are running it on your computer, you can ignore the last two lines

+ +

Downloading the Dataset

+ +
!wget -q "https://github.com/joolsa/fake_real_news_dataset/raw/master/fake_or_real_news.csv.zip"
+!unzip fake_or_real_news.csv.zip
+
+ +

Model Creation

+ +
import turicreate as tc
+tc.config.set_num_gpus(-1) # If you do not wish to use GPUs, set it to 0
+
+ +
dataSFrame = tc.SFrame('fake_or_real_news.csv')
+
+ +

The dataset contains a column named "X1", which is of no use to us. Therefore, we simply drop it

+ +
dataSFrame.remove_column('X1')
+
+ +

Splitting Dataset

+ +
train, test = dataSFrame.random_split(.9)
+
+ +

Training

+ +
model = tc.text_classifier.create(
+    dataset=train,
+    target='label',
+    features=['title','text']
+)
+
+ +
+-----------+----------+-----------+--------------+-------------------+---------------------+
+| Iteration | Passes   | Step size | Elapsed Time | Training Accuracy | Validation Accuracy |
++-----------+----------+-----------+--------------+-------------------+---------------------+
+| 0         | 2        | 1.000000  | 1.156349     | 0.889680          | 0.790036            |
+| 1         | 4        | 1.000000  | 1.359196     | 0.985952          | 0.918149            |
+| 2         | 6        | 0.820091  | 1.557205     | 0.990260          | 0.914591            |
+| 3         | 7        | 1.000000  | 1.684872     | 0.998689          | 0.925267            |
+| 4         | 8        | 1.000000  | 1.814194     | 0.999063          | 0.925267            |
+| 9         | 14       | 1.000000  | 2.507072     | 1.000000          | 0.911032            |
++-----------+----------+-----------+--------------+-------------------+---------------------+
+
+ +

Testing the Model

+ +
test_predictions = model.predict(test)
+accuracy = tc.evaluation.accuracy(test['label'], test_predictions)
+print(f'Topic classifier model has a testing accuracy of {accuracy*100}% ', flush=True)
+
+ +
Topic classifier model has a testing accuracy of 92.3076923076923%
+
+ +

We have just created our own Fake News Detection Model which has an accuracy of 92%!

+ +
example_text = {"title": ["Middling ‘Rise Of Skywalker’ Review Leaves Fan On Fence About Whether To Threaten To Kill Critic"], "text": ["Expressing ambivalence toward the relatively balanced appraisal of the film, Star Wars fan Miles Ariely admitted Thursday that an online publication’s middling review of The Rise Of Skywalker had left him on the fence about whether he would still threaten to kill the critic who wrote it. “I’m really of two minds about this, because on the one hand, he said the new movie fails to live up to the original trilogy, which makes me at least want to throw a brick through his window with a note telling him to watch his back,” said Ariely, confirming he had already drafted an eight-page-long death threat to Stan Corimer of the website Screen-On Time, but had not yet decided whether to post it to the reviewer’s Facebook page. “On the other hand, though, he commended J.J. Abrams’ skillful pacing and faithfulness to George Lucas’ vision, which makes me wonder if I should just call the whole thing off. Now, I really don’t feel like camping outside his house for hours. Maybe I could go with a response that’s somewhere in between, like, threatening to kill his dog but not everyone in his whole family? I don’t know. This is a tough one.” At press time, sources reported that Ariely had resolved to wear his Ewok costume while he murdered the critic in his sleep."]}
+example_prediction = model.classify(tc.SFrame(example_text))
+print(example_prediction, flush=True)
+
+ +
+-------+--------------------+
+| class |    probability     |
++-------+--------------------+
+|  FAKE | 0.9245648658345308 |
++-------+--------------------+
+[1 rows x 2 columns]
+
+ +

Exporting the Model

+ +
model_name = 'FakeNews'
+coreml_model_name = model_name + '.mlmodel'
+exportedModel = model.export_coreml(coreml_model_name)
+
+ +

Note: To download files from Google Colab, simply click on the files section in the sidebar, right click on filename and then click on download

+ +

Link to Colab Notebook

+ +

Building the App using SwiftUI

+ +

Initial Setup

+ +

First we create a single view app (make sure you check the use SwiftUI button)

+ +

Then we copy our .mlmodel file to our project (Just drag and drop the file in the XCode Files Sidebar)

+ +

Our ML model does not take a string directly as an input; rather, it takes a bag of words as an input. +The bag-of-words model is a simplifying representation used in NLP; in it, text is represented as a bag of words, without any regard for grammar or order, but keeping multiplicity.

+ +

We define our bag of words function

+ +
func bow(text: String) -> [String: Double] {
+        var bagOfWords = [String: Double]()
+
+        let tagger = NSLinguisticTagger(tagSchemes: [.tokenType], options: 0)
+        let range = NSRange(location: 0, length: text.utf16.count)
+        let options: NSLinguisticTagger.Options = [.omitPunctuation, .omitWhitespace]
+        tagger.string = text
+
+        tagger.enumerateTags(in: range, unit: .word, scheme: .tokenType, options: options) { _, tokenRange, _ in
+            let word = (text as NSString).substring(with: tokenRange)
+            if bagOfWords[word] != nil {
+                bagOfWords[word]! += 1
+            } else {
+                bagOfWords[word] = 1
+            }
+        }
+
+        return bagOfWords
+    }
+
+ +

We also declare our variables

+ +
@State private var title: String = ""
+@State private var headline: String = ""
+@State private var alertTitle = ""
+@State private var alertText = ""
+@State private var showingAlert = false
+
+ +

Finally, we implement a simple function which reads the two text fields, creates their bag of words representation and displays an alert with the appropriate result

+ +

Complete Code

+ +
import SwiftUI
+
+struct ContentView: View {
+    @State private var title: String = ""
+    @State private var headline: String = ""
+
+    @State private var alertTitle = ""
+    @State private var alertText = ""
+    @State private var showingAlert = false
+
+    var body: some View {
+        NavigationView {
+            VStack(alignment: .leading) {
+                Text("Headline").font(.headline)
+                TextField("Please Enter Headline", text: $title)
+                    .lineLimit(nil)
+                Text("Body").font(.headline)
+                TextField("Please Enter the content", text: $headline)
+                .lineLimit(nil)
+            }
+                .navigationBarTitle("Fake News Checker")
+            .navigationBarItems(trailing:
+                Button(action: classifyFakeNews) {
+                    Text("Check")
+                })
+            .padding()
+                .alert(isPresented: $showingAlert){
+                    Alert(title: Text(alertTitle), message: Text(alertText), dismissButton: .default(Text("OK")))
+            }
+        }
+
+    }
+
+    func classifyFakeNews(){
+        let model = FakeNews()
+        let myTitle = bow(text: title)
+        let myText = bow(text: headline)
+        do {
+            let prediction = try model.prediction(title: myTitle, text: myText)
+            alertTitle = prediction.label
+            alertText = "It is likely that this piece of news is \(prediction.label.lowercased())."
+            print(alertText)
+        } catch {
+            alertTitle = "Error"
+            alertText = "Sorry, could not classify if the input news was fake or not."
+        }
+
+        showingAlert = true
+    }
+    func bow(text: String) -> [String: Double] {
+        var bagOfWords = [String: Double]()
+
+        let tagger = NSLinguisticTagger(tagSchemes: [.tokenType], options: 0)
+        let range = NSRange(location: 0, length: text.utf16.count)
+        let options: NSLinguisticTagger.Options = [.omitPunctuation, .omitWhitespace]
+        tagger.string = text
+
+        tagger.enumerateTags(in: range, unit: .word, scheme: .tokenType, options: options) { _, tokenRange, _ in
+            let word = (text as NSString).substring(with: tokenRange)
+            if bagOfWords[word] != nil {
+                bagOfWords[word]! += 1
+            } else {
+                bagOfWords[word] = 1
+            }
+        }
+
+        return bagOfWords
+    }
+}
+
+struct ContentView_Previews: PreviewProvider {
+    static var previews: some View {
+        ContentView()
+    }
+}
+
+ +
+ + + + + + \ No newline at end of file diff --git a/docs/posts/2020-01-14-Converting-between-PIL-NumPy.html b/docs/posts/2020-01-14-Converting-between-PIL-NumPy.html new file mode 100644 index 0000000..7759e9a --- /dev/null +++ b/docs/posts/2020-01-14-Converting-between-PIL-NumPy.html @@ -0,0 +1,52 @@ + + + + + + + + + Hey - Post + + + + + +
+

Converting between image and NumPy array

+ +
import numpy
+import PIL
+
+# Convert PIL Image to NumPy array
+img = PIL.Image.open("foo.jpg")
+arr = numpy.array(img)
+
+# Convert array to Image
+img = PIL.Image.fromarray(arr)
+
+ +

Saving an Image

+ +
try:
+    img.save(destination, "JPEG", quality=80, optimize=True, progressive=True)
+except IOError:
+    PIL.ImageFile.MAXBLOCK = img.size[0] * img.size[1]
+    img.save(destination, "JPEG", quality=80, optimize=True, progressive=True)
+
+ +
+ + + + + + \ No newline at end of file diff --git a/docs/posts/2020-01-15-Setting-up-Kaggle-to-use-with-Colab.html b/docs/posts/2020-01-15-Setting-up-Kaggle-to-use-with-Colab.html new file mode 100644 index 0000000..26753e8 --- /dev/null +++ b/docs/posts/2020-01-15-Setting-up-Kaggle-to-use-with-Colab.html @@ -0,0 +1,72 @@ + + + + + + + + + Hey - Post + + + + + +
+

Setting up Kaggle to use with Google Colab

+ +

In order to be able to access Kaggle Datasets, you will need to have an account on Kaggle (which is Free)

+ +

Grabbing Our Tokens

+ +

Go to Kaggle

+ +

"Homepage"

+ +

Click on your User Profile and Click on My Account

+ +

"Account"

+ +

Scroll Down until you see Create New API Token

+ +

+ +

This will download your token as a JSON file

+ +

+ +

Copy the File to the root folder of your Google Drive

+ +

Setting up Colab

+ +

Mounting Google Drive

+ +
import os
+from google.colab import drive
+drive.mount('/content/drive')
+
+ +

After this click on the URL in the output section, login and then paste the Auth Code

+ +

Configuring Kaggle

+ +
os.environ['KAGGLE_CONFIG_DIR'] = "/content/drive/My Drive/"
+
+ +

Voila! You can now download Kaggle datasets

+ +
+ + + + + + \ No newline at end of file diff --git a/docs/posts/2020-01-16-Image-Classifier-Using-Turicreate.html b/docs/posts/2020-01-16-Image-Classifier-Using-Turicreate.html new file mode 100644 index 0000000..f149a5c --- /dev/null +++ b/docs/posts/2020-01-16-Image-Classifier-Using-Turicreate.html @@ -0,0 +1,276 @@ + + + + + + + + + Hey - Post + + + + + +
+

Creating a Custom Image Classifier using Turicreate to detect Smoke and Fire

+ +

For setting up Kaggle with Google Colab, please refer to my previous post

+ +

Dataset

+ +

Mounting Google Drive

+ +
import os
+from google.colab import drive
+drive.mount('/content/drive')
+
+ +

Downloading Dataset from Kaggle

+ +
os.environ['KAGGLE_CONFIG_DIR'] = "/content/drive/My Drive/"
+!kaggle datasets download ashutosh69/fire-and-smoke-dataset
+!unzip "fire-and-smoke-dataset.zip"
+
+ +

Pre-Processing

+ +
!mkdir default smoke fire
+
+ +

\

+ +
!ls data/data/img_data/train/default/*.jpg
+
+ +

\

+ +
img_1002.jpg   img_20.jpg     img_519.jpg     img_604.jpg       img_80.jpg
+img_1003.jpg   img_21.jpg     img_51.jpg     img_60.jpg       img_8.jpg
+img_1007.jpg   img_22.jpg     img_520.jpg     img_61.jpg       img_900.jpg
+img_100.jpg    img_23.jpg     img_521.jpg    'img_62 (2).jpg'   img_920.jpg
+img_1014.jpg   img_24.jpg    'img_52 (2).jpg'     img_62.jpg       img_921.jpg
+img_1018.jpg   img_29.jpg     img_522.jpg    'img_63 (2).jpg'   img_922.jpg
+img_101.jpg    img_3000.jpg   img_523.jpg     img_63.jpg       img_923.jpg
+img_1027.jpg   img_335.jpg    img_524.jpg     img_66.jpg       img_924.jpg
+img_102.jpg    img_336.jpg    img_52.jpg     img_67.jpg       img_925.jpg
+img_1042.jpg   img_337.jpg    img_530.jpg     img_68.jpg       img_926.jpg
+img_1043.jpg   img_338.jpg    img_531.jpg     img_700.jpg       img_927.jpg
+img_1046.jpg   img_339.jpg   'img_53 (2).jpg'     img_701.jpg       img_928.jpg
+img_1052.jpg   img_340.jpg    img_532.jpg     img_702.jpg       img_929.jpg
+img_107.jpg    img_341.jpg    img_533.jpg     img_703.jpg       img_930.jpg
+img_108.jpg    img_3.jpg      img_537.jpg     img_704.jpg       img_931.jpg
+img_109.jpg    img_400.jpg    img_538.jpg     img_705.jpg       img_932.jpg
+img_10.jpg     img_471.jpg    img_539.jpg     img_706.jpg       img_933.jpg
+img_118.jpg    img_472.jpg    img_53.jpg     img_707.jpg       img_934.jpg
+img_12.jpg     img_473.jpg    img_540.jpg     img_708.jpg       img_935.jpg
+img_14.jpg     img_488.jpg    img_541.jpg     img_709.jpg       img_938.jpg
+img_15.jpg     img_489.jpg   'img_54 (2).jpg'     img_70.jpg       img_958.jpg
+img_16.jpg     img_490.jpg    img_542.jpg     img_710.jpg       img_971.jpg
+img_17.jpg     img_491.jpg    img_543.jpg    'img_71 (2).jpg'   img_972.jpg
+img_18.jpg     img_492.jpg    img_54.jpg     img_71.jpg       img_973.jpg
+img_19.jpg     img_493.jpg   'img_55 (2).jpg'     img_72.jpg       img_974.jpg
+img_1.jpg      img_494.jpg    img_55.jpg     img_73.jpg       img_975.jpg
+img_200.jpg    img_495.jpg    img_56.jpg     img_74.jpg       img_980.jpg
+img_201.jpg    img_496.jpg    img_57.jpg     img_75.jpg       img_988.jpg
+img_202.jpg    img_497.jpg    img_58.jpg     img_76.jpg       img_9.jpg
+img_203.jpg    img_4.jpg      img_59.jpg     img_77.jpg
+img_204.jpg    img_501.jpg    img_601.jpg     img_78.jpg
+img_205.jpg    img_502.jpg    img_602.jpg     img_79.jpg
+img_206.jpg    img_50.jpg     img_603.jpg     img_7.jpg
+
+ +

The image files are not actually JPEG, thus we first need to save them in the correct format for Turicreate

+ +
from PIL import Image
+import glob
+
+
+folders = ["default","smoke","fire"]
+for folder in folders:
+  n = 1
+  for file in glob.glob("./data/data/img_data/train/" + folder + "/*.jpg"):
+    im = Image.open(file)
+    rgb_im = im.convert('RGB')
+    rgb_im.save((folder + "/" + str(n) + ".jpg"), quality=100)
+    n +=1 
+  for file in glob.glob("./data/data/img_data/train/" + folder + "/*.jpg"):
+    im = Image.open(file)
+    rgb_im = im.convert('RGB')
+    rgb_im.save((folder + "/" + str(n) + ".jpg"), quality=100)
+    n +=1
+
+ +

\

+ +
!mkdir train
+!mv default ./train
+!mv smoke ./train
+!mv fire ./train
+
+ +

Making the Image Classifier

+ +

Making an SFrame

+ +
!pip install turicreate
+
+ +

\

+ +
import turicreate as tc
+import os
+
+data = tc.image_analysis.load_images("./train", with_path=True)
+
+data["label"] = data["path"].apply(lambda path: os.path.basename(os.path.dirname(path)))
+
+print(data)
+
+data.save('fire-smoke.sframe')
+
+ +

\

+ +
+-------------------------+------------------------+
+|           path          |         image          |
++-------------------------+------------------------+
+|  ./train/default/1.jpg  | Height: 224 Width: 224 |
+|  ./train/default/10.jpg | Height: 224 Width: 224 |
+| ./train/default/100.jpg | Height: 224 Width: 224 |
+| ./train/default/101.jpg | Height: 224 Width: 224 |
+| ./train/default/102.jpg | Height: 224 Width: 224 |
+| ./train/default/103.jpg | Height: 224 Width: 224 |
+| ./train/default/104.jpg | Height: 224 Width: 224 |
+| ./train/default/105.jpg | Height: 224 Width: 224 |
+| ./train/default/106.jpg | Height: 224 Width: 224 |
+| ./train/default/107.jpg | Height: 224 Width: 224 |
++-------------------------+------------------------+
+[2028 rows x 2 columns]
+Note: Only the head of the SFrame is printed.
+You can use print_rows(num_rows=m, num_columns=n) to print more rows and columns.
++-------------------------+------------------------+---------+
+|           path          |         image          |  label  |
++-------------------------+------------------------+---------+
+|  ./train/default/1.jpg  | Height: 224 Width: 224 | default |
+|  ./train/default/10.jpg | Height: 224 Width: 224 | default |
+| ./train/default/100.jpg | Height: 224 Width: 224 | default |
+| ./train/default/101.jpg | Height: 224 Width: 224 | default |
+| ./train/default/102.jpg | Height: 224 Width: 224 | default |
+| ./train/default/103.jpg | Height: 224 Width: 224 | default |
+| ./train/default/104.jpg | Height: 224 Width: 224 | default |
+| ./train/default/105.jpg | Height: 224 Width: 224 | default |
+| ./train/default/106.jpg | Height: 224 Width: 224 | default |
+| ./train/default/107.jpg | Height: 224 Width: 224 | default |
++-------------------------+------------------------+---------+
+[2028 rows x 3 columns]
+Note: Only the head of the SFrame is printed.
+You can use print_rows(num_rows=m, num_columns=n) to print more rows and columns.
+
+ +

Making the Model

+ +
import turicreate as tc
+
+# Load the data
+data =  tc.SFrame('fire-smoke.sframe')
+
+# Make a train-test split
+train_data, test_data = data.random_split(0.8)
+
+# Create the model
+model = tc.image_classifier.create(train_data, target='label')
+
+# Save predictions to an SArray
+predictions = model.predict(test_data)
+
+# Evaluate the model and print the results
+metrics = model.evaluate(test_data)
+print(metrics['accuracy'])
+
+# Save the model for later use in Turi Create
+model.save('fire-smoke.model')
+
+# Export for use in Core ML
+model.export_coreml('fire-smoke.mlmodel')
+
+ +

\

+ +
Performing feature extraction on resized images...
+Completed   64/1633
+Completed  128/1633
+Completed  192/1633
+Completed  256/1633
+Completed  320/1633
+Completed  384/1633
+Completed  448/1633
+Completed  512/1633
+Completed  576/1633
+Completed  640/1633
+Completed  704/1633
+Completed  768/1633
+Completed  832/1633
+Completed  896/1633
+Completed  960/1633
+Completed 1024/1633
+Completed 1088/1633
+Completed 1152/1633
+Completed 1216/1633
+Completed 1280/1633
+Completed 1344/1633
+Completed 1408/1633
+Completed 1472/1633
+Completed 1536/1633
+Completed 1600/1633
+Completed 1633/1633
+PROGRESS: Creating a validation set from 5 percent of training data. This may take a while.
+          You can set ``validation_set=None`` to disable validation tracking.
+
+Logistic regression:
+--------------------------------------------------------
+Number of examples          : 1551
+Number of classes           : 3
+Number of feature columns   : 1
+Number of unpacked features : 2048
+Number of coefficients      : 4098
+Starting L-BFGS
+--------------------------------------------------------
++-----------+----------+-----------+--------------+-------------------+---------------------+
+| Iteration | Passes   | Step size | Elapsed Time | Training Accuracy | Validation Accuracy |
++-----------+----------+-----------+--------------+-------------------+---------------------+
+| 0         | 6        | 0.018611  | 0.891830     | 0.553836          | 0.560976            |
+| 1         | 10       | 0.390832  | 1.622383     | 0.744681          | 0.792683            |
+| 2         | 11       | 0.488541  | 1.943987     | 0.733075          | 0.804878            |
+| 3         | 14       | 2.442703  | 2.512545     | 0.727917          | 0.841463            |
+| 4         | 15       | 2.442703  | 2.826964     | 0.861380          | 0.853659            |
+| 9         | 28       | 2.340435  | 5.492035     | 0.941328          | 0.975610            |
++-----------+----------+-----------+--------------+-------------------+---------------------+
+Performing feature extraction on resized images...
+Completed  64/395
+Completed 128/395
+Completed 192/395
+Completed 256/395
+Completed 320/395
+Completed 384/395
+Completed 395/395
+0.9316455696202531
+
+ +

We just got an accuracy of 94% on Training Data and 97% on Validation Data!

+ +
+ + + + + + \ No newline at end of file diff --git a/docs/posts/2020-01-19-Connect-To-Bluetooth-Devices-Linux-Terminal.html b/docs/posts/2020-01-19-Connect-To-Bluetooth-Devices-Linux-Terminal.html new file mode 100644 index 0000000..85d13aa --- /dev/null +++ b/docs/posts/2020-01-19-Connect-To-Bluetooth-Devices-Linux-Terminal.html @@ -0,0 +1,52 @@ + + + + + + + + + Hey - Post + + + + + +
+

How to setup Bluetooth on a Raspberry Pi

+ +

This was tested on a Raspberry Pi Zero W

+ +

Enter in the Bluetooth Mode

+ +

pi@raspberrypi:~ $ bluetoothctl

+ +

[bluetooth]# agent on

+ +

[bluetooth]# default-agent

+ +

[bluetooth]# scan on

+ +

To Pair

+ +

While being in bluetooth mode

+ +

[bluetooth]# pair XX:XX:XX:XX:XX:XX

+ +

To Exit out of bluetoothctl anytime, just type exit

+ +
+ + + + + + \ No newline at end of file diff --git a/docs/posts/2020-03-03-Playing-With-Android-TV.html b/docs/posts/2020-03-03-Playing-With-Android-TV.html new file mode 100644 index 0000000..e4cc18b --- /dev/null +++ b/docs/posts/2020-03-03-Playing-With-Android-TV.html @@ -0,0 +1,103 @@ + + + + + + + + + Hey - Post + + + + + +
+

Tinkering with an Android TV

+ +

So I have an Android TV; this post covers everything I have tried on it

+ +

Contents

+ +
    +
  1. Getting TV's IP Address
  2. +
  3. Enable Developer Settings
  4. +
  5. Enable ADB
  6. +
  7. Connect ADB
  8. +
  9. Manipulating Packages
  10. +
+ +

IP-Address

+ +

These steps should be similar for all Android-TVs

+ +
    +
  • Go To Settings
  • +
  • Go to Network
  • +
  • Advanced Settings
  • +
  • Network Status
  • +
  • Note Down IP-Address
  • +
+ +

The other option is to go to your router's server page and get connected devices

+ +

Developer-Settings

+ +
    +
  • Go To Settings
  • +
  • About
  • +
  • Continuously click on the "Build" option until it says "You are a Developer"
  • +
+ +

Enable-ADB

+ +
    +
  • Go to Settings
  • +
  • Go to Developer Options
  • +
  • Scroll until you find ADB Debugging and enable that option
  • +
+ +

Connect-ADB

+ +
    +
  • Open Terminal (Make sure you have ADB installed)
  • +
  • Enter the following command adb connect <IP_ADDRESS>
  • +
  • To test the connection run adb logcat
  • +
+ +

Manipulating Apps / Packages

+ +

Listing Packages

+ +
    +
  • adb shell
  • +
  • pm list packages
  • +
+ +

Installing Packages

+ +
    +
  • adb install -r package.apk
  • +
+ +

Uninstalling Packages

+ +
    +
  • adb uninstall com.company.yourpackagename
  • +
+ +
+ + + + + + \ No newline at end of file diff --git a/docs/posts/2020-03-08-Making-Vaporwave-Track.html b/docs/posts/2020-03-08-Making-Vaporwave-Track.html new file mode 100644 index 0000000..fe385f5 --- /dev/null +++ b/docs/posts/2020-03-08-Making-Vaporwave-Track.html @@ -0,0 +1,62 @@ + + + + + + + + + Hey - Post + + + + + +
+

Making My First Vaporwave Track (Remix)

+ +

I finally completed my first quick and dirty vaporwave remix of "I Want It That Way" by the Backstreet Boys

+ +

V A P O R W A V E

+ +

Vaporwave is all about A E S T H E T I C S. Vaporwave is a type of music genre that emerged as a parody of Chillwave, shared more as a meme than as a proper musical genre. Of course this changed as the genre became mature

+ +

How to Vaporwave

+ +

The first track which is considered to be actual Vaporwave is Ramona Xavier's Macintosh Plus; this set the guidelines for making Vaporwave

+ +
    +
  • Take a 1980s RnB song
  • +
  • Slow it down
  • +
  • Add Bass and Treble
  • +
  • Add again
  • +
  • Add Reverb ( make sure its wet )
  • +
+ +

There you have your very own Vaporwave track.

+ +

( Now, there are some tracks being produced which are not remixes and are original )

+ +

My Remix

+ + + +

Where is the Programming?

+ +

The fact that there are steps on producing Vaporwave, this gave me the idea that Vaporwave can actually be made using programming, stay tuned for when I publish the program which I am working on ( Generating A E S T H E T I C artwork and remixes)

+ +
+ + + + + + \ No newline at end of file diff --git a/docs/posts/2020-04-13-Fixing-X11-Error-AmberTools-macOS.html b/docs/posts/2020-04-13-Fixing-X11-Error-AmberTools-macOS.html new file mode 100644 index 0000000..9d37882 --- /dev/null +++ b/docs/posts/2020-04-13-Fixing-X11-Error-AmberTools-macOS.html @@ -0,0 +1,61 @@ + + + + + + + + + Hey - Post + + + + + +
+

Fixing X11 Error on macOS Catalina for AmberTools 18/19

+ +

I was trying to install AmberTools on my macOS Catalina Installation. Running ./configure -macAccelerate clang gave me an error that it could not find X11 libraries, even though locate libXt showed that my installation was correct.

+ +

Error:

+ +
Could not find the X11 libraries; you may need to edit config.h
+   to set the XHOME and XLIBS variables.
+Error: The X11 libraries are not in the usual location !
+       To search for them try the command: locate libXt
+       On new Fedora OS's install the libXt-devel libXext-devel
+       libX11-devel libICE-devel libSM-devel packages.
+       On old Fedora OS's install the xorg-x11-devel package.
+       On RedHat OS's install the XFree86-devel package.
+       On Ubuntu OS's install the xorg-dev and xserver-xorg packages.
+
+          ...more info for various linuxes at ambermd.org/ubuntu.html
+
+       To build Amber without XLEaP, re-run configure with '-noX11:
+            ./configure -noX11 --with-python /usr/local/bin/python3 -macAccelerate clang
+Configure failed due to the errors above!
+
+ +

I searched on Google for a solution. Sadly, there was not even a single thread which had a solution about this error.

+ +

The Fix

+ +

Simply reinstalling XQuartz using homebrew fixed the error brew cask reinstall xquartz

+ +

If you do not have XQuartz installed, you need to run brew cask install xquartz

+ +
+ + + + + + \ No newline at end of file diff --git a/docs/posts/2020-05-31-compiling-open-babel-on-ios.html b/docs/posts/2020-05-31-compiling-open-babel-on-ios.html new file mode 100644 index 0000000..bccab0e --- /dev/null +++ b/docs/posts/2020-05-31-compiling-open-babel-on-ios.html @@ -0,0 +1,147 @@ + + + + + + + + + Hey - Post + + + + + +
+

Compiling Open Babel on iOS

+ +

Due to the fact that my summer vacations started today, +I had the brilliant idea of trying to run open babel on my iPad. +To give a little background, I had tried to compile AutoDock Vina using a cross-compiler but I had miserably failed.

+ +

I am running the Checkr1n jailbreak on my iPad and the Unc0ver jailbreak on my phone.

+ +

But Why?

+ +

Well, just because I can. This is literally the only reason I tried compiling it and also partially because in the long run I want to compile AutoDock Vina so I can do Molecular Docking on the go.

+ +

Let's Go!

+ +

How hard can it be to compile open babel right? It is just a simple software with clear and concise build instructions. I just need to use cmake to build and the make to install.

+ +

It was 11 AM in the morning. I installed clang, cmake and make from Sam Bingner's repository, fired up ssh, downloaded the source code and ran the build command.

+ +

Fail No. 1

+ +

I couldn't even get cmake to run. I did a little digging around StackOverflow and found that I needed the iOS SDK — sure, no problem. I waited for Xcode to update and transferred the SDKs to my iPad

+ +
scp -r /Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk root@192.168.1.8:/var/sdks/
+
+ +

Then I told cmake that this was the location for my SDK 😠. Successful! Now I just needed to use make.

+ +

Fail No. 2

+ +

It was giving the error that thread-local-storage was not supported on this device.

+ +
[  0%] Building CXX object src/CMakeFiles/openbabel.dir/alias.cpp.o
+[  1%] Building CXX object src/CMakeFiles/openbabel.dir/atom.cpp.o
+In file included from /var/root/obabel/ob-src/src/atom.cpp:28:
+In file included from /var/root/obabel/ob-src/include/openbabel/ring.h:29:
+/var/root/obabel/ob-src/include/openbabel/typer.h:70:1: error: thread-local storage is not supported for the current target
+THREAD_LOCAL OB_EXTERN OBAtomTyper      atomtyper;
+^
+/var/root/obabel/ob-src/include/openbabel/mol.h:35:24: note: expanded from macro 'THREAD_LOCAL'
+#  define THREAD_LOCAL thread_local
+                       ^
+In file included from /var/root/obabel/ob-src/src/atom.cpp:28:
+In file included from /var/root/obabel/ob-src/include/openbabel/ring.h:29:
+/var/root/obabel/ob-src/include/openbabel/typer.h:84:1: error: thread-local storage is not supported for the current target
+THREAD_LOCAL OB_EXTERN OBAromaticTyper  aromtyper;
+^
+/var/root/obabel/ob-src/include/openbabel/mol.h:35:24: note: expanded from macro 'THREAD_LOCAL'
+#  define THREAD_LOCAL thread_local
+                       ^
+/var/root/obabel/ob-src/src/atom.cpp:107:10: error: thread-local storage is not supported for the current target
+  extern THREAD_LOCAL OBAromaticTyper  aromtyper;
+         ^
+/var/root/obabel/ob-src/include/openbabel/mol.h:35:24: note: expanded from macro 'THREAD_LOCAL'
+#  define THREAD_LOCAL thread_local
+                       ^
+/var/root/obabel/ob-src/src/atom.cpp:108:10: error: thread-local storage is not supported for the current target
+  extern THREAD_LOCAL OBAtomTyper      atomtyper;
+         ^
+/var/root/obabel/ob-src/include/openbabel/mol.h:35:24: note: expanded from macro 'THREAD_LOCAL'
+#  define THREAD_LOCAL thread_local
+                       ^
+/var/root/obabel/ob-src/src/atom.cpp:109:10: error: thread-local storage is not supported for the current target
+  extern THREAD_LOCAL OBPhModel        phmodel;
+         ^
+/var/root/obabel/ob-src/include/openbabel/mol.h:35:24: note: expanded from macro 'THREAD_LOCAL'
+#  define THREAD_LOCAL thread_local
+                       ^
+5 errors generated.
+make[2]: *** [src/CMakeFiles/openbabel.dir/build.make:76: src/CMakeFiles/openbabel.dir/atom.cpp.o] Error 1
+make[1]: *** [CMakeFiles/Makefile2:1085: src/CMakeFiles/openbabel.dir/all] Error 2
+make: *** [Makefile:129: all] Error 2
+
+ +

Strange but it is alright, there is nothing that hasn't been answered on the internet.

+ +

I did a little digging around and could not find a solution 😔

+ +

As a temporary fix, I disabled multithreading by going and commenting the lines in the source code.

+ +

"Open-Babel running on my iPad"

+ +

Packaging as a deb

+ +

This was pretty straightforward; I tried installing it on my iPad and it was working pretty smoothly.

+ +

Moment of Truth

+ +

So I airdropped the .deb to my phone and tried installing it, the installation was successful but when I tried obabel it just aborted.

+ +

"Open Babel crashing"

+ +

Turns out because I had created an install target of a separate folder while compiling, the binaries were referencing a non-existing dylib rather than those in the /usr/lib folder. As a quick workaround I transferred the deb folder to my laptop and used otool and install_name tool: install_name_tool -change /var/root/obabel/ob-build/lib/libopenbabel.7.dylib /usr/lib/libopenbabel.7.dylib for all the executables and then signed them using jtool

+ +

I then installed it and everything went smoothly, I even ran obabel and it executed perfectly, showing the version number 3.1.0 ✌️ Ahh, smooth victory.

+ +

Nope. When I tried converting from SMILES to pdbqt, it gave an error saying plugin not found. This was weird.

+ +

"Open Babel Plugin Error"

+ +

So I just copied the entire build folder from my iPad to my phone and tried running it. Oops, Apple Sandbox Error, Oh no!

+ +

I spent 2 hours around this problem, only to see the documentation and realise I hadn't setup the environment variable 🤦‍♂️

+ +

The Final Fix ( For Now )

+ +
export BABEL_DATADIR="/usr/share/openbabel/3.1.0"
+export BABEL_LIBDIR="/usr/lib/openbabel/3.1.0"
+
+ +

This was the tragedy of trying to compile something without knowing enough about compiling. It is 11:30 as of writing this. Something as trivial as this should not have taken me so long. Am I going to try to compile AutoDock Vina next? 🤔 Maybe.

+ +

Also, if you want to try Open Babel on you jailbroken iDevice, install the package from my repository ( You, need to run the above mentioned final fix :p ). This was tested on iOS 13.5, I cannot tell if it will work on others or not.

+ +

Hopefully, I add some more screenshots to this post.

+ +

Edit 1: Added Screenshots, had to replicate the errors.

+ +
+ + + + + + \ No newline at end of file diff --git a/docs/posts/2020-06-01-Speeding-Up-Molecular-Docking-Workflow-AutoDock-Vina-and-PyMOL.html b/docs/posts/2020-06-01-Speeding-Up-Molecular-Docking-Workflow-AutoDock-Vina-and-PyMOL.html new file mode 100644 index 0000000..c388e29 --- /dev/null +++ b/docs/posts/2020-06-01-Speeding-Up-Molecular-Docking-Workflow-AutoDock-Vina-and-PyMOL.html @@ -0,0 +1,76 @@ + + + + + + + + + Hey - Post + + + + + +
+

Workflow for Lightning Fast Molecular Docking Part One

+ +

My Setup

+ +
    +
  • macOS Catalina ( RIP 32bit app)
  • +
  • PyMOL
  • +
  • AutoDock Vina
  • +
  • Open Babel
  • +
+ +

One Command Docking

+ +
obabel -:"$(pbpaste)" --gen3d -opdbqt -Otest.pdbqt && vina --receptor lu.pdbqt --center_x -9.7 --center_y 11.4 --center_z 68.9 --size_x 19.3 --size_y 29.9 --size_z 21.3  --ligand test.pdbqt
+
+ +

To run this command you simply copy the SMILES structure of the ligand you want, and it automatically takes it from your clipboard, generates the 3D structure in the AutoDock PDBQT format using Open Babel and then docks it with your receptor using AutoDock Vina, all with just one command.

+ +

Let me break down the commands

+ +
obabel -:"$(pbpaste)" --gen3d -opdbqt -Otest.pdbqt
+
+ +

pbpaste and pbcopy are macOS commands for pasting and copying from and to the clipboard. Linux users may install the xclip and xsel packages from their respective package managers and then insert these aliases into their bash_profile, zshrc e.t.c

+ +
alias pbcopy='xclip -selection clipboard'
+alias pbpaste='xclip -selection clipboard -o'
+
+ +
$(pbpaste)
+
+ +

This is used in bash to evaluate the results of a command. In this scenario we are using it to get the contents of the clipboard.

+ +

The rest of the command is a normal Open Babel command to generate a 3D structure in PDBQT format and then save it as test.pdbqt

+ +
&&
+
+ +

This tells the terminal to only run the next part if the previous command runs successfully without any errors.

+ +
vina --receptor lu.pdbqt --center_x -9.7 --center_y 11.4 --center_z 68.9 --size_x 19.3 --size_y 29.9 --size_z 21.3  --ligand test.pdbqt
+
+ +

This is just the docking command for AutoDock Vina. In the next part I will tell how to use PyMOL and a plugin to directly generate the coordinates in Vina format --center_x -9.7 --center_y 11.4 --center_z 68.9 --size_x 19.3 --size_y 29.9 --size_z 21.3 without needing to type them manually.

+ +
+ + + + + + \ No newline at end of file diff --git a/docs/posts/2020-06-02-Compiling-AutoDock-Vina-on-iOS.html b/docs/posts/2020-06-02-Compiling-AutoDock-Vina-on-iOS.html new file mode 100644 index 0000000..9820e75 --- /dev/null +++ b/docs/posts/2020-06-02-Compiling-AutoDock-Vina-on-iOS.html @@ -0,0 +1,113 @@ + + + + + + + + + Hey - Post + + + + + +
+

Compiling AutoDock Vina on iOS

+ +

Why? Because I can.

+ +

Installing makedepend

+ +

makedepend is a Unix tool used to generate dependencies of C source files. Most modern programs do not use this anymore, but then again AutoDock Vina's source code hasn't been changed since 2011. The first hurdle came when I saw that there was no makedepend command, neither was there any package on any development repository for iOS. So, I tracked down the original source code for makedepend (https://github.com/DerellLicht/makedepend). According to the repository this is actually the source code for the makedepend utility that came with some XWindows distribution back around Y2K. I am pretty sure there is a problem with my current compiler configuration because I had to manually edit the Makefile to provide the path to the iOS SDKs using the -isysroot flag.

+ +

Editing the Makefile

+ +

Original Makefile ( I used the provided mac Makefile base )

+ +
BASE=/usr/local
+BOOST_VERSION=1_41
+BOOST_INCLUDE = $(BASE)/include
+C_PLATFORM=-arch i386 -arch ppc -isysroot /Developer/SDKs/MacOSX10.5.sdk -mmacosx-version-min=10.4
+GPP=/usr/bin/g++
+C_OPTIONS= -O3 -DNDEBUG
+BOOST_LIB_VERSION=
+
+include ../../makefile_common
+
+ +

I installed Boost 1.68.0-1 from Sam Bingner's repository. ( Otherwise I would have had to compile boost too 😫 )

+ +

Edited Makefile

+ +
BASE=/usr
+BOOST_VERSION=1_68
+BOOST_INCLUDE = $(BASE)/include
+C_PLATFORM=-arch arm64 -isysroot /var/sdks/Latest.sdk
+GPP=/usr/bin/g++
+C_OPTIONS= -O3 -DNDEBUG
+BOOST_LIB_VERSION=
+
+include ../../makefile_common
+
+
+ +

Updating the Source Code

+ +

Of course since Boost 1.41 many things have been added and deprecated, that is why I had to edit the source code to make it work with version 1.68

+ +

Error 1 - No Matching Constructor

+ +
../../../src/main/main.cpp:50:9: error: no matching constructor for initialization of 'path' (aka 'boost::filesystem::path')
+return path(str, boost::filesystem::native);
+
+ +

This was an easy fix, I just commented this and added a return statement to return the path

+ +
return path(str)
+
+ +

Error 2 - No Member Named 'nativefilestring'

+ +
../../../src/main/main.cpp:665:57: error: no member named 'native_file_string' in 'boost::filesystem::path'
+                std::cerr << "\n\nError: could not open \"" << e.name.native_file_string() << "\" for " << (e.in ? "reading" : "writing") << ".\n";
+                                                               ~~~~~~ ^
+../../../src/main/main.cpp:677:80: error: no member named 'native_file_string' in 'boost::filesystem::path'
+                std::cerr << "\n\nParse error on line " << e.line << " in file \"" << e.file.native_file_string() << "\": " << e.reason << '\n';
+                                                                                      ~~~~~~ ^
+2 errors generated.
+
+ +

Turns out native_file_string was deprecated in Boost 1.57 and replaced with just string

+ +

Error 3 - Library Not Found

+ +

This one still boggles me because there was no reason for it to not work, as a workaround I downloaded the DEB, extracted it and used that path for compiling.

+ +

Error 4 - No Member Named 'nativefilestring' Again.

+ +

But, this time in another file and I quickly fixed it

+ +

Moment of Truth

+ +

Obviously it was working on my iPad, but would it work on another device? I transferred the compiled binary and

+ +

"AutoDock Vina running on my iPhone"

+ +

The package is available on my repository and only depends on boost. ( Both, Vina and Vina-Split are part of the package)

+ +
+ + + + + + \ No newline at end of file diff --git a/docs/posts/2020-07-01-Install-rdkit-colab.html b/docs/posts/2020-07-01-Install-rdkit-colab.html new file mode 100644 index 0000000..b60d405 --- /dev/null +++ b/docs/posts/2020-07-01-Install-rdkit-colab.html @@ -0,0 +1,124 @@ + + + + + + + + + Hey - Post + + + + + +
+

Installing RDKit on Google Colab

+ +

RDKit is one of the most integral parts of any Cheminformatics specialist's toolkit, but it is notoriously difficult to install unless you already have conda installed. I originally found this in a GitHub Gist but I have not been able to find that gist again :/

+ +

Just copy and paste this in a Colab cell and it will install it 👍

+ +
import sys
+import os
+import requests
+import subprocess
+import shutil
+from logging import getLogger, StreamHandler, INFO
+
+
+logger = getLogger(__name__)
+logger.addHandler(StreamHandler())
+logger.setLevel(INFO)
+
+
+def install(
+        chunk_size=4096,
+        file_name="Miniconda3-latest-Linux-x86_64.sh",
+        url_base="https://repo.continuum.io/miniconda/",
+        conda_path=os.path.expanduser(os.path.join("~", "miniconda")),
+        rdkit_version=None,
+        add_python_path=True,
+        force=False):
+    """install rdkit from miniconda
+    ```
+    import rdkit_installer
+    rdkit_installer.install()
+    ```
+    """
+
+    python_path = os.path.join(
+        conda_path,
+        "lib",
+        "python{0}.{1}".format(*sys.version_info),
+        "site-packages",
+    )
+
+    if add_python_path and python_path not in sys.path:
+        logger.info("add {} to PYTHONPATH".format(python_path))
+        sys.path.append(python_path)
+
+    if os.path.isdir(os.path.join(python_path, "rdkit")):
+        logger.info("rdkit is already installed")
+        if not force:
+            return
+
+        logger.info("force re-install")
+
+    url = url_base + file_name
+    python_version = "{0}.{1}.{2}".format(*sys.version_info)
+
+    logger.info("python version: {}".format(python_version))
+
+    if os.path.isdir(conda_path):
+        logger.warning("remove current miniconda")
+        shutil.rmtree(conda_path)
+    elif os.path.isfile(conda_path):
+        logger.warning("remove {}".format(conda_path))
+        os.remove(conda_path)
+
+    logger.info('fetching installer from {}'.format(url))
+    res = requests.get(url, stream=True)
+    res.raise_for_status()
+    with open(file_name, 'wb') as f:
+        for chunk in res.iter_content(chunk_size):
+            f.write(chunk)
+    logger.info('done')
+
+    logger.info('installing miniconda to {}'.format(conda_path))
+    subprocess.check_call(["bash", file_name, "-b", "-p", conda_path])
+    logger.info('done')
+
+    logger.info("installing rdkit")
+    subprocess.check_call([
+        os.path.join(conda_path, "bin", "conda"),
+        "install",
+        "--yes",
+        "-c", "rdkit",
+        "python=={}".format(python_version),
+        "rdkit" if rdkit_version is None else "rdkit=={}".format(rdkit_version)])
+    logger.info("done")
+
+    import rdkit
+    logger.info("rdkit-{} installation finished!".format(rdkit.__version__))
+
+
+if __name__ == "__main__":
+    install()
+
+ +
+ + + + + + \ No newline at end of file diff --git a/docs/posts/2020-08-01-Natural-Feature-Tracking-ARJS.html b/docs/posts/2020-08-01-Natural-Feature-Tracking-ARJS.html new file mode 100644 index 0000000..524e371 --- /dev/null +++ b/docs/posts/2020-08-01-Natural-Feature-Tracking-ARJS.html @@ -0,0 +1,307 @@ + + + + + + + + + Hey - Post + + + + + +
+

Introduction to AR.js and Natural Feature Tracking

+ +

AR.js

+ +

AR.js is a lightweight library for Augmented Reality on the Web, coming with features like Image Tracking, Location based AR and Marker tracking. It is the easiest option for cross-browser augmented reality.

+ +

The same code works for iOS, Android, Desktops and even VR Browsers!

+ +

It was initially created by Jerome Etienne and is now maintained by Nicolo Carpignoli and the AR-js Organisation

+ +

NFT

+ +

Usually for augmented reality you need specialised markers, like this Hiro marker (notice the thick non-aesthetic borders 🤢)

+ +

+ +

This is called marker based tracking where the code knows what to look for. NFT or Natural Feature Tracking converts normal images into markers by extracting 'features' from them; this way you can use any image of your liking!

+ +

I'll be using my GitHub profile picture

+ +

+ +

Creating the Marker!

+ +

First we need to create the marker files required by AR.js for NFT. For this we use Carnaux's repository 'NFT-Marker-Creator'.

+ +
$ git clone https://github.com/Carnaux/NFT-Marker-Creator
+
+Cloning into 'NFT-Marker-Creator'...
+remote: Enumerating objects: 79, done.
+remote: Counting objects: 100% (79/79), done.
+remote: Compressing objects: 100% (72/72), done.
+remote: Total 580 (delta 10), reused 59 (delta 7), pack-reused 501
+Receiving objects: 100% (580/580), 9.88 MiB | 282.00 KiB/s, done.
+Resolving deltas: 100% (262/262), done.
+
+$ cd NFT-Marker-Creator
+
+ +

Install the dependencies

+ +
$ npm install
+
+npm WARN nodegenerator@1.0.0 No repository field.
+
+added 67 packages from 56 contributors and audited 67 packages in 2.96s
+
+1 package is looking for funding
+  run `npm fund` for details
+
+found 0 vulnerabilities
+
+
+
+   ╭────────────────────────────────────────────────────────────────╮
+   │                                                                │
+   │      New patch version of npm available! 6.14.5 → 6.14.7       │
+   │   Changelog: https://github.com/npm/cli/releases/tag/v6.14.7   │
+   │               Run npm install -g npm to update!                │
+   │                                                                │
+   ╰────────────────────────────────────────────────────────────────╯
+
+
+
+ +

Copy the target marker to the folder

+ +
$ cp ~/CodingAndStuff/ARjs/me.png .
+
+ +

Generate Marker

+ +
$ node app.js -i me.png
+
+Confidence level: [ * * * * * ] 5/5 || Entropy: 5.24 || Current max: 5.17 min: 4.6
+
+Do you want to continue? (Y/N)
+y
+writeStringToMemory is deprecated and should not be called! Use stringToUTF8() instead!
+[info] 
+Commands: 
+[info] --
+Generator started at 2020-08-01 16:01:41 +0580
+[info] Tracking Extraction Level = 2
+[info] MAX_THRESH  = 0.900000
+[info] MIN_THRESH  = 0.550000
+[info] SD_THRESH   = 8.000000
+[info] Initialization Extraction Level = 1
+[info] SURF_FEATURE = 100
+[info]  min allow 3.699000.
+[info] Image DPI (1): 3.699000
+[info] Image DPI (2): 4.660448
+[info] Image DPI (3): 5.871797
+[info] Image DPI (4): 7.398000
+[info] Image DPI (5): 9.320896
+[info] Image DPI (6): 11.743593
+[info] Image DPI (7): 14.796000
+[info] Image DPI (8): 18.641792
+[info] Image DPI (9): 23.487186
+[info] Image DPI (10): 29.592001
+[info] Image DPI (11): 37.283585
+[info] Image DPI (12): 46.974373
+[info] Image DPI (13): 59.184002
+[info] Image DPI (14): 72.000000
+[info] Generating ImageSet...
+[info]    (Source image xsize=568, ysize=545, channels=3, dpi=72.0).
+[info]   Done.
+[info] Saving to asa.iset...
+[info]   Done.
+[info] Generating FeatureList...
+
+...
+
+[info] (46, 44) 5.871797[dpi]
+[info] Freak features - 23[info] ========= 23 ===========
+[info] (37, 35) 4.660448[dpi]
+[info] Freak features - 19[info] ========= 19 ===========
+[info] (29, 28) 3.699000[dpi]
+[info] Freak features - 9[info] ========= 9 ===========
+[info]   Done.
+[info] Saving FeatureSet3...
+[info]   Done.
+[info] Generator finished at 2020-08-01 16:02:02 +0580
+--
+
+Finished marker creation!
+Now configuring demo! 
+
+Finished!
+To run demo use: 'npm run demo'
+
+ +

Now we have the required files in the output folder

+ +
$ ls output
+
+me.fset  me.fset3 me.iset
+
+ +

Creating the HTML Page

+ +

Create a new file called index.html in your project folder. This is the basic template we are going to use. Replace me with the root filename of your image, for example NeverGonnaGiveYouUp.png will become NeverGonnaGiveYouUp. Make sure you have copied all three files from the output folder in the previous step to the root of your project folder.

+ +
<script src="https://cdn.jsdelivr.net/gh/aframevr/aframe@1c2407b26c61958baa93967b5412487cd94b290b/dist/aframe-master.min.js"></script>
+<script src="https://raw.githack.com/AR-js-org/AR.js/master/aframe/build/aframe-ar-nft.js"></script>
+
+<style>
+  .arjs-loader {
+    height: 100%;
+    width: 100%;
+    position: absolute;
+    top: 0;
+    left: 0;
+    background-color: rgba(0, 0, 0, 0.8);
+    z-index: 9999;
+    display: flex;
+    justify-content: center;
+    align-items: center;
+  }
+
+  .arjs-loader div {
+    text-align: center;
+    font-size: 1.25em;
+    color: white;
+  }
+</style>
+
+<body style="margin : 0px; overflow: hidden;">
+  <div class="arjs-loader">
+    <div>Calculating Image Descriptors....</div>
+  </div>
+  <a-scene
+    vr-mode-ui="enabled: false;"
+    renderer="logarithmicDepthBuffer: true;"
+    embedded
+    arjs="trackingMethod: best; sourceType: webcam;debugUIEnabled: false;"
+  >
+    <a-nft
+      type="nft"
+      url="./me"
+      smooth="true"
+      smoothCount="10"
+      smoothTolerance=".01"
+      smoothThreshold="5"
+    >
+
+    </a-nft>
+    <a-entity camera></a-entity>
+  </a-scene>
+</body>
+
+ +

In this we are creating a AFrame scene and we are telling it that we want to use NFT Tracking. The amazing part about using AFrame is that we are able to use all AFrame objects!

+ +

Adding a simple box

+ +

Let us add a simple box!

+ +
<a-nft .....>
+    <a-box position='100 0.5 -180' material='opacity: 0.5; side: double' scale="100 100 100"></a-box>
+</a-nft>
+
+ +

Now to test it out we will need to create a simple server, I use Python's inbuilt SimpleHTTPServer alongside ngrok

+ +

In one terminal window, cd to the project directory. Currently your project folder should have 4 files, index.html, me.fset3, me.fset and me.iset

+ +

Open up two terminal windows and cd into your project folder then run the following commands to start up your server.

+ +

In the first terminal window start the Python Server

+ +
$ cd ~/CodingAndStuff/ARjs
+$ python2 -m SimpleHTTPServer
+
+Serving HTTP on 0.0.0.0 port 8000 ...
+
+
+ +

In the other window run ngrok ( Make sure you have installed it prior to running this step )

+ +
$ ngrok http 8000
+
+
+ +

+ +

Now copy the url to your phone and try running the example

+ +

+ +

👏 Congratulations! You just built an Augmented Reality experience using AR.js and AFrame

+ +

Adding a Torus-Knot in the box

+ +

Edit your index.html

+ +
<a-nft ..>
+    <a-box ..>
+        <a-torus-knot radius='0.26' radius-tubular='0.05' ></a-torus-knot>
+    </ a-box>
+</ a-nft>
+
+ +

+ +

Where are the GIFs?

+ +

Now that we know how to place a box in the scene and add a torus knot in it, what do we do next? We bring the classic internet back!

+ +

AFrame GIF Shader is a gif shader for A-Frame created by mayognaise.

+ +

First things first

+ +

Add <script src="https://rawgit.com/mayognaise/aframe-gif-shader/master/dist/aframe-gif-shader.min.js"></script> to <head>

+ +

Change the box's material to add the GIF shader

+ +
...
+<a-box position='100 0.5 -180' material="shader:gif;src:url(https://media.tenor.com/images/412b1aa9149d98d561df62db221e0789/tenor.gif);opacity:.5" .....>
+
+ +

+ +

Bonus Idea: Integrate it with GitHub's new profile Readme Feature!

+ +

1) Host the code using GitHub Pages

+ +

2) Create a new repository ( the name should be your GitHub username )

+ +

3) Add QR Code to the page and tell the users to scan your profile picture

+ +

??) Profit 💸

+ +

Here is a screenshot of me scanning a rounded version of my profile picture ( It still works! Even though the image is cropped and I haven't changed any line of code )

+ +

+ +
+ + + + + + \ No newline at end of file diff --git a/docs/posts/2020-10-11-macOS-Virtual-Cam-OBS.html b/docs/posts/2020-10-11-macOS-Virtual-Cam-OBS.html new file mode 100644 index 0000000..5d59cd6 --- /dev/null +++ b/docs/posts/2020-10-11-macOS-Virtual-Cam-OBS.html @@ -0,0 +1,138 @@ + + + + + + + + + Hey - Post + + + + + +
+

Trying Different Camera Setups

+ +
    +
  1. Animated Overlays
  2. +
  3. Using a modern camera as your webcam
  4. +
  5. Using your phone's camera as your webcam
  6. +
  7. Using a USB Camera
  8. +
+ +

Comparison

+ +

Here are the results before you begin reading.

+ +
+ Normal Webcam + USB Webcam + Camo iPhone 5S + Camo iPhone 11 + Mirrorless Camera +
+ +

Prerequisites

+ +

I am running macOS and iOS but I will try to link the same steps for Windows as well. If you are running Arch, I assume you already know what you are doing and are using this post as an inspiration and not a how-to guide.

+ +

I assume that you have Homebrew installed.

+ +

OBS and OBS-Virtual-Cam

+ +

Description

+ +
brew cask install obs
+brew cask install obs-virtualcam
+
+ +

Windows users can install the latest version of the plugin from OBS-Forums

+ +

0. Animated Overlays

+ +

I have always liked PewDiePie's animated border he uses in his videos

+ +

Still grab from PewDiePie's video showing border

+ +

The border was apparently made by a YouTuber Sleepy Tanooki. He posted a link to a Google Drive folder containing the video file. (I will be using the video overlay for the example)

+ +

It is pretty simple to use overlays in OBS:

+ +

First, Create a new scene by clicking on the plus button on the bottom right corner.

+ +

Bottom Panel of OBS

+ +

Now, in the Sources section click on the add button -> Video Capture Device -> Create New -> Choose your webcam from the Device section.

+ +

You may, resize if you want

+ +

After this, again click on the add button, but this time choose the Media Source option

+ +

Media Source Option

+ +

and, locate and choose the downloaded overlay.

+ +

1. Using a Modern Camera (Without using a Capture Card)

+ +

I have a Sony mirrorless camera. Using Sony's Imaging Edge Desktop, you can use your laptop as a remote viewfinder and capture or record media.

+ +

After installing Imaging Edge Desktop or your camera's equivalent, open the Remote application.

+ +

Remote showing available cameras

+ +

Once you are able to see the output of the camera on the application, switch to OBS. Create a new scene, and this time choose Window Capture in the Sources menu. After you have chosen the appropriate window, you may transform/crop the output using the properties/filters options.

+ +

2.1 Using your iPhone using Quicktime

+ +

Connect your iPhone via a USB cable, then Open Quicktime -> File -> New Movie Recording

+ +

In the Sources choose your device (No need to press record). You may open the camera app now.

+ +

Choose Source

+ +

Now, in OBS create a new scene, and in the sources choose the Window Capture option. You will need to rotate the source:

+ +

Rotation

+ +

2.2 Using your iPhone using an application like Camo

+ +

Install the Camo app on your phone through the app store -> connect to Mac using USB cable, install the companion app and you are done.

+ +

I tried both my current iPhone and an old iPhone 5S

+ +

3. A USB Webcam

+ +

The simplest solution is to use a USB webcam. I used an old Logitech C310 that was collecting dust. I was surprised to find that Logitech is still selling it after years and proudly advertising it! (5MP)

+ +

It did not sit well on my laptop, so I placed it on my definitely-not-Joby Gorilla Pod I had bought on Amazon for ~₹500

+ +

USB Webcam + +

+ + + + + +
+ + + + + + \ No newline at end of file diff --git a/docs/posts/2020-11-17-Lets-Encrypt-DuckDns.html b/docs/posts/2020-11-17-Lets-Encrypt-DuckDns.html new file mode 100644 index 0000000..ef6f2e1 --- /dev/null +++ b/docs/posts/2020-11-17-Lets-Encrypt-DuckDns.html @@ -0,0 +1,98 @@ + + + + + + + + + Hey - Post + + + + + +
+

Generating an HTTPS Certificate using a DNS Challenge through Let's Encrypt

+ +

I have a Raspberry-Pi running a Flask app through Gunicorn (Ubuntu 20.04 LTS). I am exposing it to the internet using DuckDNS.

+ +

Dependencies

+ +
sudo apt update && sudo apt install certbot -y
+
+ +

Get the Certificate

+ +
sudo certbot certonly --manual --preferred-challenges dns-01 --email senpai@email.com -d mydomain.duckdns.org
+
+ +

After you accept that you are okay with your IP address being logged, it will prompt you to update your DNS record. You need to create a new TXT record in the DNS settings for your domain.

+ +

For DuckDNS users it is as simple as entering this URL in their browser:

+ +
http://duckdns.org/update?domains=mydomain&token=duckdnstoken&txt=certbotdnstxt
+
+ +

Where mydomain is your DuckDNS domain, duckdnstoken is your DuckDNS Token ( Found on the dashboard when you login) and certbotdnstxt is the TXT record value given by the prompt.

+ +

You can check if the TXT records have been updated by using the dig command:

+ +
dig navanspi.duckdns.org TXT
+; <<>> DiG 9.16.1-Ubuntu <<>> navanspi.duckdns.org TXT
+;; global options: +cmd
+;; Got answer:
+;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 27592
+;; flags: qr rd ra; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 1
+
+;; OPT PSEUDOSECTION:
+; EDNS: version: 0, flags:; udp: 65494
+;; QUESTION SECTION:
+;navanspi.duckdns.org.        IN    TXT
+
+;; ANSWER SECTION:
+navanspi.duckdns.org.    60    IN    TXT    "4OKbijIJmc82Yv2NiGVm1RmaBHSCZ_230qNtj9YA-qk"
+
+;; Query time: 275 msec
+;; SERVER: 127.0.0.53#53(127.0.0.53)
+;; WHEN: Tue Nov 17 15:23:15 IST 2020
+;; MSG SIZE  rcvd: 105
+
+ +

DuckDNS almost instantly propagates the changes but for other domain hosts, it could take a while.

+ +

Once you can ensure that the TXT record changes have been successfully applied and are visible through the dig command, press enter on the Certbot prompt and your certificate should be generated.

+ +

Renewing

+ +

As we manually generated the certificate, certbot renew will fail; to renew the certificate you simply need to re-generate it using the above steps.

+ +

Using the Certificate with Gunicorn

+ +

Example Gunicorn command for running a web-app:

+ +
gunicorn api:app -k uvicorn.workers.UvicornWorker -b 0.0.0.0:7589
+
+ +

To use the certificate with it, simply copy the cert.pem and privkey.pem to your working directory ( change the appropriate permissions ) and include them in the command

+ +
gunicorn api:app -k uvicorn.workers.UvicornWorker -b 0.0.0.0:7589 --certfile=cert.pem --keyfile=privkey.pem
+
+ +

Caveats with copying the certificate: If you renew the certificate you will have to re-copy the files

+ +
+ + + + + + \ No newline at end of file diff --git a/docs/posts/2020-12-1-HTML-JS-RSS-Feed.html b/docs/posts/2020-12-1-HTML-JS-RSS-Feed.html new file mode 100644 index 0000000..03609bf --- /dev/null +++ b/docs/posts/2020-12-1-HTML-JS-RSS-Feed.html @@ -0,0 +1,231 @@ + + + + + + + + + Hey - Post + + + + + +
+

RSS Feed written in HTML + JavaScript

+ +

If you want to directly open the HTML file in your browser after saving, don't forget to set CORS_PROXY=""

+ +
<!doctype html>
+<html lang="en">
+<head>
+  <meta charset="utf-8">
+  <meta name="viewport" content="width=device-width, initial-scale=1">
+    <title>
+        RSS Feed
+    </title>
+    <link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/css/bootstrap.min.css" integrity="sha384-MCw98/SFnGE8fJT3GXwEOngsV7Zt27NXFoaoApmYm81iuXoPkFOJwJ8ERdknLPMO" crossorigin="anonymous">
+</head>
+<body>
+
+<h1 align="center" class="display-1">RSS Feed</h1>
+<main>
+    <div class="container">
+    <div class="list-group pb-4" id="contents"></div>
+<div id="feed">
+</div></div>
+</main>
+
+<script src="https://gitcdn.xyz/repo/rbren/rss-parser/master/dist/rss-parser.js"></script>
+<script>
+
+const feeds = {
+    "BuzzFeed - India": {
+      "link":"https://www.buzzfeed.com/in.xml",
+      "summary":true
+    },
+    "New Yorker": {
+      "link":"http://www.newyorker.com/feed/news",
+    },
+    "Vox":{
+      "link":"https://www.vox.com/rss/index.xml",
+      "limit": 3
+    },
+    "r/Jokes":{
+      "link":"https://reddit.com/r/Jokes/hot/.rss?sort=hot",
+      "ignore": ["repost","discord"]
+    }
+}
+
+const config_extra = {
+"Responsive-Images": true,
+"direct-link": false,
+"show-date":false,
+"left-column":false,
+"defaults": {
+  "limit": 5,
+  "summary": true
+}
+}
+
+const CORS_PROXY = "https://cors-anywhere.herokuapp.com/"
+
+var contents_title = document.createElement("h2")
+contents_title.textContent = "Contents"
+contents_title.classList.add("pb-1")
+document.getElementById("contents").appendChild(contents_title)
+
+async function myfunc(key){
+
+  var count_lim = feeds[key]["limit"]
+  var count_lim = (count_lim === undefined) ? config_extra["defaults"]["limit"] : count_lim
+
+  var show_summary = feeds[key]["summary"]
+  var show_summary = (show_summary === undefined) ? config_extra["defaults"]["summary"] : show_summary
+
+  var ignore_tags = feeds[key]["ignore"]
+  var ignore_tags = (ignore_tags === undefined) ? [] : ignore_tags
+
+  var contents = document.createElement("a")
+  contents.href = "#" + key
+  contents.classList.add("list-group-item","list-group-item-action")
+  contents.textContent = key
+  document.getElementById("contents").appendChild(contents)
+  var feed_div = document.createElement("div")
+  feed_div.id = key
+  feed_div.setAttribute("id", key);
+  var title = document.createElement("h2");
+  title.textContent = "From " + key;
+  title.classList.add("pb-1")
+  feed_div.appendChild(title)
+  document.getElementById("feed").appendChild(feed_div)
+  var parser = new RSSParser();
+  var countPosts = 0
+  parser.parseURL(CORS_PROXY + feeds[key]["link"], function(err, feed) {
+    if (err) throw err;
+    feed.items.forEach(function(entry) {
+      if (countPosts < count_lim) {
+
+      var skip = false
+      for(var i = 0; i < ignore_tags.length; i++) {
+        if (entry.title.includes(ignore_tags[i])){
+          var skip = true
+        } else if (entry.content.includes(ignore_tags[i])){
+          var skip = true
+        }
+      }
+
+      if (!skip) {
+
+      var node = document.createElement("div");
+      node.classList.add("card","mb-3");
+      var row = document.createElement("div")
+      row.classList.add("row","no-gutters")
+
+      if (config_extra["left-column"]){
+      var left_col = document.createElement("div")
+      left_col.classList.add("col-md-2")
+      var left_col_body = document.createElement("div")
+      left_col_body.classList.add("card-body")
+      }
+
+      var right_col = document.createElement("div")
+      if (config_extra["left-column"]){
+        right_col.classList.add("col-md-10")
+      }
+      var node_title = document.createElement("h5")
+
+      node_title.classList.add("card-header")
+      node_title.innerHTML = entry.title
+
+      node_body = document.createElement("div")
+      node_body.classList.add("card-body")
+
+      node_content = document.createElement("p")
+
+      if (show_summary){
+        node_content.innerHTML = entry.content
+      }
+      node_content.classList.add("card-text")
+
+      if (config_extra["direct-link"]){
+      node_link = document.createElement("p")
+      node_link.classList.add("card-text")
+      node_link.innerHTML = "<b>Link:</b> <a href='" + entry.link +"'>Direct Link</a>"
+      if (config_extra["left-column"]){
+      left_col_body.appendChild(node_link)
+        } else {
+          node_content.appendChild(node_link)
+        }
+      }
+
+      if (config_extra["show-date"]){
+        node_date = document.createElement("p")
+        node_date.classList.add("card-text")
+        node_date.innerHTML = "<p><b>Date: </b>" + entry.pubDate + "</p>"
+        if (config_extra["left-column"]){
+        left_col_body.appendChild(node_date)
+          } else {
+            node_content.appendChild(node_date)
+
+        }
+      }
+
+      node.appendChild(node_title)
+
+      node_body.appendChild(node_content)
+
+      right_col.appendChild(node_body)
+
+      if (config_extra["left-column"]){
+        left_col.appendChild(left_col_body)
+        row.appendChild(left_col)
+      }
+
+      row.appendChild(right_col)
+
+      node.appendChild(row)
+
+      document.getElementById(key).appendChild(node)
+      countPosts+=1
+    }
+    }
+  })
+
+  if (config_extra["Responsive-Images"]){
+  var inputs = document.getElementsByTagName('img')
+      for(var i = 0; i < inputs.length; i++) {
+        inputs[i].classList.add("img-fluid")
+      }
+  }
+
+  })
+
+  return true
+}
+(async () => {
+for(var key in feeds) {
+  let result = await myfunc(key);
+}})();
+
+</script>
+<noscript>Uh Oh! Your browser does not support JavaScript or JavaScript is currently disabled. Please enable JavaScript or switch to a different browser.</noscript>
+</body></html>
+
+ +
+ + + + + + \ No newline at end of file diff --git a/docs/posts/hello-world.html b/docs/posts/hello-world.html new file mode 100644 index 0000000..bf37c75 --- /dev/null +++ b/docs/posts/hello-world.html @@ -0,0 +1,36 @@ + + + + + + + + + Hey - Post + + + + + +
+

Hello World

+ +

Why a Hello World post?

+ +

Just re-did the entire website using Publish (Publish by John Sundell). So, a new hello world post :)

+ +
+ + + + + + \ No newline at end of file diff --git a/docs/posts/index.html b/docs/posts/index.html new file mode 100644 index 0000000..b94dd4d --- /dev/null +++ b/docs/posts/index.html @@ -0,0 +1,402 @@ + + + + + + + + + Hey - Section + + + + + + +
+

Posts

+ +

Tips, tricks and tutorials which I think might be useful.

+ +
+ + + + + + + + + \ No newline at end of file diff --git a/docs/publications/2019-05-14-Detecting-Driver-Fatigue-Over-Speeding-and-Speeding-up-Post-Accident-Response.html b/docs/publications/2019-05-14-Detecting-Driver-Fatigue-Over-Speeding-and-Speeding-up-Post-Accident-Response.html new file mode 100644 index 0000000..1e319fe --- /dev/null +++ b/docs/publications/2019-05-14-Detecting-Driver-Fatigue-Over-Speeding-and-Speeding-up-Post-Accident-Response.html @@ -0,0 +1,52 @@ + + + + + + + + + Hey - Post + + + + + +
+

Detecting Driver Fatigue, Over-Speeding, and Speeding up Post-Accident Response

+ +
+

Based on the project showcased at Toyota Hackathon, IITD - 17/18th December 2018

+
+ +

Edit: It seems like I haven't mentioned Adrian Rosebrock of PyImageSearch anywhere. I apologize for this mistake.

+ +

Download paper here

+ +

Recommended citation:

+ +

ATP

+ +
Chauhan, N. (2019). &quot;Detecting Driver Fatigue, Over-Speeding, and Speeding up Post-Accident Response.&quot; <i>International Research Journal of Engineering and Technology (IRJET), 6(5)</i>.
+
+ +

BibTeX

+ +
@article{chauhan_2019, title={Detecting Driver Fatigue, Over-Speeding, and Speeding up Post-Accident Response}, volume={6}, url={https://www.irjet.net/archives/V6/i5/IRJET-V6I5318.pdf}, number={5}, journal={International Research Journal of Engineering and Technology (IRJET)}, author={Chauhan, Navan}, year={2019}}
+
+ +
+ + + + + + \ No newline at end of file diff --git a/docs/publications/2020-03-14-generating-vaporwave.html b/docs/publications/2020-03-14-generating-vaporwave.html new file mode 100644 index 0000000..70e06db --- /dev/null +++ b/docs/publications/2020-03-14-generating-vaporwave.html @@ -0,0 +1,66 @@ + + + + + + + + + Hey - Post + + + + + +
+

Is it possible to programmatically generate Vaporwave?

+ +

This is still a pre-print.

+ +

Download paper here

+ +

Recommended citation:

+ +

APA

+ +
Chauhan, N. (2020, March 15). Is it possible to programmatically generate Vaporwave?. https://doi.org/10.35543/osf.io/9um2r
+
+ +

MLA

+ +
Chauhan, Navan. “Is It Possible to Programmatically Generate Vaporwave?.” IndiaRxiv, 15 Mar. 2020. Web.
+
+ +

Chicago

+ +
Chauhan, Navan. 2020. “Is It Possible to Programmatically Generate Vaporwave?.” IndiaRxiv. March 15. doi:10.35543/osf.io/9um2r.
+
+ +

Bibtex

+ +
@misc{chauhan_2020,
+ title={Is it possible to programmatically generate Vaporwave?},
+ url={indiarxiv.org/9um2r},
+ DOI={10.35543/osf.io/9um2r},
+ publisher={IndiaRxiv},
+ author={Chauhan, Navan},
+ year={2020},
+ month={Mar}
+}
+
+ +
+ + + + + + \ No newline at end of file diff --git a/docs/publications/2020-03-17-Possible-Drug-Candidates-COVID-19.html b/docs/publications/2020-03-17-Possible-Drug-Candidates-COVID-19.html new file mode 100644 index 0000000..9723c28 --- /dev/null +++ b/docs/publications/2020-03-17-Possible-Drug-Candidates-COVID-19.html @@ -0,0 +1,36 @@ + + + + + + + + + Hey - Post + + + + + +
+

Possible Drug Candidates for COVID-19

+ +

This is still a pre-print.

+ +

Download paper here

+ +
+ + + + + + \ No newline at end of file diff --git a/docs/publications/index.html b/docs/publications/index.html new file mode 100644 index 0000000..6d981dc --- /dev/null +++ b/docs/publications/index.html @@ -0,0 +1,77 @@ + + + + + + + + + Hey - Section + + + + + + +
+

Publications

+ +

Hopefully these grow with time, I already have tons of drafts ready. As I am currently studying in school, this allows me to experiment in Physics, Chemistry and Computer Science. I have started using LaTeX now ;)

+ +
+ + + + + + + + + \ No newline at end of file diff --git a/docs/pwabuilder-sw-register.js b/docs/pwabuilder-sw-register.js new file mode 100644 index 0000000..8850330 --- /dev/null +++ b/docs/pwabuilder-sw-register.js @@ -0,0 +1,19 @@ +// This is the service worker with the Cache-first network + +// Add this below content to your HTML page, or add the js file to your page at the very top to register service worker + +// Check compatibility for the browser we're running this in +if ("serviceWorker" in navigator) { + if (navigator.serviceWorker.controller) { + console.log("[PWA Builder] active service worker found, no need to register"); + } else { + // Register the service worker + navigator.serviceWorker + .register("/pwabuilder-sw.js", { + scope: "./" + }) + .then(function (reg) { + console.log("[PWA Builder] Service worker has been registered for scope: " + reg.scope); + }); + } +} diff --git a/docs/pwabuilder-sw.js b/docs/pwabuilder-sw.js new file mode 100644 index 0000000..0684da5 --- /dev/null +++ b/docs/pwabuilder-sw.js @@ -0,0 +1,83 @@ +// This is the service worker with the Cache-first network + +const CACHE = "pwabuilder-precache"; +const precacheFiles = [ + /* Add an array of files to precache for your app */ +]; + +self.addEventListener("install", function (event) { + console.log("[PWA Builder] Install Event processing"); + + console.log("[PWA Builder] Skip waiting on install"); + self.skipWaiting(); + + event.waitUntil( + caches.open(CACHE).then(function (cache) { + console.log("[PWA Builder] Caching pages during install"); + return cache.addAll(precacheFiles); + }) + ); +}); + +// Allow sw to control of current page +self.addEventListener("activate", function (event) { + console.log("[PWA Builder] Claiming clients for current page"); + event.waitUntil(self.clients.claim()); +}); + +// If any fetch fails, it will look for the request in the cache and serve it from there first +self.addEventListener("fetch", function (event) { + if (event.request.method 
!== "GET") return; + + event.respondWith( + fromCache(event.request).then( + function (response) { + // The response was found in the cache so we responde with it and update the entry + + // This is where we call the server to get the newest version of the + // file to use the next time we show view + event.waitUntil( + fetch(event.request).then(function (response) { + return updateCache(event.request, response); + }) + ); + + return response; + }, + function () { + // The response was not found in the cache so we look for it on the server + return fetch(event.request) + .then(function (response) { + // If request was success, add or update it in the cache + event.waitUntil(updateCache(event.request, response.clone())); + + return response; + }) + .catch(function (error) { + console.log("[PWA Builder] Network request failed and no cache." + error); + }); + } + ) + ); +}); + +function fromCache(request) { + // Check to see if you have it in the cache + // Return response + // If not in the cache, then return + return caches.open(CACHE).then(function (cache) { + return cache.match(request).then(function (matching) { + if (!matching || matching.status === 404) { + return Promise.reject("no-match"); + } + + return matching; + }); + }); +} + +function updateCache(request, response) { + return caches.open(CACHE).then(function (cache) { + return cache.put(request, response); + }); +} -- cgit v1.2.3