diff --git a/AI_Driver.ipynb b/AI_Driver.ipynb new file mode 100644 index 0000000..d78b952 --- /dev/null +++ b/AI_Driver.ipynb @@ -0,0 +1,780 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "name": "AI_Driver.ipynb", + "provenance": [], + "collapsed_sections": [] + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + }, + "accelerator": "GPU" + }, + "cells": [ + { + "cell_type": "code", + "metadata": { + "id": "NqdtXFe5n5KT", + "colab_type": "code", + "outputId": "8bcd2308-b1df-4314-a5a0-0f61a6a008e6", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 986 + } + }, + "source": [ + "!git clone https://github.com/LiyuanLucasLiu/RAdam.git\n", + "!python RAdam/setup.py install" + ], + "execution_count": 1, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Cloning into 'RAdam'...\n", + "remote: Enumerating objects: 24, done.\n", + "remote: Counting objects: 100% (24/24), done.\n", + "remote: Compressing objects: 100% (22/22), done.\n", + "remote: Total 298 (delta 9), reused 10 (delta 2), pack-reused 274\n", + "Receiving objects: 100% (298/298), 948.24 KiB | 3.89 MiB/s, done.\n", + "Resolving deltas: 100% (138/138), done.\n",
+ "running install\n", + "running bdist_egg\n", + "running egg_info\n", + "creating RAdam.egg-info\n", + "writing RAdam.egg-info/PKG-INFO\n", + "writing dependency_links to RAdam.egg-info/dependency_links.txt\n", + "writing requirements to RAdam.egg-info/requires.txt\n", + "writing top-level names to RAdam.egg-info/top_level.txt\n", + "writing manifest file 'RAdam.egg-info/SOURCES.txt'\n", + "reading manifest file 'RAdam.egg-info/SOURCES.txt'\n", + "writing manifest file 'RAdam.egg-info/SOURCES.txt'\n", + "installing library code to build/bdist.linux-x86_64/egg\n", + "running install_lib\n", + "warning: install_lib: 'build/lib' does not exist -- no Python modules to install\n", + "\n", + "creating build\n", + "creating build/bdist.linux-x86_64\n", + "creating build/bdist.linux-x86_64/egg\n", + "creating build/bdist.linux-x86_64/egg/EGG-INFO\n", + "copying RAdam.egg-info/PKG-INFO -> build/bdist.linux-x86_64/egg/EGG-INFO\n", + "copying RAdam.egg-info/SOURCES.txt -> build/bdist.linux-x86_64/egg/EGG-INFO\n", + "copying RAdam.egg-info/dependency_links.txt -> build/bdist.linux-x86_64/egg/EGG-INFO\n", + "copying RAdam.egg-info/requires.txt -> build/bdist.linux-x86_64/egg/EGG-INFO\n", + "copying RAdam.egg-info/top_level.txt -> build/bdist.linux-x86_64/egg/EGG-INFO\n", + "zip_safe flag not set; analyzing archive contents...\n", + "creating dist\n", + "creating 'dist/RAdam-0.0.1-py3.6.egg' and adding 'build/bdist.linux-x86_64/egg' to it\n", + "removing 'build/bdist.linux-x86_64/egg' (and everything under it)\n",
+ "Processing RAdam-0.0.1-py3.6.egg\n", + "Copying RAdam-0.0.1-py3.6.egg to /usr/local/lib/python3.6/dist-packages\n", + "Adding RAdam 0.0.1 to easy-install.pth file\n", + "\n", + "Installed /usr/local/lib/python3.6/dist-packages/RAdam-0.0.1-py3.6.egg\n", + "Processing dependencies for RAdam==0.0.1\n", + "Searching for torch==1.3.1\n", + "Best match: torch 1.3.1\n", + "Adding torch 1.3.1 to easy-install.pth file\n", + "Installing convert-caffe2-to-onnx script to /usr/local/bin\n", + "Installing convert-onnx-to-caffe2 script to /usr/local/bin\n", + "\n", + "Using /usr/local/lib/python3.6/dist-packages\n", + "Searching for numpy==1.17.5\n", + "Best match: numpy 1.17.5\n", + "Adding numpy 1.17.5 to easy-install.pth file\n", + "Installing f2py script to /usr/local/bin\n", + "Installing f2py3 script to /usr/local/bin\n", + "Installing f2py3.6 script to /usr/local/bin\n", + "\n", + "Using /usr/local/lib/python3.6/dist-packages\n", + "Finished processing dependencies for 
RAdam==0.0.1\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "jm8XsBAGEngR", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# imports\n", + "import os\n", + "import sys\n", + "\n", + "import torch\n", + "import torch.nn as nn\n", + "import torch.optim as optim\n", + "from torch.utils import data\n", + "from torch.utils.data import DataLoader\n", + "import torchvision.transforms as transforms\n", + "from torch.utils.data.sampler import SubsetRandomSampler\n", + "from RAdam import radam\n", + "\n", + "import cv2\n", + "import matplotlib.image as mpimg\n", + "import numpy as np\n", + "import csv\n", + "import requests\n", + "import zipfile\n", + "import time\n", + "import pandas as pd" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "EzlbrzoVEpil", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# class for download\n", + "class DataDownloader:\n", + "\n", + " def __init__(self, file_id, destination, download = True):\n", + " self.file_id = file_id\n", + " self.destination = destination\n", + "\n", + " if download:\n", + " self.download_dataset()\n", + " self.extract_zip()\n", + "\n", + " def download_dataset(self):\n", + " def get_confirm_token(response):\n", + " for key, value in response.cookies.items():\n", + " if key.startswith('download_warning'):\n", + " return value\n", + "\n", + " return None\n", + "\n", + " def save_response_content(response):\n", + " CHUNK_SIZE = 32768\n", + "\n", + " with open(self.destination, \"wb\") as f:\n", + " for chunk in response.iter_content(CHUNK_SIZE):\n", + " if chunk: # filter out keep-alive new chunks\n", + " f.write(chunk)\n", + "\n", + " URL = \"https://docs.google.com/uc?export=download\"\n", + "\n", + " session = requests.Session()\n", + "\n", + " response = session.get(URL, params = { 'id' : self.file_id }, stream = True)\n", + " token = get_confirm_token(response)\n", + "\n", + " if token:\n", + " params = { 'id' : self.file_id, 'confirm' : token }\n", + " response = session.get(URL, params = params, stream = True)\n", + "\n", + " save_response_content(response) \n", + "\n", + " def extract_zip(self):\n", + " if not os.path.exists('input'):\n", + " os.makedirs('input')\n", + "\n", + " if not os.path.exists('output'):\n", + " os.makedirs('output')\n", + "\n", + " with zipfile.ZipFile(self.destination, 'r') as zip_ref:\n", + " zip_ref.extractall('./input/')" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "r6e2HTjgJGKP", + "colab_type": "code", + "colab": {} + }, + "source": [ + "FILE_ID = '1VaYonsJUovGO1AamMQuC2LN47AZ4pkTm'\n", + "DST_LOC = './self_driving_dataset.zip'\n", + "DATA_CSV_FILE_PATH = './input/driving_log.csv'\n", + "DATA_IMAGES_DIR = './input/IMG'\n", + "MODEL_SAVE_PATH = './output/ai_driver_cnn.pth'\n", + "IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS = 66, 200, 3\n", + "\n", + "SAVE_DIR = './output/'\n", + "\n", + "data_download = DataDownloader(FILE_ID, DST_LOC, True)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "HKFjzNBSOAE6", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# Helper defs\n", + "\n", + "def load_image(data_dir, image_file):\n", + " \"\"\"\n", + " Load RGB images from a file\n", + " \"\"\"\n", + " name = image_file.split('/')[-1]\n", + " return mpimg.imread(os.path.join(data_dir, name))\n", + "\n", + "def crop(image):\n", + " \"\"\"\n", + " Crop the image (removing the sky at the 
top and the car front at the bottom)\n", + " \"\"\"\n", + " return image[60:-25, :, :] # remove the sky and the car front\n", + "\n", + "\n", + "def resize(image):\n", + " \"\"\"\n", + " Resize the image to the input shape used by the network model\n", + " \"\"\"\n", + " return cv2.resize(image, (IMAGE_WIDTH, IMAGE_HEIGHT), cv2.INTER_AREA)\n", + "\n", + "\n", + "def rgb2yuv(image):\n", + " \"\"\"\n", + " Convert the image from RGB to YUV (This is what the NVIDIA model does)\n", + " \"\"\"\n", + " return cv2.cvtColor(image, cv2.COLOR_RGB2YUV)\n", + "\n", + "\n", + "def preprocess(image):\n", + " \"\"\"\n", + " Combine all preprocess functions into one\n", + " \"\"\"\n", + " image = crop(image)\n", + " image = resize(image)\n", + " image = rgb2yuv(image)\n", + " return image\n", + "\n", + "\n", + "def choose_image(data_dir, center, left, right, steering_angle):\n", + " \"\"\"\n", + " Randomly choose an image from the center, left or right, and adjust\n", + " the steering angle.\n", + " \"\"\"\n", + " choice = np.random.choice(3)\n", + " if choice == 0:\n", + " return load_image(data_dir, left), steering_angle + 0.2\n", + " elif choice == 1:\n", + " return load_image(data_dir, right), steering_angle - 0.2\n", + " return load_image(data_dir, center), steering_angle\n", + "\n", + "\n", + "def random_flip(image, steering_angle):\n", + " \"\"\"\n", + " Randomly flipt the image left <-> right, and adjust the steering angle.\n", + " \"\"\"\n", + " if np.random.rand() < 0.5:\n", + " image = cv2.flip(image, 1)\n", + " steering_angle = -steering_angle\n", + " return image, steering_angle\n", + "\n", + "\n", + "def random_translate(image, steering_angle, range_x, range_y):\n", + " \"\"\"\n", + " Randomly shift the image virtially and horizontally (translation).\n", + " \"\"\"\n", + " trans_x = range_x * (np.random.rand() - 0.5)\n", + " trans_y = range_y * (np.random.rand() - 0.5)\n", + " steering_angle += trans_x * 0.002\n", + " trans_m = np.float32([[1, 0, trans_x], [0, 1, trans_y]])\n", + " height, width = image.shape[:2]\n", + " image = cv2.warpAffine(image, trans_m, (width, height))\n", + " return image, steering_angle\n", + "\n", + "\n", + "def random_shadow(image):\n", + " \"\"\"\n", + " Generates and adds random shadow\n", + " \"\"\"\n", + " print(image.shape)\n", + " # (x1, y1) and (x2, y2) forms a line\n", + " # xm, ym gives all the locations of the image\n", + " x1, y1 = IMAGE_WIDTH * np.random.rand(), 0\n", + " x2, y2 = IMAGE_WIDTH * np.random.rand(), IMAGE_HEIGHT\n", + " xm, ym = np.mgrid[0:IMAGE_HEIGHT, 0:IMAGE_WIDTH]\n", + "\n", + " # mathematically speaking, we want to set 1 below the line and zero otherwise\n", + " # Our coordinate is up side down. 
So, the above the line: \n", + " # (ym-y1)/(xm-x1) > (y2-y1)/(x2-x1)\n", + " # as x2 == x1 causes zero-division problem, we'll write it in the below form:\n", + " # (ym-y1)*(x2-x1) - (y2-y1)*(xm-x1) > 0\n", + " mask = np.zeros_like(image[:, :, 1])\n", + " mask[(ym - y1) * (x2 - x1) - (y2 - y1) * (xm - x1) > 0] = 1\n", + "\n", + " # choose which side should have shadow and adjust saturation\n", + " cond = mask == np.random.randint(2)\n", + " s_ratio = np.random.uniform(low=0.2, high=0.5)\n", + "\n", + " # adjust Saturation in HLS(Hue, Light, Saturation)\n", + " hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)\n", + " hls[:, :, 1][cond] = hls[:, :, 1][cond] * s_ratio\n", + " return cv2.cvtColor(hls, cv2.COLOR_HLS2RGB)\n", + "\n", + "\n", + "def random_brightness(image):\n", + " \"\"\"\n", + " Randomly adjust brightness of the image.\n", + " \"\"\"\n", + " # HSV (Hue, Saturation, Value) is also called HSB ('B' for Brightness).\n", + " hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)\n", + " ratio = 1.0 + 0.4 * (np.random.rand() - 0.5)\n", + " hsv[:,:,2] = hsv[:,:,2] * ratio\n", + " return cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)\n", + "\n", + "\n", + "def augument(data_dir, center, left, right, steering_angle, range_x=100, range_y=10):\n", + " \"\"\"\n", + " Generate an augumented image and adjust steering angle.\n", + " (The steering angle is associated with the center image)\n", + " \"\"\"\n", + " image, steering_angle = choose_image(data_dir, center, left, right, steering_angle)\n", + " image, steering_angle = random_flip(image, steering_angle)\n", + " image, steering_angle = random_translate(image, steering_angle, range_x, range_y)\n", + " # image = random_shadow(image)\n", + " image = random_brightness(image)\n", + " return image, steering_angle" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "iAr84GNvJMYP", + "colab_type": "code", + "colab": {} + }, + "source": [ + "class CustomDataset(data.Dataset):\n", + "\n", + " def __init__(self, csv_file_path, image_dir, transform = None):\n", + " self.csv_file_path = csv_file_path\n", + " self.image_dir = image_dir\n", + " self.transform = transform\n", + "\n", + " self.examples = []\n", + "\n", + " with open(self.csv_file_path) as csvfile:\n", + " reader = csv.reader(csvfile)\n", + " next(reader, None)\n", + " for line in reader:\n", + " self.examples.append(line)\n", + "\n", + "\n", + " def __getitem__(self, index):\n", + " example = self.examples[index]\n", + " center, left, right = example[0], example[1], example[2]\n", + " steering_angle = float(example[3])\n", + "\n", + " if np.random.rand() < 0.6:\n", + " image, steering_angle = augument(self.image_dir, center, left, right, steering_angle)\n", + " else:\n", + " image = load_image(self.image_dir, center) \n", + " \n", + " image = preprocess(image)\n", + " \n", + " if self.transform is not None:\n", + " image = self.transform(image)\n", + " \n", + " return image, steering_angle\n", + "\n", + " def __len__(self):\n", + " return len(self.examples)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "PwDQ2JIgMbqE", + "colab_type": "code", + "colab": {} + }, + "source": [ + "batch_size = 128\n", + "num_epochs = 40\n", + "\n", + "validation_split = 0.25\n", + "shuffle_dataset = True\n", + "random_seed = 42\n", + "num_workers = 4" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "Xl0_MCEMMhBi", + "colab_type": "code", + "outputId": 
"3164e237-c5d6-486d-d2f9-14ea95b0c75a", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 34 + } + }, + "source": [ + "print(\"Initializing Datasets and Dataloaders...\")\n", + "\n", + "# Creating data indices for training and validation splits:\n", + "#Create a dataset object\n", + "transformations = transforms.Compose([transforms.Lambda(lambda x: (x / 127.5) - 1.0)])\n", + "\n", + "dataset = CustomDataset(DATA_CSV_FILE_PATH, DATA_IMAGES_DIR, transformations)\n", + "dataset_size = len(dataset)\n", + "# dataset_size = 3000\n", + "indices = list(range(dataset_size))\n", + "split = int(np.floor(validation_split * dataset_size))\n", + "\n", + "if shuffle_dataset :\n", + " np.random.seed(random_seed)\n", + " np.random.shuffle(indices)\n", + "\n", + "train_indices, val_indices = indices[split:], indices[:split]\n", + "\n", + "# Creating PT data samplers and loaders:\n", + "train_sampler = SubsetRandomSampler(train_indices)\n", + "valid_sampler = SubsetRandomSampler(val_indices)\n", + "\n", + "train_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, \n", + " sampler=train_sampler, num_workers=num_workers)\n", + "validation_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,\n", + " sampler=valid_sampler, num_workers=num_workers)\n", + "\n", + "test_loader = torch.utils.data.DataLoader(dataset, batch_size=1,\n", + " sampler=valid_sampler, num_workers=num_workers)\n", + "\n", + "\n", + "data_loader_dict = {\n", + " 'train': train_loader,\n", + " 'val': validation_loader \n", + "}\n", + "\n", + "# Detect if we have a GPU available\n", + "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")" + ], + "execution_count": 9, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Initializing Datasets and Dataloaders...\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "4YlDs4IhMrhS", + "colab_type": "code", + "colab": {} + }, + "source": [ + "class DriverNet(nn.Module):\n", + "\n", + " def __init__(self):\n", + " super(DriverNet, self).__init__()\n", + "\n", + " self.conv_layers = nn.Sequential(\n", + " nn.Conv2d(3, 24, kernel_size=5, stride=2),\n", + " nn.ELU(),\n", + " nn.Conv2d(24, 36, kernel_size=5, stride=2),\n", + " nn.ELU(),\n", + " nn.Conv2d(36, 48, kernel_size=5, stride=2),\n", + " nn.ELU(),\n", + " nn.Conv2d(48, 64, kernel_size=3, stride=1),\n", + " nn.ELU(),\n", + " nn.Conv2d(64, 64, kernel_size=3, stride=1),\n", + " nn.ELU(),\n", + " nn.Dropout(p=0.5)\n", + " )\n", + " self.linear_layers = nn.Sequential(\n", + " nn.Linear(in_features=64*1*18, out_features=100),\n", + " nn.ELU(),\n", + " nn.Dropout(p=0.5),\n", + " nn.Linear(in_features=100, out_features=64),\n", + " nn.ELU(),\n", + " nn.Linear(in_features=64, out_features=10),\n", + " nn.ELU(),\n", + " nn.Linear(in_features=10, out_features=1)\n", + " )\n", + " \n", + "\n", + " def forward(self, input):\n", + " input = input.view(input.size(0), 3, 66, 200)\n", + " output = self.conv_layers(input)\n", + " output = output.view(output.size(0), -1)\n", + " output = self.linear_layers(output)\n", + " return output" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "Ja2-iF3jMuyI", + "colab_type": "code", + "outputId": "35f605a3-be24-4870-d4f3-6f7e6e3a7cd7", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 340 + } + }, + "source": [ + "model_ft = DriverNet()\n", + "\n", + "# Send the model to GPU\n", + "model_ft = model_ft.to(device)\n", + "\n", + "# Gather the 
parameters to be optimized/updated in this run. If we are\n", + "# finetuning we will be updating all parameters. However, if we are\n", + "# doing feature extract method, we will only update the parameters\n", + "# that we have just initialized, i.e. the parameters with requires_grad\n", + "# is True.\n", + "params_to_update = model_ft.parameters()\n", + "print(\"Params to learn:\")\n", + "\n", + "for name,param in model_ft.named_parameters():\n", + " if param.requires_grad == True:\n", + " print(\"\\t\",name)\n", + "\n", + "# Observe that all parameters are being optimized\n", + "optimizer_ft = radam.RAdam(params_to_update)\n", + "# optimizer_ft = optim.SGD(params_to_update, lr = 0.00008)\n", + "# optimizer_ft = optim.Adam(params_to_update, lr = 0.0001)" + ], + "execution_count": 11, + "outputs": [ + { + "output_type": "stream", + "text": [ + "Params to learn:\n", + "\t conv_layers.0.weight\n", + "\t conv_layers.0.bias\n", + "\t conv_layers.2.weight\n", + "\t conv_layers.2.bias\n", + "\t conv_layers.4.weight\n", + "\t conv_layers.4.bias\n", + "\t conv_layers.6.weight\n", + "\t conv_layers.6.bias\n", + "\t conv_layers.8.weight\n", + "\t conv_layers.8.bias\n", + "\t linear_layers.0.weight\n", + "\t linear_layers.0.bias\n", + "\t linear_layers.3.weight\n", + "\t linear_layers.3.bias\n", + "\t linear_layers.5.weight\n", + "\t linear_layers.5.bias\n", + "\t linear_layers.7.weight\n", + "\t linear_layers.7.bias\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "yy5DFQjoVO_E", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def toDevice(data, device):\n", + " \n", + " return data.float().to(device)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "_ErcaBWrMx-L", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def train_model(model, dataloaders, criterion, optimizer, num_epochs=25):\n", + " since = time.time()\n", + "\n", + " epoch_number, train_losses, val_losses, = [], [], []\n", + " best_loss = 10000.0\n", + "\n", + " for epoch in range(num_epochs):\n", + " print('Epoch {}/{}'.format(epoch, num_epochs - 1))\n", + " print('-' * 10)\n", + " epoch_number.append(epoch) \n", + " # Each epoch has a training and validation phase\n", + " # Training loop\n", + " train_loss = 0.0\n", + " val_loss = 0.0\n", + "\n", + " # Training\n", + " model.train()\n", + " for inputs, labels in dataloaders['train']:\n", + " inputs = toDevice(inputs, device)\n", + " labels = toDevice(labels, device)\n", + "\n", + " optimizer.zero_grad()\n", + " # Generate predictions\n", + " out = model(inputs)\n", + " # Calculate loss\n", + " loss = criterion(out, labels.unsqueeze(1))\n", + " # Backpropagation\n", + " loss.backward()\n", + " # Update model parameters\n", + " optimizer.step()\n", + "\n", + " train_loss += loss.item()\n", + "\n", + " # Validation \n", + " model.eval()\n", + " with torch.no_grad():\n", + " for inputs, labels in dataloaders['val']:\n", + " inputs = toDevice(inputs, device)\n", + " labels = toDevice(labels, device)\n", + " # Generate predictions \n", + " out = model(inputs)\n", + " # Calculate loss\n", + " loss = criterion(out, labels.unsqueeze(1))\n", + " \n", + " val_loss += loss.item()\n", + "\n", + " # Average validation loss\n", + " train_loss = train_loss / len(dataloaders['train'])\n", + " val_loss = val_loss / len(dataloaders['val'])\n", + "\n", + " train_losses.append(train_loss)\n", + " val_losses.append(val_loss)\n", + "\n", + " print('Train Loss: {:.4f}'.format(train_loss))\n", 
+ " print('Val Loss: {:.4f}'.format(val_loss))\n", + "\n", + " # If the validation loss is at a minimum\n", + " if val_loss < best_loss:\n", + " # Save the model\n", + " torch.save(model, MODEL_SAVE_PATH)\n", + " best_loss = val_loss\n", + "\n", + " time_elapsed = time.time() - since\n", + " print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))\n", + " print('Best val Loss: {:.4f}'.format(best_loss))\n", + "\n", + " # create a dataframe and record all the losses at each epoch\n", + " log_frame = pd.DataFrame(columns = [\"Epoch\", \"Train Loss\", \"Test Loss\"])\n", + " log_frame[\"Epoch\"] = epoch_number\n", + " log_frame[\"Train Loss\"] = train_losses\n", + " log_frame[\"Test Loss\"] = val_losses\n", + " log_frame.to_csv(os.path.join(SAVE_DIR, \"log2.csv\"), index = False)\n", + "\n", + " # load best model weights\n", + " # model.load_state_dict(best_model_wts)\n", + " return model" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "gxvfwfW9M0Kx", + "colab_type": "code", + "colab": {} + }, + "source": [ + "criterion = nn.MSELoss()\n", + "\n", + "# Train and evaluate\n", + "model_ft = train_model(model_ft, data_loader_dict, criterion, optimizer_ft, num_epochs=num_epochs)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "gOTlk_BaM2uu", + "colab_type": "code", + "colab": {} + }, + "source": [ + "frame = pd.read_csv(os.path.join(SAVE_DIR, \"log2.csv\"))\n", + "frame" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "IgCRiZ4lod8E", + "colab_type": "code", + "outputId": "bf21aa67-fe53-45cb-ed76-91f7ceadb85b", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 295 + } + }, + "source": [ + "from matplotlib import pyplot as plt\n", + "from matplotlib import style\n", + "\n", + "from numpy import genfromtxt\n", + "\n", + "data = genfromtxt(os.path.join(SAVE_DIR, \"log2.csv\"),delimiter=',', names=['Epoch', 'Train Loss', 'Test Loss'])\n", + "epoch_list = []\n", + "train_loss_list = []\n", + "test_loss_list = []\n", + "for row in data:\n", + " if not np.isnan(row[0]):\n", + " epoch_list.append(row[0])\n", + " train_loss_list.append(row[1])\n", + " test_loss_list.append(row[2])\n", + " \n", + "\n", + "plt.plot(epoch_list, train_loss_list, label = \"Training Loss\")\n", + "plt.plot(epoch_list, test_loss_list, label = \"Testing Loss\")\n", + "\n", + "plt.title('MSE Loss Vs Epoch')\n", + "plt.ylabel('Loss')\n", + "plt.xlabel('Epoch')\n", + "\n", + "plt.show()" + ], + "execution_count": 17, + "outputs": [ + { + "output_type": "display_data", + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAYgAAAEWCAYAAAB8LwAVAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjIsIGh0\ndHA6Ly9tYXRwbG90bGliLm9yZy8li6FKAAAgAElEQVR4nO3dd3xV9f3H8dcnN5skBMhghE1QAgIy\nxYUKCi5wK+5dtdr212qrbbWtrR3a4W5ddeNeuIp7gwzZe0NYIUAgELK/vz/OAS7hJgTI5V7I+/l4\n3EfuPed7z/nco9zP/Z7vMuccIiIiNcVEOgAREYlOShAiIhKSEoSIiISkBCEiIiEpQYiISEhKECIi\nEpIShIjsFTNbamZDIx2HhJ8ShEQF/0un3MwyamyfYmbOzDr4r3PM7A0zKzSzTWY208yu9Pd18Mtu\nqfG4sJZzfmFm14b5o9U8ZxszqzSzziH2vWVmf9/L431hZqU1Pu+7DRexNGZKEBJNlgCjtr8wsyOA\n5BplngdWAO2BFsBlwNoaZdKdcylBj1fCGPNecc6tBD7Fi3sHM2sOnAY8uw+HvbnG5z2zAUIVUYKQ\nqPI8cHnQ6yuA52qU6Q8845zb6pyrdM5Ncc592NCBmNkIM5tlZkX+r/RuQft+ZWYrzazYzOaZ2RB/\n+wAzm2Rmm81srZn9s5bDP0uNBAFcBMx2zs0wz7/MrMA/1gwz67EPn+EEM8s3s1/7Na6lZnZJ0P6m\nZvacma0zs2Vm9lsziwnaf52ZzfE/52wz6xN0+N5mNt2vxb1iZol7G59EPyUIiSbjgTQz62ZmAbwv\nzRdClHnEzC4ys3bhCMLMugIvAT8DMoEPgHfNLN7MDgNuBvo751KBYcBS/60PAA8459KAzsCrtZzi\nLSDDzI4N2nYZO2sPpwDHA12BpsAFwPp9/DgtgQygDV7Cfdz/DAAP+cfvBAzGS85XAZjZ+cDv/W1p\nwIgaMVwADAc6Aj2BK/cxPoliShASbbbXIk4G5gAra+w/H/gauBNYYmZTzax/jTKF/i//7Y9u7J0L\ngfedcx875yqAvwNJwNFAFZAA5JlZnHNuqXNukf++CqCLmWU457Y458aHOrhzbhvwmv85MbNcoC8w\nOug4qcDhgDnn5jjnVtcR74M1Pu8fa+y/0zlX5pz7EngfuCAoAd/hnCt2zi0F/sHOms21wL3OuYnO\ns9A5tyz4nM65Vc65DcC7QO864pODlBKERJvngYvxfpHWvL2Ec26jc+5251x3IBuYCrxtZhZULMM5\nlx70mLOXMbQGdnwZOueq8do92jjnFuLVLH4PFJjZy2bW2i96Dd6v/rlmNtHMzqjjHM8C5/u3Zi4D\nxjrnCvzzfQY8DDzin+NxM0ur41g/qfF57wzat9E5tzXo9TL/82UAccGf03/exn/eFlhE7dYEPS8B\nUuooKwcpJQiJKv6v1CV4DbZv7qFsId6v+9ZA8wYMYxVeIzgAfvJpi1+bcc6Nds4d65dxwN/87Quc\nc6OALH/b62bWpJZzfANsAEYCl1Kjcdo596Bzri+Qh5d0btvHz9KsRgzt/M9XiFdTaV9j3/Ya2wq8\n22TSiClBSDS6Bjipxi9fAMzsb2bWw8xizSwVuBFY6Jzb13v0sWaWGPSIw2s7ON3MhvivfwGUAd+Z\n2WFmdpKZJQClwDag2o/tUjPL9GscRf7xq0Od1Hnz7D+Hl0jS8W7TbP+M/c1soH/urf55Qh6nnv7g\nt58cB5wBvOacq/I/5z1mlmpm7YGfs7PN50ngVjPr6zead/HLSCOiBCFRxzm3yDk3qZbdyXiNvEXA\nYrxfwCNqlCmqMS7g53Wc7t94X/LbH0875+bh/ap/CO+X9pnAmc65crz2h7/629fg1Rbu8I81HJhl\nZlvwGqwv8tsbavMc3q/2V5xzZUHb04AngI14t33WA/fVcZyHa3zeyUH71vjHWQW8CNzgnJvr77sF\nLwEtxqvRjAb+C+Ccew24x99WDLxNw9bS5CBgWjBI5NBkZicALzjnciIdixycVIMQEZGQlCBERCQk\n3WISEZGQVIMQEZGQYiMdQEPJyMhwHTp0iHQYIiIHlcmTJxc65zJD7TtkEkSHDh2YNKm2npEiIhKK\nmS2rbZ9uMYmISEhKECIiEpIShIiIhKQEISIiISlBiIhISEoQIiISkhKEiIiE1OgTxObSCu7/ZD7T\nVhTtubCISCPS6BOEc3D/JwuYuHRDpEMREYkqjT5BpCXGEh+IYd2Wsj0XFhFpRBp9gjAzMlMTWFes\nBCEiEqzRJwiADCUIEZHdKEEAmSkJFG4pj3QYIiJRRQkCdItJRCQEJQi8BLFhaxlV1VpdT0RkOyUI\nIDMlnmoH67eqFiEisp0SBF4NAtBtJhGRIEoQ7EwQaqgWEdlJCQLITEkEVIMQEQmmBAFkpMYDShAi\nIsGUIIDk+FiaxAeUIEREgihB+DJTEzQfk4hIECUInzdYrjTSYYiIRA0lCF9mqqbbEBEJpgThy0zR\ndBsiIsGUIHwZKQls2lZBWWVVpEMREYkKShA+DZYTEdmVEoRP022IiOxKCcK3owahBCEiAihB7LCj\nBqGxECIigBLEDi2a6BaTiEgwJQhffGwM6clxShAiIj4liCAaCyEispMSRBBvNLUShIgIhDlBmNlw\nM5tnZgvN7PYQ+483sx/MrNLMzqux714zm2Vmc8zsQTOzcMYKmrBPRCRY2BKEmQWAR4BTgTxglJnl\n1Si2HLgSGF3jvUcDxwA9gR5Af2BwuGLdTreYRER2CmcNYgCw0Dm32DlXDrwMjAwu4Jxb6pybDlTX\neK8DEoF4IAGIA9aGMVYAMlITKCmvYmtZZbhPJSIS9cKZINoAK4Je5/vb9sg5Nw74HFjtP8Y65+bU\nLGdm15vZJDObtG7duv0OODNFXV1FRLaLykZqM+sCdANy8JLKSWZ2XM1yzrnHnXP9nHP9MjMz9/u8\nGiwnIrJTOBPESqBt0Oscf1t9nA2Md85tcc5tAT4EBjVwfLvRdBsiIjuFM0FMBHLNrKOZxQMXAWPq\n+d7lwGAzizWzOLwG6t1uMTU01SBERHYKW4JwzlUCNwNj8b7cX3XOzTKzu81sBICZ9TezfOB84DEz\nm+W//XVgETADmAZMc869G65Yt2uWHE8gxtQGISICxIbz4M65D4APamy7K+j5RLxbTzXfVwX8KJyx\nhRKIMZo3iVeCEBEhShupI0ljIUREPEoQNWi6DRERjxJEDZmpqkGIiIASxG62z8fknIt0KCIiEaUE\nUUNGSgIVVY5N2yoiHYqISEQpQdSwYyyEbjOJSCOnBFHDjvmY1FAtIo2cEkQNqkGIiHiUIGpQghAR\n8ShB1JCWGEt8IEa3mESk0VOCqMHMNBZCRAQliJAylCBERJQgQslMSaBwS3mkwxARiSgliBB0i0lE\nRAkipMzUBDZsLaOqWtNtiEjjpQQRQmZKPNUO1m9VLUJE
Gi8liBA0FkJERAkipO0JQg3VItKYKUGE\nkJmSCKgGISKNmxJECBmp8YAShIg0bkoQISTHx9IkPqAEISKNmhJELbavLCci0lgpQdQiMzWBQtUg\nRKQRU4KohWoQItLYKUFsXgUPHgnTX91lc2aKptsQkcZNCaJJJmzKhzUzdtmckZLApm0VlFVWRSgw\nEZHIUoIIxEHGYVAwe5fNGiwnIo2dEgRAdh6srSVB6DaTiDRSShAAWXlQvAq2bdyxSfMxiUhjpwQB\nkN3d+xtUi9iRINSTSUQaKSUI8GoQsEs7RIsmqkGISOOmBAGQ1hoSm8LaWTs2xcfGkJ4cpwQhIo1W\nWBOEmQ03s3lmttDMbg+x/3gz+8HMKs3svBr72pnZR2Y2x8xmm1mHMAYKWd1378mksRAi0oiFLUGY\nWQB4BDgVyANGmVlejWLLgSuB0SEO8Rxwn3OuGzAAKAhXrIDXk6lgDridy4xmpiZQqDYIEWmkwlmD\nGAAsdM4tds6VAy8DI4MLOOeWOuemA9XB2/1EEuuc+9gvt8U5VxLGWL12iLLNsGnFjk2abkNEGrNw\nJog2wIqg1/n+tvroChSZ2ZtmNsXM7vNrJLsws+vNbJKZTVq3bt3+RRuqJ5NuMYlIIxatjdSxwHHA\nrUB/oBPerahdOOced871c871y8zM3L8zZnXz/hbsbKjOSE2gpLyKrWWV+3dsEZGDUDgTxEqgbdDr\nHH9bfeQDU/3bU5XA20CfBo5vV4lNoWnb3WoQoK6uItI4hTNBTARyzayjmcUDFwFj9uK96Wa2vVpw\nEjC7jvINIytvl55MO+djUoIQkcYnbAnC/+V/MzAWmAO86pybZWZ3m9kIADPrb2b5wPnAY2Y2y39v\nFd7tpU/NbAZgwBPhinWH7DwonA+V3gR9mm5DRBqz2HAe3Dn3AfBBjW13BT2fiHfrKdR7PwZ6hjO+\n3WR1h+pKWL8Asrtrug0RadSitZE6MrL9YRp+O0Sz5HgCMaYahIg0SkoQwVrkQkzsjp5MgRijeZN4\nJQgRaZSUIILFxkNG1916MqmRWkQaIyWImkL0ZFINQkQaIyWImrLzvOk2SjcBShAi0ngpQdSU5U+5\nUTAH2DkfkwuaxE9EpDFQgqhpR08mr6E6IyWBiirHpm0VEQxKROTAU4KoqWlbSEjb0Q6hwXIi0lgp\nQdRk5k3c5/dk6pqdAsDXCwojGZWIyAGnBBFKVp43FsI5Dm+ZRp926Tw3binV1WqHEJHGQwkilOzu\nXi+mzasAuPKYjixdX8KX8/dzzQkRkYOIEkQoWX5Dtd8OcWqPlmSlJvD0d0sjF5OIyAGmBBFKjZ5M\ncYEYLj2qPV/NX8eidVsiGJiIyIGjBBFKUjNIbb3LiOpRA9oRH4jhOdUiRKSRUIKoTXbernMypSZw\nRs9WvD45n+JSjYkQkUNfvRKEmXU2swT/+Qlm9hMzSw9vaBGWlQeF86Bq53rUVxzdga3lVbw+OT+C\ngYmIHBj1rUG8AVSZWRfgcby1pkeHLapokN0dqsphw6Idm3q1TefIduk8N26ZuryKyCGvvgmi2l9C\n9GzgIefcbUCr8IUVBbJ2baje7sqjO7CkcCtfLlCXVxE5tNU3QVSY2SjgCuA9f1tceEKKEpmHgQV2\naagGOLVHKzJTE3jm26WRiUtE5ACpb4K4ChgE3OOcW2JmHYHnwxdWFIhNgBZddmmoBoiPjeHSge35\ncv46FqvLq4gcwuqVIJxzs51zP3HOvWRmzYBU59zfwhxb5GXn7Vh+NNiogW2JCxjPjVsWgaBERA6M\n+vZi+sLM0sysOfAD8ISZ/TO8oUWBrO6wcSmU7VpTyEpN5IyerdXlVUQOafW9xdTUObcZOAd4zjk3\nEBgavrCixPYR1evm7rbriqM7sKWskjfU5VVEDlH1TRCxZtYKuICdjdSHvlp6MgH0bptO77bq8ioi\nh676Joi7gbHAIufcRDPrBCwIX1hRIr09xDXZrSfTdlce3YHFhVv5Sl1eReQQVN9G6teccz2dczf6\nrxc7584Nb2hRICbGXzxo9xoEwGlH+F1eNT+TiByC6ttInWNmb5lZgf94w8xywh1cVMjO82oQbvfb\nSPGxMVx+VHu+mLeOF8arR5OIHFrqe4vpaWAM0Np/vOtvO/RldYeS9bClIOTuG07ozJDDs7jznZm8\nM3XlAQ5ORCR86psgMp1zTzvnKv3HM0BmGOOKHm37e38nPBZyd1wghkcu6cPAjs35+avT+GT22gMY\nnIhI+NQ3Qaw3s0vNLOA/LgXWhzOwqNGmL/S+FL65H1ZODlkkMS7Ak1f0p0frNG4a/QPfLSo8wEGK\niDS8+iaIq/G6uK4BVgPnAVeGKaboM+weSG0Jb98EFaUhi6QkxPLMVQPo0CKZ656dxJTlGw9wkCIi\nDau+vZiWOedGOOcynXNZzrmzgEO/F9N2Sekw4kFvwNwXf6m1WLMm8bxwzUBapCRw5dMTmbem+AAG\nKSLSsPZnRbmfN1gUB4MuQ6HP5fDdg7BiYq3FstISefHagSTGxXDpU9+ztHDrAQxSRKTh7E+CsD0W\nMBtuZvPMbKGZ3R5i//Fm9oOZVZrZeSH2p5lZvpk9vB9xNpxT7vHWqn7nJqjYVmuxts2TeeGagVRW\nVXPJk9+zelPtZUVEotX+JIg655cwswDwCHAqkAeMMrO8GsWW47Vl1LY63R+Br/YjxoaVmAYjH4bC\n+fD5PXUWzc1O5bmrB7JpWwVXPzOJ8srqAxSkiEjDqDNBmFmxmW0O8SjGGw9RlwHAQn/UdTnwMjAy\nuIBzbqlzbjqw27enmfUFsoGP9uYDhV3nE6Hf1fDdw7D8+zqLHpHTlH9d2Js5qzfz2JeL6iwrIhJt\n6kwQzrlU51xaiEeqcy52D8duA6wIep3vb9sjM4sB/gHcuody15vZJDObtG7dAZwP6eS7Ib0tvH0j\nlJfUXTQvm9N7tuKhzxaysECN1iJy8NifW0zhdBPwgXOuzrm0nXOPO+f6Oef6ZWYewHF7Cakw8hHY\nsAg+++Mei//+zO4kxQe4/Y0ZmvlVRA4a4UwQK4G2Qa9z/G31MQi42cyWAn8HLjezvzZsePup4/HQ\n/zoY/29Y+m2dRTNTE7jzjDwmLdvIi99rziYROTiEM0FMBHLNrKOZxQMX4c3ntEfOuUucc+2ccx3w\nbjM955zbrRdUxA39PTRr791qKtlQZ9Fz+7ThuNwM/vrhXFYVqVeTiES/sCUI51wlcDPeOhJzgFed\nc7PM7G4zGwFgZv3NLB84H3jMzELPqx2tElLg7MeheDW8ejlUltda1Mz489lHUO3gzrdn4kLMDisi\nEk3sUPmi6tevn5s0aVJkTj7tFXjreuh9idc2YbUPEXny68X86f05PDjqSEb02lNHMBGR8DKzyc65\nfqH2RWsj9cGl14Uw+HaY+iJ8/Y86i151TEd65TTlD2NmsXFr7TUOEZFIU4JoKCfcDkdc4PVqmvlG\nrcUCMcZfz+3
Jpm0V/PH90EuZiohEAyWIhmLmjbJuNwjeuhFWTKi1aLdWadwwuDNv/bCCaV+9A0Ur\nai0rIhIpShANKTYBLnwRmraBl0bBhiWhy1VV8JPMyXyW/Gt6fXY5le/85MDGKSJSD0oQDa1JC7j4\nNXBVMPoC2Ba0LkTFNpjwBDzUh/gxN5KdlshXVUdgS77ctZyISBRQggiHjC5eTWLDEq/769b1XuP1\n/UfAB7dCSksY9TLJP/mexT1/RoAqvnn/hUhHLSKyC3VzDaepL8HbN4AFvBpF5yFw3M+h/TE7usJW\nVVVT9Oeu/FDRjsTLXuG43Max1LeIRIe6urnuacI92R+9R8G2DbB6Ghx1E7TuvVuRQCCG1CPPYfCk\n/3Lci9/y4k1D6ZKVEoFgRUR2pVtM4Tbox3DO4yGTw3bxPUYSTwUn2lSufXaixkeISFRQgogG7Y6C\nJpn8sv18VhWVcuOLk7XAkIhEnBJENIgJwOFn0HzlF9x7Vi7jF2/gd2P2PF+Tc45NJRUHKEgRaWzU\nBhEt8kbA5Kc5K3Ue80/ozKNfLCI3K5Wrj+24S7HqaseUFUV8MGM1H85YzapNpdx8Yhd+cUpXrI45\noERE9pYSRLTocBwkpsPsMdx61n9YWLCFP70/m46ZTRicm8mUFRt5f/oaPpy5mtWbSokPxHBcbgZH\ntmvGw58vZENJOX8c2YNAjJ8kthbCqqle76ncU+qcQFBEJBQliGgRiIPDToO57xNTXcG/LuzN+f8Z\nxy2jp5CSEMuazV5SOL5rJr8cfhhDumWTlhiHc47D3ytn4rh3eT//v5yeUUBgzTTYFDR9R79r4LT7\nvFtZIiL1pAQRTfJGwLTRsOQrmuQO5ckr+nHts5No0yyJ2484nCHdskhNjNtZft087OWLuWX9QogH\n1sOaTa1pkXsUcQOuh9ZHwsKP4dsHvJHaZz8GsfER+3gicnBRgogmnU6E+BSYMwZyh9I6PYkPfnpc\n6LLV1TDmJ1CyHk6+G1r1ZkxBJv83Zind16Xx9On9aZGSAB2Pg+QM+PhOL0lc+IK30JGIyB6oF1M0\niUuErsNg7vtQXVV32akvwIrxcPIf4ZifQqfBjDgqj8cv68u8NcWc/9g4Vm5f2vSYn8DIR2HJV/Dc\nCNatXckns9fyz4/n8/y4pVRXHxqj6UWkYWmqjWgz6y147Uq44j3v138oWwvh4X6Q2Q2u+mC3BuiJ\nSzdw9TMTaRIfy6OX9mFrWSXT8zfB3A+4du3drKjO5LLyO1hjLXAOTjwsk/svOpKmSXGhzycihyyt\nKHcw6XIyxCZ6t5lq89FvoawYzvhXyN5J/Ts059UfDaLKOc559Dsue2oC942dx+tbe/JYu7/TLm4z\nX7T4C7Nv6cgfR3bn6wWFjHj4G+atKQ7jBxORg41qENHo5Utg5WT4v9kQUyOHL/kKnj0Tjv05DP1d\nnYdZWbSNT2avpUtWCj1aN6Vpsl9DWD0dXjjHu411yetMrurIDS/8wJbSSu49rydnaq1skUZDNYiD\nTbcRULzaSxLBKsvgvZ9Dens4/rY9HqZNehJXHN2BY7pk7EwOAK16wtVjISEVnj+bvkkFvH/LsXRv\nncYtL03hnvdnU1mlqT5EGjsliGjUdRjExMGcd3bd/u0DsH4BnP5PiE/ev3O06AxXvuetgjf6fLJi\nNjP6uqO4fFB7nvh6CZf/dwLrt5Tt3zlE5KCmBBGNktKh02CYPQa23wJcvwi++jt0PxtyhzbMedLb\nwcUvw5Z18NJFxFeXcvfIHvz9/F5MWraREQ9/y/T8ooY5l4gcdJQgolW3EVC0DNZM95LE+7/wfu0P\n+0vDnqdNXzjvKVj5A7x1PVRXc17fHN644WgAzvvPOF6btGIPBxGRQ5ESRLQ6/HSwGK8WMfMNWPw5\nnHQnpLUKz7mG/RnmvAuf3AXAETlNefeWY+nfoRm3vT6dO9+eqSnIRRoZjaSOVk0yvKVJZ74B5Vu9\naTP6XxO+8x11I2xcAt89BM06Qv9raN4knmevGsC9Y+fx+FeLmbN6M49e2oes1MTwxSEiUUM1iGiW\nN9L70i4phDPuD+9ke2Yw/K/QdTh8cCss+BiA2EAMvz6tGw+NOpJZqzZzxoPfMHnZxvDFISJRQwki\nmh1+htebaeANdS5Z2mBiAnDuU5DdwxvNvWbGjl1n9mrNWz8+msS4ABc9Po4Xv1+2xwWNROTgpoFy\n0W7jMmiac2Cn6t68Gp4c4jWOX/cppO0cOLeppIKfvjKFL+at48J+bfndiDyS43WnUuRgpYFyB7Nm\n7Q/8Og5preDiV73pPF67yps51tc0OY6nrujPzSd24ZVJK+j/p0/45evT+H7xetUoRA4xqkFI7aa+\nBG/fAKfeBwOv3233D8s38vKE5bw/fTVby6to1zyZc/vkcE6fNrRtvp8D+UTkgKirBqEEIbVzDl44\nF5aPhx+P9wbWhVBSXsnYWWt4fXI+3y1aj3MwqFMLzu2bw+CumWSmJhzgwEWkviKWIMxsOPAAEACe\ndM79tcb+44H7gZ7ARc651/3tvYF/A2lAFXCPc+6Vus6lBBEmRcvh0UHQdgBc+uYe17bO31jCWz+s\n5PUf8lm2vgSA3KwUju7cgkGdWzCwYwuaNdGqdiLRIiIJwswCwHzgZCAfmAiMcs7NDirTAS8J3AqM\nCUoQXQHnnFtgZq2ByUA351yt8z4oQYTRhCe8rq9n/Rt6X1yvtzjnmLFyE98tWs93i9YzcckGtlVU\nYQbdWqYxqHMLju7cgqM6taBJghq5RSKlrgQRzn+ZA4CFzrnFfhAvAyOBHQnCObfU37fLEF3n3Pyg\n56vMrADIBDQxUCT0u8YbsPe/O6DzEEjN3uNbzIyeOen0zEnnhsGdKa+sZnp+EeP8hPH8+GU89c0S\n4gJGn3bNOL5rJsflZtCjdVNiYuqupYjIgRHOBNEGCJ7EJx8YuLcHMbMBQDywqIHikr0VEwMjHoJ/\nHwMf3gYXPLfXh4iPjaFfh+b069CcW4bkUlpRxeRlG/l6QSFfL1jHfWPncd/YeTRLjuOYLhkcn5vJ\nsbkZtE5PCsMHEpH6iOq6vZm1Ap4HrnDO7TYRkJldD1wP0K5d6AZUaSAZuXDC7fDpH7z5ofJG7N37\nK0q9UeHrF8GGRSSuX8QxlaUc0/MCbh92EoUlFXy7sJCv5nsJ473pqwHomNFkl9tRGSlq8BY5UMKZ\nIFYCbYNe5/jb6sXM0oD3gd8458aHKuOcexx4HLw2iH0PVerl6Fu8NbPf/4W3XnZSs9rL5k+C6a/A\nunmwYTFsygeC/hMltwBX7ZVp3pmMAdcxsvfFjOzdBucc89YW8+3C9YxbVMi7U1cx+vvlABzeMtVP\nGBkclp1K0+Q4UhNidVtKJAzC2Ugdi9dIPQQvMUwELnbOzQpR9hngvaBG6njgQ+Bd59z99TmfGqkP\nkNXT4fEToNcoOOuRXfdVVcDsd2D8v2HlJIhLhqxu0Lyzt0BR887QohM0
7+Qll8oyr/yExyF/IsQ1\ngV4XQv/rIDtvx2Erq6p3NHiPW7SeiUs3UBY0s2yMQVpSHOlJcTRNiqNpcjzNkuM4r28Ox+VmHqAL\nI3JwimQ319PwurEGgP865+4xs7uBSc65MWbWH3gLaAaUAmucc93N7FLgaSA4mVzpnJta27mUIA6g\nT++Gr//hdXvtMgS2FsLkp2HiU95Sqc07efNH9b7YW9a0PlZNgQlPwszXobIU2h8Lx/4Mck/erWhZ\nZRVTlhexYkMJm7ZVsGlbBUUl/l//9cqNJRRuKWfUgHb85vRupKinlEhIGignDauiFB47zvvbaTBM\nfxWqyqDTid604V1O9hq290XJBpjyPEx80huDcczPYMhdez3dSGlFFf/6eD6Pf72Y1k2TuPe8nhzT\nJWPfYhI5hClBSMNb/j38dxjEJkKvi7waQ9bhDXf8ynL48JdezaTzEG/Vu7raPGoxedkGbnttOosL\nt3LpUe2449RuGnchEkQJQsJjzUxvptfk5uE7x6Sn4YPbvBltR73ktWnspdKKKv4+dh5PfbuEnGZJ\n3HtuLwZ1bhGGYEUOPprNVcKjZY/wJgeAflfBle9DRQk8OdTrYruXEuMC/PaMPF790SACZox6Yjy/\ne2cm28qrwhCwyKFDCUKiX7uBcP0XkHkYvHoZfHbPLlOQ7+AcFK+FxV96DeaLPvPaSXz9OzTnw58e\nz1XHdODZccs446GvmbVq06rC2+8AABV6SURBVAH7GCIHG91ikoNHRSl88AuY8oK3NOpRN8K6+bBu\nDhTM9f5uq7Ecamyit7Z3lyHQ+STIPBzM+GZBIT9/dSpFJRX8cvhhXH1MR42lkEZJbRBy6HDO6+H0\nv9uhutLbltgUMrt5jeSZ/qNFZy9pLPoMFn0Khf70XqmtvUTR5SQ25AzlV2MW8PHstRyXm8E/LuhF\nVmpiraeuqKrmk9lreWniCkorqrjj1MM5st3eN5yLRBMlCDn0FMyF4lVeYkhtucdpyCla4SeLz2Dx\nF1BaBEnNcL0vZUz8qfzqs800iY/lvvN7ctLhu05GuHx9CS9PXM6rk/Ip3FJG66aJVDlHQXEZVwzq\nwK3DDtM4CzloKUGIBKuugmXfeTWROe+Cq2Zr+yH8Zf1xvFjYmcsHdeTWYYfxzYJCRk9YztcLCokx\nOOnwbC4e2JbBXbMoKa/k72Pn8dz4ZbRMS+QPI7pzSveWkf5kIntNCUKkNptXeV1pJz8DWwtYn5DD\nI1tO5C03mI3VybRumsiF/dtxQf8cWjXdfWbZH5Zv5I43ZjBvbTHDu7fk9yO607Jp7bepRKKNEoTI\nnlSWw5wx3rxQK76nJJDGlNPGcNSRvQnsofG6oqqaJ75ezAOfLCAuEMOvhh/GJQPbq9FbDgpKECJ7\nI38SPHMG5A6FC1+o99uWFm7lN2/P4NuF60mKC9AsOY5mTeJplhxPenIczZvEk54cT/PkOAZ2akG3\nVmlh/BAi9ROpFeVEDk45/WDwbd6khAs+8RJFPXTIaMIL1wzkgxlr+GH5RjaWlFNUUsHGknJWFm1j\nY0k5m7ZVsP03WY82aZzXJ4eRvdtonW6JSqpBiIRSWQb/Ptpr0L5pPMQ1TLtCVbWjcEsZH85YzWuT\n85m1ajPxgRiG5mVxXt8cjs/NJDag8aty4OgWk8i+WPQZPH82nPhbr0bRUMpLIC4JzJi9ajOvT87n\n7akr2bC1nMzUBM4+sg0DOjSne5s0WqYlYnvqwiuyH5QgRPbVq1fA/P/Bj7+HZh32/TjlJTD3fZj2\nEiz+HHL6w8hHIaOLt7uyms/nFfD65Hw+n1tAZbX377JZchx5rdPo3ropea3S6N46jY4ZTVTLkAaj\nBCGyrzathIf7e+tejHpp795bXQ3Lv4OpL3kr55UXQ9N20HUYzHjNWxhpyF3eVOlB611sLatk7prN\nzF61mVmrNjN79Wbmrimm3F9FL8YgOT6WpPgASXEBkuMDJPp/k+ICdM5K4aYTOpOerHYN2TMlCJH9\n8e0D8PFdMOoVOGz4nstvXOYtejTtFdi0HOJTIO8s6D0K2h3tLaZUvAbe/RnM/xDaHgVnPepND1KL\nyqpqFq3byuxVRSwuKGZrBWyrqGRbeRUl5VVsq6ja8Xze2mKaJsVx+6mHc16fHHW3lTopQYjsj8py\n+M+x3i/+H3/vtR+EUl0F4x6Bz++BqnJvhb1eo+Dw0yE+effyzsH0V7yFkSrLYejvYMCPdl+Nr3Qz\nLPkKFn7izSu1tRCG/xX6XhEyjDmrN3Pn2zOZtGwjfds3448je5DXWl1qJTQlCJH9teQrePZMGHw7\nnHjH7vvXzYd3boL8iXDYaXDqvZDetn7H3rwa3v0pLBjr1TBGPgxlm2Hhp94jf4I3MWF8CnQ8HsqK\nYenXcOSlcNrfQyas6mrHGz/k85cP57JpWwVXDOrA/52cS2pi3H5eCDnUKEGINITXr/HmbvrxeGje\nydtWXQXjHvbWqIhPhlPvgyPO2/PkgTU55zVgf3g7lAWtUdGyJ3QZ6k1XnjMAYuO9c37xF/jqPmjV\nCy54Hpq1D3nYopJy7hs7j9ETlpOZksBvTu/GiF6t97pn1PotZSws2MLKom30aptO58yUvft8ErWU\nIEQawubV8HA/aDcILnnNm0L87Ztg5SQ4/Aw4/Z+Qmr3n49R5Dn9uqBZdoPOJkJJVe9l5H8Kb/i2p\nc56sc0DftBVF/PbtmcxYuYms1ARapSeRlZpAdloC2amJZKUlkJWWSGZKAoV+Mli0bgsLC7zHxpKK\nXY7XKaMJJ+dlc3JeNke2a7bH6UgkeilBiDSU7x6Gj34DPS+CWW9BfBM47T7oce7e1xoawobF8Mrl\nsHYmnHAHHH/b7m0YvqpqxxuT85m4dANri8so2FzK2s2lu335b9csOY4uWSl0yUqhc6b3t2XTRCYs\n2cDHs9cyfvF6KqocLZrEc9LhWZycl81xuZkkxQdCHk+ikxKESEOpqoDHjoeC2dDtTK/WUNev/AOh\nvATe+z+Y/jLkngJnP7ZXa4WXVlSxrriMguJS1hWX0Sw5ni5ZKbRISdi14KZ877ZWn8uhTV82l1bw\n5bx1fDJnLZ/NLaC4tJIm8QF+NLgz1x3XSYniIKEEIdKQNi7zfrl3OiEytYZQnINJT3ltGOAliKTm\nkNTMfx70t+NgaNNn746/ehqMvhCKV0MgHob9Gfpfu+PzV1RVM2HJBp4ft4z/zVpDy7REbht2GGcf\n2UbdbKOcEoRIY7FqCsx6G7ZtgJINsK0o6PkGr/utxcBxv4DBv4JAPXo1zf8IXrvSSy7nPOaNC1nw\nkXdb7cwHICF1l+ITlmzgT+/PZnr+Jnq0SeO3p+dxVKcW4fm8st+UIETEq2Vs2wgf3wlTXoDWR8I5\nT0BGbu3vmfgkfHAbtDzCGyiY1sobIf7tv+CzP0HzznDBc5Cdt8vbqqsdY6at4t7/zWXVplJOzsvm\njlMPp9Oh2vtpyzpvbfTYg2/
0uhKEiOxq9hh49ydQUQrD7oF+V+96u6y6Gj65C757CLoOh3OfgoQa\nX+5LvobXr/bGZZzxT+h98W6nKa2o4qlvlvDo5wspq6xmeI+WZKUmkpoYu+ORkhDn/U2MJTcr5eAb\nq7FqCjx9OmR3h8ve2v06RTklCBHZ3ebV8M6PvdHZucO8AXopWVCxDd683lthb8D13qjtmFoanIvX\neONDln0DR17m9eiKS/JqK2XFsGUtFK9h87p8vp46k4I1a6CqlJiqMhKoIMEqiKfCe04F62OzaXfc\nJfQZPBICB8FyNUUr4Mkh3uctKYQOx8LFrzXY9PAHghKEiITmnLfM6sd3eSO1h/0ZJj7hrao37M9w\n1I17boivqvSmF/nmn9C0rdeuUbwGKkp2L2sxEJuEi03ABRKoDsRTFZNAZUw8FS6W+A3zSGYbWwLp\nJPQ8i7ie50H7o2tPUJFUugmeGuaNXblmLKyaCm/fAF1PhQufr1/7ThRQghCRuhXMhTevhTUzIDYJ\nzn3C68a7N+aP9ZJNQhqktvIGDaa0DPrb0rtPX0fCKSvdyvtvPE/c3LcYGphCEmXee7ufBd3P8aZJ\nr2WcR7CS8kpWFZWyetM2VhVtY2VRKeu3lBEfG0NinDfrbWJcDElxAZIC1XRb8TKtypfRfMQ90CRj\nz5+1qgJePA+WfgOXvuH1aAOY8AR8cKsX67lPRmdiq0EJQkT2rLLc6yrbbhC07h3RUCYs2cCvXxlP\nt+LvuCV7BrmbxmFVZV7yadMX2g6AnP5Ute7LnKIA4xevZ/KyjSzfUMKqom27Df4zg/SkOCqrHKWV\nVVRUOcBxSswk7ogdTceYtVQ5Y2tsOtUjHyG95+m1B+ccjLnZa+gf+Sgcecmu+7+5Hz75nXfL7cwH\n65XQIkkJQkQOOsWlFdz97mxem5xP/1axPNR3LVkbJlO2dAIJG+YSg7c+xqLqVkxxuSxN6k5hxgAC\nGV1o3SyZ1umJtG6aROv0JLLTEomP3flFXbFyGjb218Qu/4aKZrmsPfouPlsZYOCUX3GYrWBuu4vI\nveRfBBJCzML71X1eD67Bv4ITfx06+M/uga/uhYE3wvC/RM94mRCUIETkoPW/mWv49Vsz2FJWSXwg\nhi1llSRTyvD0lZySnk8v5pO1aTqBbeu9N6S18W75dDrBGxQYPD9W8Vr4/E/ww/PeuI4Tfw19r9rR\nIL5kdSFzX/wFp255m+WBdpSNeIzcXkfvfP/0V+HN67ypVs7+T+1f/M7B2F/D+Ee96U9O+m0YrkzD\niFiCMLPhwANAAHjSOffXGvuPB+4HegIXOedeD9p3BbD9qv7JOfdsXedSghA5dBUUl/Kvj+cTY8bA\nTi04qmNzstKCego5541uX/IlLP7S+7tto7cvs5u3ImBCmveFXVkGA38Ex9/qJYkanHOM+/g1un53\nG6luC1/m3MBRl95F2tqJ3hrlbQfCpW/uecyDc15X4h+eg6F/gGN/1oBXpOFEJEGYWQCYD5wM5AMT\ngVHOudlBZToAacCtwJjtCcLMmgOTgH6AAyYDfZ1zG2s7nxKEiOxQXQ1rpvsJ4wtYNg4qt8Fhp8Mp\nf6xz9b7tNq9fQ/5z15G36SsmWXd6BFawLb4FHw16ntjkZt4Sr/4yr0nxAbaUVlLgz2lVsLmMguIy\n1m3eyrXr/saQyq8Y12QomwbeyqD+/Wia1DA9nErKKxn9/XJKyyu5eUjXfTpGpBLEIOD3zrlh/us7\nAJxzfwlR9hngvaAEMQo4wTn3I//1Y8AXzrlaFwVWghCRWlWWeWMy0tvt3fucI//T/5Dxze8odgmc\nXX43+W7PkzMmxsWQlZpIVmoCLVMDnFP0DMcUvo65Kl6tPonJ7a/lmCN7cHJe9t4li+pqKFpGycoZ\nTJ88jqJl0+hYtYyyJq054pcf7fU6H1B3ggjnSJQ2wIqg1/nAwP14b5uahczseuB6gHbt9vI/vIg0\nHrEJe58cAMzIGXojDBhJXLXjf0nZlJSHXgu8SXyArDRvbY3UhNgaX9YDqd50J+s//BOj5r3Meflf\n8vTSYQx5cwRHdOnAqT1a0ad9Oh0zUnaurVFZ7k3jvnKyN1liwWxcwRysooRk4ChgXaAlcTndST/s\n+LA0hB8EQxVr55x7HHgcvBpEhMMRkUNVWmsCQAqQkrBvX5sxTVuTedGjsOFWYj7/CzfMeI2rAp/z\n7MoR/GHeEDKtiP5xSzihyQp62kJaly4gUF0OQHVSC5bHdeTr8sHMrMyheYdenDn0JPI67va7uUGF\nM0GsBIIX5c3xt9X3vSfUeO8XDRKViEgkNe+EnfsEHPszEj+7hx/NG831SS9jzuu2W1qawIzqTnxQ\ndTLTqjsz23JZVZlBZZXjzF6t+fGJXeianbqHkzSMcCaIiUCumXXE+8K/CNh9Nq/QxgJ/NrPtXQxO\nAUKsFC8icpDK7g6jRsOKidicd6BFLuT0IzHjMPpagBbrt9Jq1WbartpEeWU1lw/qQMeMJgc0xHB3\ncz0NrxtrAPivc+4eM7sbmOScG2Nm/YG3gGZAKbDGOdfdf+/VwPZRKPc4556u61xqpBYR2XsaKCci\nIiHVlSCie5IQERGJGCUIEREJSQlCRERCUoIQEZGQlCBERCQkJQgREQlJCUJEREI6ZMZBmNk6YNl+\nHCIDKGygcBqaYts3im3fKLZ9c7DG1t45lxlqxyGTIPaXmU2qbbBIpCm2faPY9o1i2zeHYmy6xSQi\nIiEpQYiISEhKEDs9HukA6qDY9o1i2zeKbd8ccrGpDUJEREJSDUJEREJSghARkZAafYIws+FmNs/M\nFprZ7ZGOJ5iZLTWzGWY21cwivtiFmf3XzArMbGbQtuZm9rGZLfD/NqvrGAcwrt+b2Ur/2k31F686\n4MysrZl9bmazzWyWmf3U3x4N16222CJ+7cws0cwmmNk0P7Y/+Ns7mtn3/r/XV8wsPopie8bMlgRd\nt94HOragGANmNsXM3vNf79t1c8412gfeSneLgE5APDANyIt0XEHxLQUyIh1HUDzHA32AmUHb7gVu\n95/fDvwtSuL6PXBrFFyzVkAf/3kqMB/Ii5LrVltsEb92gAEp/vM44HvgKOBV4CJ/+3+AG6MotmeA\n8yL9/5wf18+B0cB7/ut9um6NvQYxAFjonFvsnCsHXgZGRjimqOWc+wrYUGPzSOBZ//mzwFkHNChq\njSsqOOdWO+d+8J8XA3OANkTHdasttohzni3+yzj/4YCTgNf97ZG6brXFFhXMLAc4HXjSf23s43Vr\n7AmiDbAi6HU+UfIPxOeAj8xsspldH+lgapHtnFvtP18DZEcymBpuNrPp/i2oA34LpyYz6wAcifeL\nM6quW43YIAqunX+bZCpQAHyMV9svcs5V+kUi9u+1ZmzOue3X7R7/uv3LzBIiERtwP/BLoNp/3YJ9\nvG6NPUFEu2Odc32AU4Efm9nxkQ6oLs6rv0bLL6l/A52B3sBq4B+RDMbMUoA3gJ855zYH
74v0dQsR\nW1RcO+dclXOuN5CDV9s/PBJxhFIzNjPrAdyBF2N/oDnwqwMdl5mdARQ45yY3xPEae4JYCbQNep3j\nb4sKzrmV/t8C4C28fyTRZq2ZtQLw/xZEOB4AnHNr/X/E1cATRPDamVkc3hfwi865N/3NUXHdQsUW\nTdfOj6cI+BwYBKSbWay/K+L/XoNiG+7fsnPOuTLgaSJz3Y4BRpjZUrxb5icBD7CP162xJ4iJQK7f\nwh8PXASMiXBMAJhZEzNL3f4cOAWYWfe7ImIMcIX//ArgnQjGssP2L1/f2UTo2vn3f58C5jjn/hm0\nK+LXrbbYouHamVmmmaX7z5OAk/HaSD4HzvOLReq6hYptblDCN7x7/Af8ujnn7nDO5TjnOuB9n33m\nnLuEfb1ukW5tj/QDOA2v98Yi4DeRjicork54vaqmAbOiITbgJbxbDhV49zGvwbu/+SmwAPgEaB4l\ncT0PzACm430Zt4rQNTsW7/bRdGCq/zgtSq5bbbFF/NoBPYEpfgwzgbv87Z2ACcBC4DUgIYpi+8y/\nbjOBF/B7OkXqAZzAzl5M+3TdNNWGiIiE1NhvMYmISC2UIEREJCQlCBERCUkJQkREQlKCEBGRkJQg\nRPaCmVUFzdY51RpwBmAz6xA8I61IpMXuuYiIBNnmvCkWRA55qkGINADz1u6417z1OyaYWRd/ewcz\n+8yfwO1TM2vnb882s7f8NQWmmdnR/qECZvaEv87AR/5IXZGIUIIQ2TtJNW4xXRi0b5Nz7gjgYbwZ\nNQEeAp51zvUEXgQe9Lc/CHzpnOuFt5bFLH97LvCIc647UAScG+bPI1IrjaQW2QtmtsU5lxJi+1Lg\nJOfcYn8CvDXOuRZmVog3VUWFv321cy7DzNYBOc6b2G37MTrgTR2d67/+FRDnnPtT+D+ZyO5UgxBp\nOK6W53ujLOh5FWonlAhSghBpOBcG/R3nP/8Ob1ZNgEuAr/3nnwI3wo7FZ5oeqCBF6ku/TkT2TpK/\nkth2/3PObe/q2szMpuPVAkb5224Bnjaz24B1wFX+9p8Cj5vZNXg1hRvxZqQViRpqgxBpAH4bRD/n\nXGGkYxFpKLrFJCIiIakGISIiIakGISIiISlBiIhISEoQIiISkhKEiIiEpAQhIiIh/T/ZcxDL3yQG\nKwAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "tags": [] + } + } + ] + } + ] +} \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 0000000..a99e8bf --- /dev/null +++ b/README.md @@ -0,0 +1 @@ +# AI-Driver-CNN-DeepLearning-PyTorch diff --git a/ai_driver.py b/ai_driver.py new file mode 100644 index 0000000..ad48375 --- /dev/null +++ b/ai_driver.py @@ -0,0 +1,478 @@ +# -*- coding: utf-8 -*- +"""AI_Driver.ipynb + +Automatically generated by Colaboratory. + +Original file is located at + https://colab.research.google.com/drive/1duHGNGqfOBkiaLVnnrFDt-FHLskcOgm5 +""" + +!git clone https://github.com/LiyuanLucasLiu/RAdam.git +!python RAdam/setup.py install + +# imports +import os +import sys + +import torch +import torch.nn as nn +import torch.optim as optim +from torch.utils import data +from torch.utils.data import DataLoader +import torchvision.transforms as transforms +from torch.utils.data.sampler import SubsetRandomSampler +from RAdam import radam + +import cv2 +import matplotlib.image as mpimg +import numpy as np +import csv +import requests +import zipfile +import time +import pandas as pd + +# class for download +class DataDownloader: + + def __init__(self, file_id, destination, download = True): + self.file_id = file_id + self.destination = destination + + if download: + self.download_dataset() + self.extract_zip() + + def download_dataset(self): + def get_confirm_token(response): + for key, value in response.cookies.items(): + if key.startswith('download_warning'): + return value + + return None + + def save_response_content(response): + CHUNK_SIZE = 32768 + + with open(self.destination, "wb") as f: + for chunk in response.iter_content(CHUNK_SIZE): + if chunk: # filter out keep-alive new chunks + f.write(chunk) + + URL = "https://docs.google.com/uc?export=download" + + session = requests.Session() + + response = session.get(URL, params = { 'id' : self.file_id }, stream = True) + token = get_confirm_token(response) + + if token: + params = { 'id' : self.file_id, 'confirm' : token } + response = session.get(URL, params = params, stream = True) + + save_response_content(response) + + def extract_zip(self): + if not os.path.exists('input'): + os.makedirs('input') + + if not os.path.exists('output'): + os.makedirs('output') + + with zipfile.ZipFile(self.destination, 'r') as zip_ref: + zip_ref.extractall('./input/') + +FILE_ID = '1VaYonsJUovGO1AamMQuC2LN47AZ4pkTm' +DST_LOC = './self_driving_dataset.zip' +DATA_CSV_FILE_PATH = './input/driving_log.csv' +DATA_IMAGES_DIR = './input/IMG' +MODEL_SAVE_PATH = './output/ai_driver_cnn.pth' +IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS = 66, 200, 3 + +SAVE_DIR = './output/' + +data_download = DataDownloader(FILE_ID, DST_LOC, True) + +# Helper defs + +def load_image(data_dir, image_file): + """ + Load RGB images from a file + """ + name = image_file.split('/')[-1] + return mpimg.imread(os.path.join(data_dir, name)) + +def crop(image): + """ + Crop the image (removing the sky at the top and the car front at the bottom) + """ + return image[60:-25, :, :] # remove the sky and the car front + + +def resize(image): + """ + Resize the image to the input shape used by the network model + """ + return cv2.resize(image, (IMAGE_WIDTH, IMAGE_HEIGHT), cv2.INTER_AREA) + + +def rgb2yuv(image): + """ + Convert the image from RGB to YUV (This is what the NVIDIA model does) + """ + return cv2.cvtColor(image, cv2.COLOR_RGB2YUV) + + +def preprocess(image): + """ + Combine all preprocess functions into one + """ + image = crop(image) + image = 
resize(image) + image = rgb2yuv(image) + return image + + +def choose_image(data_dir, center, left, right, steering_angle): + """ + Randomly choose an image from the center, left or right, and adjust + the steering angle. + """ + choice = np.random.choice(3) + if choice == 0: + return load_image(data_dir, left), steering_angle + 0.2 + elif choice == 1: + return load_image(data_dir, right), steering_angle - 0.2 + return load_image(data_dir, center), steering_angle + + +def random_flip(image, steering_angle): + """ + Randomly flip the image left <-> right, and adjust the steering angle. + """ + if np.random.rand() < 0.5: + image = cv2.flip(image, 1) + steering_angle = -steering_angle + return image, steering_angle + + +def random_translate(image, steering_angle, range_x, range_y): + """ + Randomly shift the image vertically and horizontally (translation). + """ + trans_x = range_x * (np.random.rand() - 0.5) + trans_y = range_y * (np.random.rand() - 0.5) + steering_angle += trans_x * 0.002 + trans_m = np.float32([[1, 0, trans_x], [0, 1, trans_y]]) + height, width = image.shape[:2] + image = cv2.warpAffine(image, trans_m, (width, height)) + return image, steering_angle + + +def random_shadow(image): + """ + Generates and adds random shadow + """ + print(image.shape) + # (x1, y1) and (x2, y2) form a line + # xm, ym give all the locations of the image + x1, y1 = IMAGE_WIDTH * np.random.rand(), 0 + x2, y2 = IMAGE_WIDTH * np.random.rand(), IMAGE_HEIGHT + xm, ym = np.mgrid[0:IMAGE_HEIGHT, 0:IMAGE_WIDTH] + + # mathematically speaking, we want to set 1 below the line and zero otherwise + # Our coordinate is upside down. So, above the line: + # (ym-y1)/(xm-x1) > (y2-y1)/(x2-x1) + # as x2 == x1 causes zero-division problem, we'll write it in the below form: + # (ym-y1)*(x2-x1) - (y2-y1)*(xm-x1) > 0 + mask = np.zeros_like(image[:, :, 1]) + mask[(ym - y1) * (x2 - x1) - (y2 - y1) * (xm - x1) > 0] = 1 + + # choose which side should have shadow and adjust saturation + cond = mask == np.random.randint(2) + s_ratio = np.random.uniform(low=0.2, high=0.5) + + # adjust Saturation in HLS(Hue, Light, Saturation) + hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS) + hls[:, :, 1][cond] = hls[:, :, 1][cond] * s_ratio + return cv2.cvtColor(hls, cv2.COLOR_HLS2RGB) + + +def random_brightness(image): + """ + Randomly adjust brightness of the image. + """ + # HSV (Hue, Saturation, Value) is also called HSB ('B' for Brightness). + hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV) + ratio = 1.0 + 0.4 * (np.random.rand() - 0.5) + hsv[:,:,2] = hsv[:,:,2] * ratio + return cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB) + + +def augument(data_dir, center, left, right, steering_angle, range_x=100, range_y=10): + """ + Generate an augmented image and adjust steering angle. 
+ (The steering angle is associated with the center image) + """ + image, steering_angle = choose_image(data_dir, center, left, right, steering_angle) + image, steering_angle = random_flip(image, steering_angle) + image, steering_angle = random_translate(image, steering_angle, range_x, range_y) + # image = random_shadow(image) + image = random_brightness(image) + return image, steering_angle + +class CustomDataset(data.Dataset): + + def __init__(self, csv_file_path, image_dir, transform = None): + self.csv_file_path = csv_file_path + self.image_dir = image_dir + self.transform = transform + + self.examples = [] + + with open(self.csv_file_path) as csvfile: + reader = csv.reader(csvfile) + next(reader, None) + for line in reader: + self.examples.append(line) + + + def __getitem__(self, index): + example = self.examples[index] + center, left, right = example[0], example[1], example[2] + steering_angle = float(example[3]) + + if np.random.rand() < 0.6: + image, steering_angle = augument(self.image_dir, center, left, right, steering_angle) + else: + image = load_image(self.image_dir, center) + + image = preprocess(image) + + if self.transform is not None: + image = self.transform(image) + + return image, steering_angle + + def __len__(self): + return len(self.examples) + +batch_size = 128 +num_epochs = 40 + +validation_split = 0.25 +shuffle_dataset = True +random_seed = 42 +num_workers = 4 + +print("Initializing Datasets and Dataloaders...") + +# Creating data indices for training and validation splits: +#Create a dataset object +transformations = transforms.Compose([transforms.Lambda(lambda x: (x / 127.5) - 1.0)]) + +dataset = CustomDataset(DATA_CSV_FILE_PATH, DATA_IMAGES_DIR, transformations) +dataset_size = len(dataset) +# dataset_size = 3000 +indices = list(range(dataset_size)) +split = int(np.floor(validation_split * dataset_size)) + +if shuffle_dataset : + np.random.seed(random_seed) + np.random.shuffle(indices) + +train_indices, val_indices = indices[split:], indices[:split] + +# Creating PT data samplers and loaders: +train_sampler = SubsetRandomSampler(train_indices) +valid_sampler = SubsetRandomSampler(val_indices) + +train_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, + sampler=train_sampler, num_workers=num_workers) +validation_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, + sampler=valid_sampler, num_workers=num_workers) + +test_loader = torch.utils.data.DataLoader(dataset, batch_size=1, + sampler=valid_sampler, num_workers=num_workers) + + +data_loader_dict = { + 'train': train_loader, + 'val': validation_loader +} + +# Detect if we have a GPU available +device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + +class DriverNet(nn.Module): + + def __init__(self): + super(DriverNet, self).__init__() + + self.conv_layers = nn.Sequential( + nn.Conv2d(3, 24, kernel_size=5, stride=2), + nn.ELU(), + nn.Conv2d(24, 36, kernel_size=5, stride=2), + nn.ELU(), + nn.Conv2d(36, 48, kernel_size=5, stride=2), + nn.ELU(), + nn.Conv2d(48, 64, kernel_size=3, stride=1), + nn.ELU(), + nn.Conv2d(64, 64, kernel_size=3, stride=1), + nn.ELU(), + nn.Dropout(p=0.5) + ) + self.linear_layers = nn.Sequential( + nn.Linear(in_features=64*1*18, out_features=100), + nn.ELU(), + nn.Dropout(p=0.5), + nn.Linear(in_features=100, out_features=64), + nn.ELU(), + nn.Linear(in_features=64, out_features=10), + nn.ELU(), + nn.Linear(in_features=10, out_features=1) + ) + + + def forward(self, input): + input = input.view(input.size(0), 3, 66, 200) + output = 
+        output = output.view(output.size(0), -1)
+        output = self.linear_layers(output)
+        return output
+
+model_ft = DriverNet()
+
+# Send the model to GPU
+model_ft = model_ft.to(device)
+
+# Gather the parameters to be optimized/updated in this run. The network is
+# trained from scratch, so all parameters (i.e. those with requires_grad set
+# to True) are updated.
+params_to_update = model_ft.parameters()
+print("Params to learn:")
+
+for name, param in model_ft.named_parameters():
+    if param.requires_grad:
+        print("\t", name)
+
+# Observe that all parameters are being optimized
+optimizer_ft = radam.RAdam(params_to_update)
+# optimizer_ft = optim.SGD(params_to_update, lr = 0.00008)
+# optimizer_ft = optim.Adam(params_to_update, lr = 0.0001)
+
+def toDevice(data, device):
+    return data.float().to(device)
+
+def train_model(model, dataloaders, criterion, optimizer, num_epochs=25):
+    since = time.time()
+
+    epoch_number, train_losses, val_losses = [], [], []
+    best_loss = 10000.0
+
+    for epoch in range(num_epochs):
+        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
+        print('-' * 10)
+        epoch_number.append(epoch)
+        # Each epoch has a training and a validation phase
+        train_loss = 0.0
+        val_loss = 0.0
+
+        # Training
+        model.train()
+        for inputs, labels in dataloaders['train']:
+            inputs = toDevice(inputs, device)
+            labels = toDevice(labels, device)
+
+            optimizer.zero_grad()
+            # Generate predictions
+            out = model(inputs)
+            # Calculate loss
+            loss = criterion(out, labels.unsqueeze(1))
+            # Backpropagation
+            loss.backward()
+            # Update model parameters
+            optimizer.step()
+
+            train_loss += loss.item()
+
+        # Validation
+        model.eval()
+        with torch.no_grad():
+            for inputs, labels in dataloaders['val']:
+                inputs = toDevice(inputs, device)
+                labels = toDevice(labels, device)
+                # Generate predictions
+                out = model(inputs)
+                # Calculate loss
+                loss = criterion(out, labels.unsqueeze(1))
+
+                val_loss += loss.item()
+
+        # Average the losses over the number of batches
+        train_loss = train_loss / len(dataloaders['train'])
+        val_loss = val_loss / len(dataloaders['val'])
+
+        train_losses.append(train_loss)
+        val_losses.append(val_loss)
+
+        print('Train Loss: {:.4f}'.format(train_loss))
+        print('Val Loss: {:.4f}'.format(val_loss))
+
+        # Save the model whenever the validation loss reaches a new minimum
+        if val_loss < best_loss:
+            torch.save(model, MODEL_SAVE_PATH)
+            best_loss = val_loss
+
+    time_elapsed = time.time() - since
+    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
+    print('Best val Loss: {:.4f}'.format(best_loss))
+
+    # Create a dataframe and record the training and validation losses at each epoch
+    log_frame = pd.DataFrame(columns=["Epoch", "Train Loss", "Test Loss"])
+    log_frame["Epoch"] = epoch_number
+    log_frame["Train Loss"] = train_losses
+    log_frame["Test Loss"] = val_losses
+    log_frame.to_csv(os.path.join(SAVE_DIR, "log2.csv"), index=False)
+
+    # load best model weights
+    # model.load_state_dict(best_model_wts)
+    return model
+
+criterion = nn.MSELoss()
+
+# Train and evaluate
+model_ft = train_model(model_ft, data_loader_dict, criterion, optimizer_ft, num_epochs=num_epochs)
+
+frame = pd.read_csv(os.path.join(SAVE_DIR, "log2.csv"))
+frame
+
+from matplotlib import pyplot as plt
+from matplotlib import style
+
+from numpy import genfromtxt
+
+data = genfromtxt(os.path.join(SAVE_DIR, "log2.csv"), delimiter=',',
+                  names=['Epoch', 'Train Loss', 'Test Loss'])
+epoch_list = []
+train_loss_list = []
+test_loss_list = []
+for row in data:
+    if not np.isnan(row[0]):
+        epoch_list.append(row[0])
+        train_loss_list.append(row[1])
+        test_loss_list.append(row[2])
+
+
+plt.plot(epoch_list, train_loss_list, label="Training Loss")
+plt.plot(epoch_list, test_loss_list, label="Testing Loss")
+
+plt.title('MSE Loss vs. Epoch')
+plt.ylabel('Loss')
+plt.xlabel('Epoch')
+plt.legend()
+
+plt.show()
\ No newline at end of file
diff --git a/drive.py b/drive.py
new file mode 100644
index 0000000..6ea0d15
--- /dev/null
+++ b/drive.py
@@ -0,0 +1,184 @@
+import argparse
+import base64
+from datetime import datetime
+import os
+import shutil
+
+import numpy as np
+import socketio
+import eventlet
+import eventlet.wsgi
+from PIL import Image
+from flask import Flask
+from io import BytesIO
+
+import torch
+from torch.autograd import Variable
+import torchvision.transforms as transforms
+from model import *
+import cv2
+import traceback
+
+sio = socketio.Server()
+app = Flask(__name__)
+model = None
+prev_image_array = None
+
+transformations = transforms.Compose([transforms.Lambda(lambda x: (x / 127.5) - 1.0)])
+
+IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS = 66, 200, 3
+
+def crop(image):
+    """
+    Crop the image (removing the sky at the top and the car front at the bottom)
+    """
+    return image[60:-25, :, :]  # remove the sky and the car front
+
+
+def resize(image):
+    """
+    Resize the image to the input shape used by the network model
+    """
+    return cv2.resize(image, (IMAGE_WIDTH, IMAGE_HEIGHT), interpolation=cv2.INTER_AREA)
+
+
+def rgb2yuv(image):
+    """
+    Convert the image from RGB to YUV (this is what the NVIDIA model does)
+    """
+    return cv2.cvtColor(image, cv2.COLOR_RGB2YUV)
+
+
+def preprocess(image):
+    """
+    Combine all preprocessing functions into one
+    """
+    image = crop(image)
+    image = resize(image)
+    image = rgb2yuv(image)
+    return image
+
+class SimplePIController:
+    def __init__(self, Kp, Ki):
+        self.Kp = Kp
+        self.Ki = Ki
+        self.set_point = 0.
+        self.error = 0.
+        self.integral = 0.
+
+    def set_desired(self, desired):
+        self.set_point = desired
+
+    def update(self, measurement):
+        # proportional error
+        self.error = self.set_point - measurement
+
+        # integral error
+        self.integral += self.error
+
+        return self.Kp * self.error + self.Ki * self.integral
+
+
+controller = SimplePIController(0.1, 0.002)
+set_speed = 10
+controller.set_desired(set_speed)
+
+# set min/max speed for our autonomous car
+MAX_SPEED = 25
+MIN_SPEED = 10
+
+# and a speed limit
+speed_limit = MAX_SPEED
+
+@sio.on('telemetry')
+def telemetry(sid, data):
+    if data:
+        # The current steering angle of the car
+        steering_angle = float(data["steering_angle"])
+        # The current throttle of the car (how hard the pedal is pressed)
+        throttle = float(data["throttle"])
+        # The current speed of the car
+        speed = float(data["speed"])
+        # The current image from the center camera of the car
+        image = Image.open(BytesIO(base64.b64decode(data["image"])))
+        try:
+            image = np.asarray(image)  # from PIL image to numpy array
+            image = preprocess(image)  # apply the preprocessing
+            image = np.array([image])  # the model expects a 4D array
+            image = transformations(image)
+            image = torch.Tensor(image)
+
+            # predict the steering angle for the image
+            steering_angle = model(image).view(-1).data.numpy()[0]
+
+            global speed_limit
+            if speed > speed_limit:
+                speed_limit = MIN_SPEED  # slow down
+            else:
+                speed_limit = MAX_SPEED
+            # back off the throttle when turning hard or when close to the speed limit
+            throttle = 1.0 - steering_angle**2 - (speed/speed_limit)**2
+
+            print('{} {} {}'.format(steering_angle, throttle, speed))
+            send_control(steering_angle, throttle)
+
+        except Exception as e:
+            print(traceback.format_exc())
+
+    else:
+        sio.emit('manual', data={}, skip_sid=True)
+
+
+@sio.on('connect')
+def connect(sid, environ):
+    print("connect ", sid)
+    send_control(0, 0)
+
+
+def send_control(steering_angle, throttle):
+    sio.emit(
+        "steer",
+        data={
+            'steering_angle': steering_angle.__str__(),
+            'throttle': throttle.__str__()
+        },
+        skip_sid=True)
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(description='Remote Driving')
+    parser.add_argument(
+        'model',
+        type=str,
+        help='Path to the saved model (.pth) file.'
+    )
+    parser.add_argument(
+        'image_folder',
+        type=str,
+        nargs='?',
+        default='',
+        help='Path to image folder. This is where the images from the run will be saved.'
+    )
+    args = parser.parse_args()
+
+    # load the full trained PyTorch model that was saved with torch.save()
+    # checkpoint = torch.load(args.model, map_location=lambda storage, loc: storage)
+    model = torch.load(args.model, map_location=lambda storage, loc: storage)
+    # model = checkpoint['model']
+    model.eval()  # switch to eval mode so dropout is disabled during inference
+
+    if args.image_folder != '':
+        print("Creating image folder at {}".format(args.image_folder))
+        if not os.path.exists(args.image_folder):
+            os.makedirs(args.image_folder)
+        else:
+            shutil.rmtree(args.image_folder)
+            os.makedirs(args.image_folder)
+        print("RECORDING THIS RUN ...")
+    else:
+        print("NOT RECORDING THIS RUN ...")
+
+    # wrap Flask application with engineio's middleware
+    app = socketio.Middleware(sio, app)
+
+    # deploy as an eventlet WSGI server
+    eventlet.wsgi.server(eventlet.listen(('', 4567)), app)
\ No newline at end of file
diff --git a/images/loss.png b/images/loss.png
new file mode 100644
index 0000000..5dbebd5
Binary files /dev/null and b/images/loss.png differ
diff --git a/model.py b/model.py
new file mode 100644
index 0000000..9307ed9
--- /dev/null
+++ b/model.py
@@ -0,0 +1,38 @@
+import torch.nn as nn
+
+class DriverNet(nn.Module):
+
+    def __init__(self):
+        super(DriverNet, self).__init__()
+
+        self.conv_layers = nn.Sequential(
+            nn.Conv2d(3, 24, kernel_size=5, stride=2),
+            nn.ELU(),
+            nn.Conv2d(24, 36, kernel_size=5, stride=2),
+            nn.ELU(),
+            nn.Conv2d(36, 48, kernel_size=5, stride=2),
+            nn.ELU(),
+            nn.Conv2d(48, 64, kernel_size=3, stride=1),
+            nn.ELU(),
+            nn.Conv2d(64, 64, kernel_size=3, stride=1),
+            nn.ELU(),
+            nn.Dropout(p=0.5)
+        )
+        self.linear_layers = nn.Sequential(
+            nn.Linear(in_features=64*1*18, out_features=100),
+            nn.ELU(),
+            nn.Dropout(p=0.4),
+            nn.Linear(in_features=100, out_features=64),
+            nn.ELU(),
+            nn.Linear(in_features=64, out_features=10),
+            nn.ELU(),
+            nn.Linear(in_features=10, out_features=1)
+        )
+
+
+    def forward(self, x):
+        x = x.view(x.size(0), 3, 66, 200)
+        output = self.conv_layers(x)
+        output = output.view(output.size(0), -1)
+        output = self.linear_layers(output)
+        return output
\ No newline at end of file
diff --git a/model/model.pth b/model/model.pth
new file mode 100644
index 0000000..baad997
Binary files /dev/null and b/model/model.pth differ
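Both copies of DriverNet hard-code in_features=64*1*18 for the first fully connected layer; that value is the flattened size of the final convolutional feature map for a 66x200 input. A minimal standalone sanity check of that assumption (not part of the committed files; it assumes the model.py added in this diff is importable):

import torch
from model import DriverNet  # model.py from this diff

# Push a dummy 1x3x66x200 batch through the convolutional stack only and confirm
# that the flattened feature size matches nn.Linear(in_features=64*1*18, ...).
net = DriverNet()
with torch.no_grad():
    features = net.conv_layers(torch.zeros(1, 3, 66, 200))
print(features.shape)                # torch.Size([1, 64, 1, 18])
print(features.view(1, -1).size(1))  # 1152 == 64 * 1 * 18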