Created using Colab #49

Open · wants to merge 4 commits into base: main
393 changes: 393 additions & 0 deletions cnn.ipynb
@@ -0,0 +1,393 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/github/andremicci/Generative_Deep_Learning_2nd_Edition/blob/main/cnn.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "code",
"source": [
"!git clone https://github.com/andremicci/Generative_Deep_Learning_2nd_Edition.git"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "KOnmreJbjLIz",
"outputId": "a3a49f71-3b29-41cb-fc11-a031442b32a9"
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Cloning into 'Generative_Deep_Learning_2nd_Edition'...\n",
"remote: Enumerating objects: 653, done.\u001b[K\n",
"remote: Counting objects: 100% (195/195), done.\u001b[K\n",
"remote: Compressing objects: 100% (70/70), done.\u001b[K\n",
"remote: Total 653 (delta 142), reused 125 (delta 125), pack-reused 458 (from 1)\u001b[K\n",
"Receiving objects: 100% (653/653), 37.09 MiB | 8.67 MiB/s, done.\n",
"Resolving deltas: 100% (377/377), done.\n"
]
}
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "TFy-fuvQjJJp"
},
"source": [
"# 🏞 Convolutional Neural Network"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "wEIaaUnijJJr"
},
"source": [
"In this notebook, we'll walk through the steps required to train your own convolutional neural network (CNN) on the CIFAR dataset"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 350
},
"id": "Ewl7DskxjJJs",
"outputId": "72f7b930-03ff-4bab-97c7-8f337c0da519"
},
"outputs": [
{
"output_type": "error",
"ename": "ModuleNotFoundError",
"evalue": "No module named 'notebooks'",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-2-a230bc8cc151>\u001b[0m in \u001b[0;36m<cell line: 0>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 2\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtensorflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mkeras\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mlayers\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmodels\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moptimizers\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mutils\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdatasets\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 4\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mnotebooks\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mutils\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mdisplay\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'notebooks'",
"",
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0;32m\nNOTE: If your import is failing due to a missing package, you can\nmanually install dependencies using either !pip or !apt.\n\nTo view examples of installing some common dependencies, click the\n\"Open Examples\" button below.\n\u001b[0;31m---------------------------------------------------------------------------\u001b[0m\n"
],
"errorDetails": {
"actions": [
{
"action": "open_url",
"actionText": "Open Examples",
"url": "/notebooks/snippets/importing_libraries.ipynb"
}
]
}
}
],
"source": [
"import numpy as np\n",
"\n",
"from tensorflow.keras import layers, models, optimizers, utils, datasets\n",
"from notebooks.utils import display"
]
},
{
"cell_type": "markdown",
"metadata": {
"tags": [],
"id": "5xHzrj8jjJJu"
},
"source": [
"## 0. Parameters <a name=\"parameters\"></a>"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "4dTmq7jjjJJv"
},
"outputs": [],
"source": [
"NUM_CLASSES = 10"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "MTLZ1IG6jJJw"
},
"source": [
"## 1. Prepare the Data <a name=\"prepare\"></a>"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "DR22w6jsjJJx"
},
"outputs": [],
"source": [
"(x_train, y_train), (x_test, y_test) = datasets.cifar10.load_data()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "gkQBN7pojJJx"
},
"outputs": [],
"source": [
"x_train = x_train.astype(\"float32\") / 255.0\n",
"x_test = x_test.astype(\"float32\") / 255.0\n",
"\n",
"y_train = utils.to_categorical(y_train, NUM_CLASSES)\n",
"y_test = utils.to_categorical(y_test, NUM_CLASSES)"
]
},
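{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a quick sanity check (a minimal sketch; the expected values below are the standard CIFAR-10 splits and the [0, 1] pixel range produced by the scaling above):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Shapes and value range after preprocessing.\n",
"print(x_train.shape, y_train.shape)  # expected: (50000, 32, 32, 3) (50000, 10)\n",
"print(x_test.shape, y_test.shape)  # expected: (10000, 32, 32, 3) (10000, 10)\n",
"print(x_train.min(), x_train.max())  # expected: 0.0 1.0"
]
},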
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "p6D1z7qPjJJz"
},
"outputs": [],
"source": [
"display(x_train[:10])\n",
"print(y_train[:10])"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "uqly1nGYjJJ0"
},
"source": [
"## 2. Build the model <a name=\"build\"></a>"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "rvLwhVf7jJJ1"
},
"outputs": [],
"source": [
"input_layer = layers.Input((32, 32, 3))\n",
"\n",
"x = layers.Conv2D(filters=32, kernel_size=3, strides=1, padding=\"same\")(\n",
" input_layer\n",
")\n",
"x = layers.BatchNormalization()(x)\n",
"x = layers.LeakyReLU()(x)\n",
"\n",
"x = layers.Conv2D(filters=32, kernel_size=3, strides=2, padding=\"same\")(x)\n",
"x = layers.BatchNormalization()(x)\n",
"x = layers.LeakyReLU()(x)\n",
"\n",
"x = layers.Conv2D(filters=64, kernel_size=3, strides=1, padding=\"same\")(x)\n",
"x = layers.BatchNormalization()(x)\n",
"x = layers.LeakyReLU()(x)\n",
"\n",
"x = layers.Conv2D(filters=64, kernel_size=3, strides=2, padding=\"same\")(x)\n",
"x = layers.BatchNormalization()(x)\n",
"x = layers.LeakyReLU()(x)\n",
"\n",
"x = layers.Flatten()(x)\n",
"\n",
"x = layers.Dense(128)(x)\n",
"x = layers.BatchNormalization()(x)\n",
"x = layers.LeakyReLU()(x)\n",
"x = layers.Dropout(rate=0.5)(x)\n",
"\n",
"x = layers.Dense(NUM_CLASSES)(x)\n",
"output_layer = layers.Activation(\"softmax\")(x)\n",
"\n",
"model = models.Model(input_layer, output_layer)\n",
"\n",
"model.summary()"
]
},
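{
"cell_type": "markdown",
"metadata": {},
"source": [
"Each stride-2 convolution halves the spatial dimensions, so the feature maps shrink from 32×32 to 16×16 and then 8×8; with 64 filters, `Flatten` therefore emits 8 × 8 × 64 = 4096 features. A minimal sketch to confirm the end-to-end shape with a dummy forward pass (the batch size of 1 is arbitrary):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Push one all-zero image through the model; the softmax head\n",
"# should return one probability per class.\n",
"dummy = np.zeros((1, 32, 32, 3), dtype=\"float32\")\n",
"print(model(dummy).shape)  # expected: (1, 10)"
]
},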
{
"cell_type": "markdown",
"metadata": {
"tags": [],
"id": "_0JEpfyWjJJ3"
},
"source": [
"## 3. Train the model <a name=\"train\"></a>"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "RQWD-6i6jJJ4"
},
"outputs": [],
"source": [
"opt = optimizers.Adam(learning_rate=0.0005)\n",
"model.compile(\n",
" loss=\"categorical_crossentropy\", optimizer=opt, metrics=[\"accuracy\"]\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": [],
"id": "pajMd0T0jJJ5"
},
"outputs": [],
"source": [
"model.fit(\n",
" x_train,\n",
" y_train,\n",
" batch_size=32,\n",
" epochs=10,\n",
" shuffle=True,\n",
" validation_data=(x_test, y_test),\n",
")"
]
},
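{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal sketch for inspecting the learning curves, assuming the training cell above is run with its return value captured (i.e. `history = model.fit(...)`):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import matplotlib.pyplot as plt\n",
"\n",
"# Assumes `history = model.fit(...)` was captured in the cell above.\n",
"plt.plot(history.history[\"accuracy\"], label=\"train\")\n",
"plt.plot(history.history[\"val_accuracy\"], label=\"validation\")\n",
"plt.xlabel(\"epoch\")\n",
"plt.ylabel(\"accuracy\")\n",
"plt.legend()\n",
"plt.show()"
]
},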
{
"cell_type": "markdown",
"metadata": {
"tags": [],
"id": "M2Jl9j7ljJJ5"
},
"source": [
"## 4. Evaluation <a name=\"evaluate\"></a>"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "t7GXyVvYjJJ6"
},
"outputs": [],
"source": [
"model.evaluate(x_test, y_test, batch_size=1000)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "Ttvkm2g_jJJ6"
},
"outputs": [],
"source": [
"CLASSES = np.array(\n",
" [\n",
" \"airplane\",\n",
" \"automobile\",\n",
" \"bird\",\n",
" \"cat\",\n",
" \"deer\",\n",
" \"dog\",\n",
" \"frog\",\n",
" \"horse\",\n",
" \"ship\",\n",
" \"truck\",\n",
" ]\n",
")\n",
"\n",
"preds = model.predict(x_test)\n",
"preds_single = CLASSES[np.argmax(preds, axis=-1)]\n",
"actual_single = CLASSES[np.argmax(y_test, axis=-1)]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "5SkFQ0pHjJJ7"
},
"outputs": [],
"source": [
"import matplotlib.pyplot as plt\n",
"\n",
"n_to_show = 10\n",
"indices = np.random.choice(range(len(x_test)), n_to_show)\n",
"\n",
"fig = plt.figure(figsize=(15, 3))\n",
"fig.subplots_adjust(hspace=0.4, wspace=0.4)\n",
"\n",
"for i, idx in enumerate(indices):\n",
" img = x_test[idx]\n",
" ax = fig.add_subplot(1, n_to_show, i + 1)\n",
" ax.axis(\"off\")\n",
" ax.text(\n",
" 0.5,\n",
" -0.35,\n",
" \"pred = \" + str(preds_single[idx]),\n",
" fontsize=10,\n",
" ha=\"center\",\n",
" transform=ax.transAxes,\n",
" )\n",
" ax.text(\n",
" 0.5,\n",
" -0.7,\n",
" \"act = \" + str(actual_single[idx]),\n",
" fontsize=10,\n",
" ha=\"center\",\n",
" transform=ax.transAxes,\n",
" )\n",
" ax.imshow(img)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.10"
},
"vscode": {
"interpreter": {
"hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6"
}
},
"colab": {
"provenance": [],
"include_colab_link": true
}
},
"nbformat": 4,
"nbformat_minor": 0
}