diff --git a/docs/tutorials/convolutional_neural_network_for_image_classification.ipynb b/docs/tutorials/convolutional_neural_network_for_image_classification.ipynb
index ee15eaa03..2c06928f5 100644
--- a/docs/tutorials/convolutional_neural_network_for_image_classification.ipynb
+++ b/docs/tutorials/convolutional_neural_network_for_image_classification.ipynb
@@ -2,14 +2,18 @@
"cells": [
{
"cell_type": "markdown",
- "source": [],
+ "id": "6d2c03ea985891aa",
"metadata": {
"collapsed": false
},
- "id": "6d2c03ea985891aa"
+ "source": []
},
{
"cell_type": "markdown",
+ "id": "d65ab4a80f6bc842",
+ "metadata": {
+ "collapsed": false
+ },
"source": [
"# Image Classification with Convolutional Neural Networks\n",
"\n",
@@ -21,119 +25,125 @@
" All operations on an NeuralNetworkClassifier
return a new NeuralNetworkClassifier
. The original NeuralNetworkClassifier
will not be changed.\n",
"
\n",
""
- ],
- "metadata": {
- "collapsed": false
- },
- "id": "d65ab4a80f6bc842"
+ ]
},
{
"cell_type": "markdown",
- "source": "## Load data into an `ImageDataset`",
+ "id": "74dacfa56deeaed3",
"metadata": {
"collapsed": false
},
- "id": "74dacfa56deeaed3"
+ "source": [
+ "## Load data into an `ImageDataset`"
+ ]
},
{
"cell_type": "markdown",
- "source": [
- "1. Load images via files in an `ImageList`. The data is available under `docs/tutorials/data/shapes`. If the `return_filenames` parameter is set to `True`, a list of all filepaths will be returned as well in the same order as the images in the returned `ImageList`."
- ],
+ "id": "90dfbc18037f0201",
"metadata": {
"collapsed": false
},
- "id": "90dfbc18037f0201"
+ "source": [
+ "1. Load images via files in an `ImageList`. The data is available under `docs/tutorials/data/shapes`. If the `return_filenames` parameter is set to `True`, a list of all filepaths will be returned as well in the same order as the images in the returned `ImageList`."
+ ]
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 1,
"id": "initial_id",
"metadata": {
"collapsed": true
},
+ "outputs": [],
"source": [
"from safeds.data.image.containers import ImageList\n",
"\n",
"images, filepaths = ImageList.from_files(\"data/shapes\", return_filenames=True)"
- ],
- "outputs": []
+ ]
},
{
"cell_type": "markdown",
- "source": [
- "2. Create a `Column` with the labels of the images:"
- ],
+ "id": "76bc8612f449edf",
"metadata": {
"collapsed": false
},
- "id": "76bc8612f449edf"
+ "source": [
+ "2. Create a `Column` with the labels of the images:"
+ ]
},
{
"cell_type": "code",
+ "execution_count": null,
+ "id": "66dcf95a3fa51f23",
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
"source": [
"import re\n",
+ "\n",
"from safeds.data.tabular.containers import Column\n",
"\n",
"labels = Column(\n",
" \"label\", \n",
- " [re.search(r\"(.*)[\\\\/](.*)[\\\\/](.*)\\.\", filepath).group(2) for filepath in filepaths]\n",
+ " [re.search(r\"(.*)[\\\\/](.*)[\\\\/](.*)\\.\", filepath).group(2) for filepath in filepaths],\n",
")"
- ],
- "metadata": {
- "collapsed": false
- },
- "id": "66dcf95a3fa51f23",
- "execution_count": null,
- "outputs": []
+ ]
},
{
"cell_type": "markdown",
- "source": [
- "3. Create an `ImageDataset` from the `ImageList` and the `Column` of labels. If the `shuffle` parameter is set to `True`, the `ImageDataset` will be shuffled after each epoch while training a neural network."
- ],
+ "id": "596b0c9ec9627ad0",
"metadata": {
"collapsed": false
},
- "id": "596b0c9ec9627ad0"
+ "source": [
+ "3. Create an `ImageDataset` from the `ImageList` and the `Column` of labels. If the `shuffle` parameter is set to `True`, the `ImageDataset` will be shuffled after each epoch while training a neural network."
+ ]
},
{
"cell_type": "code",
+ "execution_count": null,
+ "id": "32056ddf5396e070",
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
"source": [
"from safeds.data.labeled.containers import ImageDataset\n",
"\n",
"dataset = ImageDataset[Column](images, labels, shuffle=True)"
- ],
- "metadata": {
- "collapsed": false
- },
- "id": "32056ddf5396e070",
- "execution_count": null,
- "outputs": []
+ ]
},
{
"cell_type": "markdown",
- "source": "## Create the neural network with a `NeuralNetworkClassifier`",
+ "id": "358bd4cc05c8daf3",
"metadata": {
"collapsed": false
},
- "id": "358bd4cc05c8daf3"
+ "source": [
+ "## Create the neural network with a `NeuralNetworkClassifier`"
+ ]
},
{
"cell_type": "markdown",
- "source": [
- "1. Create a list of `Layer` instances for your neural network:"
- ],
+ "id": "fe40c93a1cfd3a7b",
"metadata": {
"collapsed": false
},
- "id": "fe40c93a1cfd3a7b"
+ "source": [
+ "1. Create a list of `Layer` instances for your neural network:"
+ ]
},
{
"cell_type": "code",
+ "execution_count": null,
+ "id": "806a8091249d533a",
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
"source": [
- "from safeds.ml.nn.layers import (Convolutional2DLayer, FlattenLayer,\n",
- " ForwardLayer, MaxPooling2DLayer)\n",
+ "from safeds.ml.nn.layers import Convolutional2DLayer, FlattenLayer, ForwardLayer, MaxPooling2DLayer\n",
"\n",
"layers = [\n",
" Convolutional2DLayer(output_channel=16, kernel_size=3, padding=1),\n",
@@ -144,24 +154,26 @@
" ForwardLayer(neuron_count=128),\n",
" ForwardLayer(neuron_count=3),\n",
"]"
- ],
- "metadata": {
- "collapsed": false
- },
- "id": "806a8091249d533a",
- "execution_count": null,
- "outputs": []
+ ]
},
{
"cell_type": "markdown",
- "source": "2. Create a `NeuralNetworkClassifier` from an `InputConversion`, the list of `Layer` instances:",
+ "id": "fe4f6a4d14404a85",
"metadata": {
"collapsed": false
},
- "id": "fe4f6a4d14404a85"
+ "source": [
+ "2. Create a `NeuralNetworkClassifier` from an `InputConversion`, the list of `Layer` instances:"
+ ]
},
{
"cell_type": "code",
+ "execution_count": null,
+ "id": "af68cc0d32655d32",
+ "metadata": {
+ "collapsed": false
+ },
+ "outputs": [],
"source": [
"from safeds.ml.nn import NeuralNetworkClassifier\n",
"from safeds.ml.nn.converters import InputConversionImageToColumn\n",
@@ -170,131 +182,158 @@
" InputConversionImageToColumn(dataset.input_size), \n",
" layers,\n",
")"
- ],
- "metadata": {
- "collapsed": false
- },
- "id": "af68cc0d32655d32",
- "execution_count": null,
- "outputs": []
+ ]
},
{
"cell_type": "markdown",
- "source": "## Fit and predict the `NeuralNetworkClassifier`",
+ "id": "4f9387686ba50c37",
"metadata": {
"collapsed": false
},
- "id": "4f9387686ba50c37"
+ "source": [
+ "## Fit and predict the `NeuralNetworkClassifier`"
+ ]
},
{
"cell_type": "markdown",
- "source": [
- "1. Fit the `NeuralNetworkClassifier`:"
- ],
+ "id": "3d8efa74951725cb",
"metadata": {
"collapsed": false
},
- "id": "3d8efa74951725cb"
+ "source": [
+ "1. Fit the `NeuralNetworkClassifier`:"
+ ]
},
{
"cell_type": "code",
- "source": [
- "cnn_fitted = cnn.fit(dataset, epoch_size=32, batch_size=16)"
- ],
+ "execution_count": null,
+ "id": "381627a94d500675",
"metadata": {
"collapsed": false
},
- "id": "381627a94d500675",
- "execution_count": null,
- "outputs": []
+ "outputs": [],
+ "source": [
+ "cnn_fitted = cnn.fit(dataset, epoch_size=32, batch_size=16)"
+ ]
},
{
"cell_type": "markdown",
- "source": [
- "2. Predict values from the `NeuralNetworkClassifier`:"
- ],
+ "id": "35bb7d0ebfabf597",
"metadata": {
"collapsed": false
},
- "id": "35bb7d0ebfabf597"
+ "source": [
+ "2. Predict values from the `NeuralNetworkClassifier`:"
+ ]
},
{
"cell_type": "code",
- "source": [
- "prediction = cnn_fitted.predict(dataset.get_input())"
- ],
+ "execution_count": null,
+ "id": "62f63dd68362c8b7",
"metadata": {
"collapsed": false
},
- "id": "62f63dd68362c8b7",
- "execution_count": null,
- "outputs": []
+ "outputs": [],
+ "source": [
+ "prediction = cnn_fitted.predict(dataset.get_input())"
+ ]
},
{
"cell_type": "markdown",
- "source": [
- "3. Shuffle the prediction to get a random order:"
- ],
+ "id": "a8ecd71982a0cc97",
"metadata": {
"collapsed": false
},
- "id": "a8ecd71982a0cc97"
+ "source": [
+ "3. Shuffle the prediction to get a random order:"
+ ]
},
{
"cell_type": "code",
- "source": [
- "shuffled_prediction = prediction.shuffle()"
- ],
+ "execution_count": null,
+ "id": "779277d73e30554d",
"metadata": {
"collapsed": false
},
- "id": "779277d73e30554d",
- "execution_count": null,
- "outputs": []
+ "outputs": [],
+ "source": [
+ "shuffled_prediction = prediction.shuffle()"
+ ]
},
{
"cell_type": "markdown",
- "source": [
- "4. Display a subset of the input data:"
- ],
+ "id": "2c1ae7438df15cae",
"metadata": {
"collapsed": false
},
- "id": "2c1ae7438df15cae"
+ "source": [
+ "4. Display a subset of the input data:"
+ ]
},
{
"cell_type": "code",
- "source": [
- "shuffled_prediction.get_input().remove_image_by_index(list(range(9, len(prediction))))"
- ],
+ "execution_count": null,
+ "id": "a5ddbbfba41aa7f",
"metadata": {
"collapsed": false
},
- "id": "a5ddbbfba41aa7f",
- "execution_count": null,
- "outputs": []
+ "outputs": [
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAFwAAABcCAYAAADj79JYAAAUAElEQVR4nO2daVBTVxvH/0mAJASCLBJAUaCtC+JSGcwI2NdWVOhUSusCWpeqY6u16ojS2nGbqlNntLvjVmux1qpT1yrKKGoVXEbcWiyoLCGULUiMSchKcu95P1gyIlluEkTb5veJyT33PM/533vPOfc8z7kAHjz8m2G1/UEIIV1qmMVi/RftsrvSqAeP4F2OR/AuxiN4F+Oy4IWFhaiqqkIXjz3/eLycKUwIQWVlJY4ePYrc3FyEhoZi5MiRyM7OhlAofFo+/qtgfIcbDAZ8+eWX+PDDDxEQEIDc3FzMmzcPFy5cgEwme5o+/jshNqBpmkgkEpKZmUkWL15Mmpqa2h0/cOAASU9PJy0tLbaqsIoju0+LZ23XruAURZGrV6+SSZMmkf379xOTydShTGtrK1m9ejVZunQpUavVTjvgehNc41nbtdmlEEJw69Yt7Ny5EzNnzkRmZia8vDp2+d7e3li6dClYLBZyc3NhNBqdfbj+U9gUXCaT4dtvv8Wbb76J0aNH263Ez88P2dnZqKysxKVLlzrdyX8TVgU3Go3Izs7Ga6+9hrS0NHA4HIcVhYWFYdy4ccjPz4dare50R7uauro6/PLLL9Dr9Z1ab4c+ghCC3bt3w9/fH++88w7+XnNhhFgsRl5eHkpLSzF8+HCXnSJ/Tz/lcjlYLBYEAgGCgoIQFhbG6OK7i06nw/Lly6FUKjFw4ED079+/0+ruILhUKsXBgwexY8cOq322PYRCIVJTU7Ft2zaIxWKw2a69V9E0jT179qCxsREhISEwGAzQarXo3r07kpOTMWjQIIhEoqcm/q5du8Bms5GQkICbN2+ib9++LrflSTooevDgQYwbNw69evVyqcKUlBRs3boVly9fRnJyskt1cDgcyOVyTJkyBUOGDAFFUTAajTh//jwOHTqEtWvXQqlUgs/no2fPnvjoo4+QmJjokq0nkUgk2L17N/Ly8lBSUoLCwkJotVr4+/t3Sv0WCCFEp9ORWbNmEalU6tYU6MyZMyQrK4uYzWaH0yRrxyiKIjNnziQSicTquSaTiajValJfX08WLlxIdu3axdg3e3ZbW1tJTk4Oyc3NJYQQIpfLyYwZM0hlZSXj+h3ZbfecGI1GcLlcp7uSJxk5ciTUajVKS0tdOr+lpQU+Pj7w8fGxetzLywv+/v6IiIiASCRyx9V23LhxAy0tLRg3bhwAIDg4GEOGDEF+fn6n2WgnOEVRAODUQGkNDoeDiRMn4tSpUy6dL5PJIBQKweVyHZYNDg6GUql0yc7j6PV6nDt3DklJSejWrZvl94yMDBw6dAgPHjxw2wbwhOAsFqvTBoeRI0eisrLSJUcrKioQEhICPz8/h2VjYmJQXV3tiovtuH//Pu7cuYNRo0a1G4x79+5tGZc6g3bq8vl8mEwmNDc3u12xr68v/P39odVqnTrPaDTi3r17eOmll8Dj8RyWj46OhkqlgtlsdtVVAI+Wm6OiohAeHt7udxaLhTlz5iAvLw/19fVu2QCsCD569Gh89913aGpqcrtymqad7p60Wi0ePHiAF154gVF5Pp8PLpfr9IV9HL1ej127dln67icJDQ3F3LlzsW3bNmg0GpftAFbeNFNTUxEdHY2VK1ciLy8PBoPBpYppmobZbIa3t7dT5xmNRjx8+BChoaGMz2GxWG4FQgoKCiAQCDBkyBCbZTIyMtDa2ori4mKX7QBWBPfz88OCBQuwcOFCHDx4ENnZ2WhsbHS64paWFuh0OgQEBDh1XmtrK9RqNaP+G3j0RJjNZpfnyTRN4+uvv8a8efNszooAICAgAElJSbh8+TJ0Op1LtgAbER8ul4u4uDhs2bIF+/btQ0ZGBl577TWMHz8evXr1gkAgAI/Hs/mmZzKZcPToUQwePBh8Pt8ph3x8fCCRSDBlyhQMHjwYAwcOxJAhQ9CtWzdwuVz4+PiAw+HAy8sLbDYbLS0t4HK5Lr91Go1G0DSN48ePo7a2FrGxsejevTsEAgH4fL6ly+JwOBg+fDgOHz6Muro69OnTxyV7jBKB1Go1Tp48iUuXLoHD4SA0NBRBQUEICgoCj8cDIcTSV9M0DalUilu3buHTTz9FVFSUdcN2EnI0Gg2kUikkEgnKysoglUrB5/PRrVs3CAQCcLlc+Pn5QSQSoaysDIQQ5OTkMGuwFbvl5eUoLy+HRCKBVCqFSqWyzPVDQkIQFBSEwMBA+Pj4YP/+/UhLS8P06dMZ2XvSrlOZV0ajEXV1dZBKpbh//z4ePnwImqZB0zQ4HA4IIaAoCiEhIRCLxXjxxRedargt9Ho9ZDIZmpuboVKpoNfr8fDhQ8jlclRUVGD27NlISEhwquH27BJCoFarUVdXh4aGBjQ1NaGlpQUmkwkajQZjx45FfHw8I3tP2nUr1c1sNoOm6Q6/e3t7O5yduJtyRgiB0WiEwWCAv78/4y7FFbttNxJN0yCEwNvb2+n3FZa7b5MePPwj8GTPdrFdT6pbF+MR/G9MJhOkUulTzzpwb+H7XwJFUZgwYQIAwN/fH+PHjwebzW63PPz4JKPtvYPFYoGmabDZbNA0DS8vL/Tu3RsxMTE2Z01uCU4IQUFBAYKCghAfH+/2Orq7EEKg1+uhUCjAYrEYB53v3bsHrVaLvLw8HD16FCdPnoTBYGgnOJvNtqzXPCk4h8MBRVEwGAy4fv069u7di6FDh1q15ZbgEokE+/btQ1lZGdavX4+RI0d22nq6M9A0jcbGRhQXF+POnTu4fv06goOD8fnnnzNay2mbW/N4PGRlZSErK8tlXzZt2oTz5893vuAmkwkFBQV4+eWXMWHCBGzduhU0TSMlJcVlZ11BIpHg0KFDkEgk6NGjBxISEhAeHo76+nqnVyo7gxEjRmDVqlXIzs62etxlwe/fv4/i4mIsXrwYcXFxCAkJwTfffIOIiAjExsa67DATzGYzSkpK8N1336GsrAzTp09HTk4OwsPDwefzcfDgQQQEBDAWvDNniDExMaiqqrJ53GXB9+3bh7i4OMTFxYHFYiE+Ph6vvPIKDh8+jJiYGEbRGqaYTCao1WrIZDKcPHkSp0+fhpeXF2bNmoXNmzd36Kd1Op1lVZEJbDa708aftgHUFi4JLpPJcOrUKezcudPiqJeXF9LS0rBu3TpUVlYiLi7ONY/xqE9uampCeXk5FAoFGhoaUF1dDa1Wi4SEBGzevBkxMTFWswvaLo5IJGI8ngiFQqhUqnarnk8LlwTfvHkzUlJSOiQLRUZGYsSIEfjmm2+wfft2pwfQtsj5pUuXoFQqERYWhqCgIAQHB2PEiBHo27evw0g+RVHQ6/Xw9fVlbDc8PBw0TUOhUCA4ONgpn61h76I5LXhNTQ3Onj2L06dPdzjGZrMxadIk/Pzzz8jLy0N6errD+iiKQklJCQ4cOIDTp08jPj4eGRkZiIuLswQdmKw+tqHT6dDc3IyIiAjGbeJwOOjXrx/u3r2LpKQkxudZw2w2283rcUpwiqKwd+9eTJ061WYIjMf
jYevWrZgwYQKSkpIc3jFJSUkQCoXIzMzEsWPHnBLKGi0tLVAoFIiOjnbqvOTkZBQXF7stuEqlsttmp5750tJS1NTUICMjw265mJgYTJ48Gbm5uTCZTHbLxsbGYs2aNZg2bZrbYgNAVVUVunXr1i6ZhwlisRiVlZVup1uwWCy7gzVjwVtbW1FYWIhhw4YxiqhPnjwZlZWVuHPnjt1yaWlp2Lt3L7Zv3w6VSsXUHZuUlpbipZdecvq84OBgsFgsyOVyt+wTQuzOUhgLLpfLcevWLSQnJzPKPQwPD0diYiIOHz5sN8o9fvx45OTkQKlUIj09HQUFBUxdskpxcbFLM6S2OGlnJEHZg1EfTlEUduzYgYEDBzKOVnM4HLz++uv4+OOP8fvvv9tMJ2az2YiMjMTKlSuRnp6OTz75BMXFxZgxYwbCw8OdisabzWb8+eef6Nu3L+Nz2vD29gZFUdi4cSMiIiIQGBiIgIAAhIaGok+fPggMDASPxwOfzwePx7M5AzOZTHZvSEYBiGvXrmHVqlXYs2eP09OmoqIifPnll9iyZUu7NDJbgQC1Wo0ffvgBVVVVePnll/G///0PMTExjGYpUqkUmZmZuHr1qs0ytuzSNI2KigpUV1eDoihoNBqo1Wo0NzejtrYWwKP5enBwMAIDAyESieDv7w8/Pz8IBAL4+fmBz+fjypUrOHLkCHJzc63adSi4SqXCe++9h2nTpuGNN95w2OgnoWkamzZtgkQiwRdffGG5+vYiLyaTCffu3cO5c+dQWlqK2NhYZGVlOUxN3rNnD27cuIGvvvrKZhlXg8hKpRJNTU1oaGhAfX09FAoFWltbYTQaodVqLX8rFAqMGTMG7777rlW7j1dqld27d5NFixaR1tZW1zLRCSF6vZ689dZb5Ny5c5bfHNkl5FGCfE1NDdmwYQMZO3Ysyc/Pt2tn4sSJ5LfffrNbholdR9A0TQwGA9FqtUSlUhG5XE6amppIQ0MDaWxsJHq93qZdu4LX19eTqVOnkvLyckZO6PV6otVqLY7IZDJSXl5ONm/eTCZOnEjWrl3bwQGmjfzjjz9ISkoKmTdvHrl48WKHRhkMBjJw4ECHm3M7Q3BXaLNrs3enaRqXL19G37590aNHD6uPmUajgVKpRG1tLaqqqiCTyUAIsaSgNTc3Q6fToX///sjMzGScrGONQYMG4ddff8W5c+fw888/49KlSxg9ejQGDRoEDoeDkpISDBgwoPP34nQyNvtwtVqNJUuWgMfjQSwWWzJU5XI5GhsbLYn2vr6+8PX1Re/evREWFgahUAgWiwUul4uwsDCIRCIIBIIOg56r0XNCCBobG3HmzBlcvXoVfD4fkyZNwuXLl8HhcLBgwQL7DX7GUXubghsMBpw4cQJSqbTdVpTu3bvjxRdfRFBQEIRCIby9vcHlcp3KfnrcAVcbbjaboVQqUVRUhE2bNqGhoQHHjx93+NLz3AreVQ50ht26ujqUl5cjMTHR4Tq8R/D/mF1PXkoX4xHcgwcPnYhn0Oxiu54+vIvxJHO6CEVRKCoqwtmzZ+Hr64vw8HAkJCSgT58+dhOQnrs73N2dvl1BdXU13nrrLSxbtgwajQYikQi1tbXIycnBhQsXmFXytFfLnsSa3ZqaGhIWFkYKCgqIwWDoMrtMUavVZOvWrUQsFpNffvmFUBTV7vjx48fJ8uXLrX7Dsc3uc3WHKxQKCAQCHDhwADt27EBFRcWzdsmCXC7H119/jZqaGhw5cgQTJ07sEGYbPnw4ZDKZ3Z3bz5XgbDYb0dHRWLFiBTgcDtavX4/t27c/F93Mhg0b4OXlhSVLlnT44kQbwcHBeOGFF3Dt2jWb9TxXggOPViQjIyMxe/ZsrFmzBuXl5Zg7d67djNSnzV9//YWioiIsXLgQISEhdsv27NnT7mc+3BbcZDJBp9OhpaUFjY2NdnMymNC2bu7j44OePXti/fr1SElJwbJly3Dq1Klncrdfv34dYrEYAoHAYdnHd0pYw6VpIUVRqK2tRUVFBW7fvg29Xo/ExETs378fs2bNQkJCgks7IaxF5n18fDBt2jT0798fO3bsQGFhIV5//XXEx8d3akq0PZRKJeNMLrPZ3LnJnNXV1ZYPtYhEIkRFRSEsLAxisRharRY7d+60fPPPWdr2zTwJh8OBWCxGv379cObMGezduxfXrl3D7NmzuySkJhAIGD1ZBoMBt2/fxqhRoxxXymRa1NTURGbPnk1+/PFHIpPJiEajaTc1MplMpLCwkCQlJZGqqiq7dVmze/v2bTJ27Fi759E0TWpqasiKFStIWloaKSkpYeK6XbuOKCsrI8OHDyc6nc5uuYsXL5IPPviANDc327TrlOB5eXlk1apVRKPR2C138uRJkpKSQhQKhc0y1uz++eefZMyYMUxcITRNk/z8fDJ06FDy2WefkerqarvfSbRnlwlLly4l27Zts5kuUldXR95//32Sn59PaJq2adepjpamaQQEBDjMLUxLS8OwYcNw7NgxZ6q37ERgAovFQmpqKk6cOAFCCNasWYNdu3bh+vXrbg/c1li0aBFu376N48ePd8gIvnv3Lj777DMMHjwYr776KrNcdiZXuaioiKxevZqoVCqHZYuLi8mCBQts3nXW7CoUCjJgwAAmrrSjtbWVnD59miQkJJC3337b7luqM+19HIqiyK1bt8j8+fPJxo0bSUVFBXnw4AE5ceIEmTp1Kvnpp5+IVqt1aNepQTMqKgpNTU3QarUO/8lGeHi4Zb9NYGAgo/qd2V9jNpvx8OFD3Lx5E+fPn8fZs2cxYsQIzJ8/3+63q1yFzWZj8ODBWLlyJXJzczFnzhw0NzcjNjYWK1aswIABAxhlLTgleI8ePcDlclFeXm7zbasN8neetDMituW+kMc2N7Xt8NVoNKiurkZVVRWKi4tx48YNaDQa9O/fH8nJyTh06BB69uz5VDdFsVgsiEQiLFu2DMuWLXOpDqcEZ7FYSEtLw/fff4/ExES7y5BXrlyxfKOKKUKhEDRNQyaTQaVSobKyEnV1dXjw4IFlfSIyMhKpqanIyclBRETEM9n57A5OR3wMBgNmzpyJ1NRUTJ06tcNjZDAYcOzYMRw4cACrV6+2mRxvK/KSnp5u2UwVGRmJ6OhohIWFITo6GlFRUW5/iPhZR3xcCrHV19dj3rx5CAkJwdChQ8Hj8aDT6SCRSHDt2jX06tUL69ats5vXbavhO3fuhFwuR1ZWFrp37w4+n9+p3cQ/UnDg0W6xgoIC3Lt3D0ajEUKhEL1798awYcPQo0cPh4/6s274P07wznLgv2b3nzXi/AvwCN7FeATvYjyCdzEewbsYj+AePHjoRP4Pxfd6KSRNevAAAAAASUVORK5CYII=",
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 10,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "shuffled_prediction.get_input().remove_image_by_index(list(range(9, len(prediction))))"
+ ]
},
{
"cell_type": "markdown",
- "source": [
- "5. Display the corresponding predicted labels:"
- ],
+ "id": "131db684a431d4ec",
"metadata": {
"collapsed": false
},
- "id": "131db684a431d4ec"
+ "source": [
+ "5. Display the corresponding predicted labels:"
+ ]
},
{
"cell_type": "code",
- "source": [
- "shuffled_prediction.get_output().to_list()[0:9]"
- ],
+ "execution_count": null,
+ "id": "7081595d7100fb42",
"metadata": {
"collapsed": false
},
- "id": "7081595d7100fb42",
- "execution_count": null,
- "outputs": []
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "['triangles',\n",
+ " 'triangles',\n",
+ " 'triangles',\n",
+ " 'triangles',\n",
+ " 'circles',\n",
+ " 'squares',\n",
+ " 'triangles',\n",
+ " 'squares',\n",
+ " 'squares']"
+ ]
+ },
+ "execution_count": 11,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "shuffled_prediction.get_output().to_list()[0:9]"
+ ]
}
],
"metadata": {
@@ -306,14 +345,14 @@
"language_info": {
"codemirror_mode": {
"name": "ipython",
- "version": 2
+ "version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
- "pygments_lexer": "ipython2",
- "version": "2.7.6"
+ "pygments_lexer": "ipython3",
+ "version": "3.12.4"
}
},
"nbformat": 4,
diff --git a/docs/tutorials/regression.ipynb b/docs/tutorials/regression.ipynb
index 6ec3792a3..c966031f8 100644
--- a/docs/tutorials/regression.ipynb
+++ b/docs/tutorials/regression.ipynb
@@ -33,11 +33,48 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 1,
"metadata": {
"collapsed": false
},
- "outputs": [],
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "
shape: (15, 23)id | year | month | day | zipcode | latitude | longitude | sqft_lot | sqft_living | sqft_above | sqft_basement | floors | bedrooms | bathrooms | waterfront | view | condition | grade | year_built | year_renovated | sqft_lot_15nn | sqft_living_15nn | price |
---|
i64 | i64 | i64 | i64 | i64 | f64 | f64 | i64 | i64 | i64 | i64 | f64 | i64 | f64 | i64 | i64 | i64 | i64 | i64 | i64 | i64 | i64 | i64 |
0 | 2014 | 5 | 2 | 98001 | 47.3406 | -122.269 | 9397 | 2200 | 2200 | 0 | 2.0 | 4 | 2.5 | 0 | 1 | 3 | 8 | 1987 | 0 | 9176 | 2310 | 285000 |
1 | 2014 | 5 | 2 | 98003 | 47.3537 | -122.303 | 10834 | 2090 | 1360 | 730 | 1.0 | 3 | 2.5 | 0 | 1 | 4 | 8 | 1987 | 0 | 8595 | 1750 | 285000 |
2 | 2014 | 5 | 2 | 98006 | 47.5443 | -122.177 | 8119 | 2160 | 1080 | 1080 | 1.0 | 4 | 2.25 | 0 | 1 | 3 | 8 | 1966 | 0 | 9000 | 1850 | 440000 |
3 | 2014 | 5 | 2 | 98006 | 47.5746 | -122.135 | 8800 | 1450 | 1450 | 0 | 1.0 | 4 | 1.0 | 0 | 1 | 4 | 7 | 1954 | 0 | 8942 | 1260 | 435000 |
4 | 2014 | 5 | 2 | 98006 | 47.5725 | -122.133 | 10000 | 1920 | 1070 | 850 | 1.0 | 4 | 1.5 | 0 | 1 | 4 | 7 | 1954 | 0 | 10836 | 1450 | 430000 |
… | … | … | … | … | … | … | … | … | … | … | … | … | … | … | … | … | … | … | … | … | … | … |
10 | 2014 | 5 | 2 | 98023 | 47.3256 | -122.378 | 33151 | 3240 | 3240 | 0 | 2.0 | 3 | 2.5 | 0 | 3 | 3 | 10 | 1995 | 0 | 24967 | 4050 | 604000 |
11 | 2014 | 5 | 2 | 98024 | 47.5643 | -121.897 | 16215 | 1580 | 1580 | 0 | 1.0 | 3 | 2.25 | 0 | 1 | 4 | 7 | 1978 | 0 | 16215 | 1450 | 335000 |
12 | 2014 | 5 | 2 | 98027 | 47.4635 | -121.991 | 35100 | 1970 | 1970 | 0 | 2.0 | 3 | 2.25 | 0 | 1 | 4 | 9 | 1977 | 0 | 35100 | 2340 | 437500 |
13 | 2014 | 5 | 2 | 98027 | 47.4634 | -121.987 | 37277 | 2710 | 2710 | 0 | 2.0 | 4 | 2.75 | 0 | 1 | 3 | 9 | 2000 | 0 | 39299 | 2390 | 630000 |
14 | 2014 | 5 | 2 | 98029 | 47.5794 | -122.025 | 67518 | 2820 | 2820 | 0 | 2.0 | 5 | 2.5 | 0 | 1 | 3 | 8 | 1979 | 0 | 48351 | 2820 | 675000 |
"
+ ],
+ "text/plain": [
+ "+-----+------+-------+-----+---+----------------+---------------+------------------+--------+\n",
+ "| id | year | month | day | … | year_renovated | sqft_lot_15nn | sqft_living_15nn | price |\n",
+ "| --- | --- | --- | --- | | --- | --- | --- | --- |\n",
+ "| i64 | i64 | i64 | i64 | | i64 | i64 | i64 | i64 |\n",
+ "+===========================================================================================+\n",
+ "| 0 | 2014 | 5 | 2 | … | 0 | 9176 | 2310 | 285000 |\n",
+ "| 1 | 2014 | 5 | 2 | … | 0 | 8595 | 1750 | 285000 |\n",
+ "| 2 | 2014 | 5 | 2 | … | 0 | 9000 | 1850 | 440000 |\n",
+ "| 3 | 2014 | 5 | 2 | … | 0 | 8942 | 1260 | 435000 |\n",
+ "| 4 | 2014 | 5 | 2 | … | 0 | 10836 | 1450 | 430000 |\n",
+ "| … | … | … | … | … | … | … | … | … |\n",
+ "| 10 | 2014 | 5 | 2 | … | 0 | 24967 | 4050 | 604000 |\n",
+ "| 11 | 2014 | 5 | 2 | … | 0 | 16215 | 1450 | 335000 |\n",
+ "| 12 | 2014 | 5 | 2 | … | 0 | 35100 | 2340 | 437500 |\n",
+ "| 13 | 2014 | 5 | 2 | … | 0 | 39299 | 2390 | 630000 |\n",
+ "| 14 | 2014 | 5 | 2 | … | 0 | 48351 | 2820 | 675000 |\n",
+ "+-----+------+-------+-----+---+----------------+---------------+------------------+--------+"
+ ]
+ },
+ "execution_count": 1,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
"source": [
"from safeds.data.tabular.containers import Table\n",
"\n",
@@ -57,9 +94,40 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 2,
"metadata": {},
- "outputs": [],
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "
shape: (5, 21)id | year | month | day | zipcode | sqft_lot | sqft_living | sqft_above | sqft_basement | floors | bedrooms | bathrooms | waterfront | view | condition | grade | year_built | year_renovated | sqft_lot_15nn | sqft_living_15nn | price |
---|
i64 | i64 | i64 | i64 | i64 | i64 | i64 | i64 | i64 | f64 | i64 | f64 | i64 | i64 | i64 | i64 | i64 | i64 | i64 | i64 | i64 |
0 | 2014 | 5 | 2 | 98001 | 9397 | 2200 | 2200 | 0 | 2.0 | 4 | 2.5 | 0 | 1 | 3 | 8 | 1987 | 0 | 9176 | 2310 | 285000 |
1 | 2014 | 5 | 2 | 98003 | 10834 | 2090 | 1360 | 730 | 1.0 | 3 | 2.5 | 0 | 1 | 4 | 8 | 1987 | 0 | 8595 | 1750 | 285000 |
2 | 2014 | 5 | 2 | 98006 | 8119 | 2160 | 1080 | 1080 | 1.0 | 4 | 2.25 | 0 | 1 | 3 | 8 | 1966 | 0 | 9000 | 1850 | 440000 |
3 | 2014 | 5 | 2 | 98006 | 8800 | 1450 | 1450 | 0 | 1.0 | 4 | 1.0 | 0 | 1 | 4 | 7 | 1954 | 0 | 8942 | 1260 | 435000 |
4 | 2014 | 5 | 2 | 98006 | 10000 | 1920 | 1070 | 850 | 1.0 | 4 | 1.5 | 0 | 1 | 4 | 7 | 1954 | 0 | 10836 | 1450 | 430000 |
"
+ ],
+ "text/plain": [
+ "+-----+------+-------+-----+---+----------------+---------------+------------------+--------+\n",
+ "| id | year | month | day | … | year_renovated | sqft_lot_15nn | sqft_living_15nn | price |\n",
+ "| --- | --- | --- | --- | | --- | --- | --- | --- |\n",
+ "| i64 | i64 | i64 | i64 | | i64 | i64 | i64 | i64 |\n",
+ "+===========================================================================================+\n",
+ "| 0 | 2014 | 5 | 2 | … | 0 | 9176 | 2310 | 285000 |\n",
+ "| 1 | 2014 | 5 | 2 | … | 0 | 8595 | 1750 | 285000 |\n",
+ "| 2 | 2014 | 5 | 2 | … | 0 | 9000 | 1850 | 440000 |\n",
+ "| 3 | 2014 | 5 | 2 | … | 0 | 8942 | 1260 | 435000 |\n",
+ "| 4 | 2014 | 5 | 2 | … | 0 | 10836 | 1450 | 430000 |\n",
+ "+-----+------+-------+-----+---+----------------+---------------+------------------+--------+"
+ ]
+ },
+ "execution_count": 2,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
"source": [
"pricing_columns = (\n",
" # Removes columns \"latitude\" and \"longitude\" from table\n",
@@ -76,7 +144,9 @@
{
"cell_type": "markdown",
"metadata": {},
- "source": "See how to perform further data cleaning in the dedicated [Data Processing Tutorial](../data_processing)."
+ "source": [
+ "See how to perform further data cleaning in the dedicated [Data Processing Tutorial](../data_processing)."
+ ]
},
{
"cell_type": "markdown",
@@ -91,7 +161,7 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 3,
"metadata": {
"collapsed": false
},
@@ -111,7 +181,7 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 4,
"metadata": {
"collapsed": false
},
@@ -135,7 +205,7 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 5,
"metadata": {
"collapsed": false
},
@@ -159,11 +229,49 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 6,
"metadata": {
"collapsed": false
},
- "outputs": [],
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "
shape: (15, 21)id | year | month | day | zipcode | sqft_lot | sqft_living | sqft_above | sqft_basement | floors | bedrooms | bathrooms | waterfront | view | condition | grade | year_built | year_renovated | sqft_lot_15nn | sqft_living_15nn | price |
---|
i64 | i64 | i64 | i64 | i64 | i64 | i64 | i64 | i64 | f64 | i64 | f64 | i64 | i64 | i64 | i64 | i64 | i64 | i64 | i64 | f64 |
10549 | 2014 | 10 | 13 | 98103 | 5000 | 1240 | 1000 | 240 | 1.0 | 2 | 1.0 | 0 | 1 | 3 | 7 | 1920 | 0 | 3500 | 1480 | 550661.111111 |
17590 | 2015 | 3 | 14 | 98002 | 7312 | 2010 | 2010 | 0 | 1.0 | 4 | 2.0 | 0 | 1 | 4 | 7 | 1976 | 0 | 7650 | 2010 | 269944.444444 |
10889 | 2014 | 10 | 17 | 98103 | 3220 | 1120 | 1120 | 0 | 1.0 | 2 | 1.0 | 0 | 1 | 4 | 7 | 1923 | 0 | 3220 | 1440 | 550661.111111 |
12511 | 2014 | 11 | 14 | 98144 | 2457 | 1950 | 1950 | 0 | 3.0 | 2 | 2.5 | 0 | 1 | 3 | 8 | 2009 | 0 | 1639 | 1650 | 334662.5 |
20572 | 2015 | 4 | 27 | 98056 | 5038 | 1220 | 1220 | 0 | 1.0 | 3 | 1.0 | 0 | 1 | 5 | 6 | 1942 | 0 | 5038 | 1140 | 195130.555556 |
… | … | … | … | … | … | … | … | … | … | … | … | … | … | … | … | … | … | … | … | … |
9097 | 2014 | 9 | 18 | 98003 | 7400 | 1130 | 1130 | 0 | 1.0 | 4 | 1.0 | 0 | 1 | 4 | 7 | 1969 | 0 | 7379 | 1540 | 223916.666667 |
8454 | 2014 | 9 | 8 | 98023 | 8470 | 840 | 840 | 0 | 1.0 | 3 | 1.0 | 0 | 1 | 4 | 6 | 1961 | 0 | 8450 | 840 | 109291.666667 |
7829 | 2014 | 8 | 26 | 98126 | 4025 | 820 | 820 | 0 | 1.0 | 2 | 1.0 | 0 | 3 | 5 | 6 | 1922 | 0 | 5750 | 1410 | 330190.0 |
19952 | 2015 | 4 | 17 | 98198 | 10187 | 1120 | 1120 | 0 | 1.0 | 3 | 1.75 | 0 | 1 | 3 | 7 | 1968 | 0 | 8736 | 1900 | 201880.0 |
18382 | 2015 | 3 | 26 | 98042 | 5929 | 2210 | 2210 | 0 | 2.0 | 4 | 2.5 | 0 | 1 | 3 | 8 | 2004 | 0 | 5901 | 2200 | 311491.666667 |
"
+ ],
+ "text/plain": [
+ "+-------+------+-------+-----+---+----------------+---------------+-----------------+--------------+\n",
+ "| id | year | month | day | … | year_renovated | sqft_lot_15nn | sqft_living_15n | price |\n",
+ "| --- | --- | --- | --- | | --- | --- | n | --- |\n",
+ "| i64 | i64 | i64 | i64 | | i64 | i64 | --- | f64 |\n",
+ "| | | | | | | | i64 | |\n",
+ "+==================================================================================================+\n",
+ "| 10549 | 2014 | 10 | 13 | … | 0 | 3500 | 1480 | 550661.11111 |\n",
+ "| 17590 | 2015 | 3 | 14 | … | 0 | 7650 | 2010 | 269944.44444 |\n",
+ "| 10889 | 2014 | 10 | 17 | … | 0 | 3220 | 1440 | 550661.11111 |\n",
+ "| 12511 | 2014 | 11 | 14 | … | 0 | 1639 | 1650 | 334662.50000 |\n",
+ "| 20572 | 2015 | 4 | 27 | … | 0 | 5038 | 1140 | 195130.55556 |\n",
+ "| … | … | … | … | … | … | … | … | … |\n",
+ "| 9097 | 2014 | 9 | 18 | … | 0 | 7379 | 1540 | 223916.66667 |\n",
+ "| 8454 | 2014 | 9 | 8 | … | 0 | 8450 | 840 | 109291.66667 |\n",
+ "| 7829 | 2014 | 8 | 26 | … | 0 | 5750 | 1410 | 330190.00000 |\n",
+ "| 19952 | 2015 | 4 | 17 | … | 0 | 8736 | 1900 | 201880.00000 |\n",
+ "| 18382 | 2015 | 3 | 26 | … | 0 | 5901 | 2200 | 311491.66667 |\n",
+ "+-------+------+-------+-----+---+----------------+---------------+-----------------+--------------+"
+ ]
+ },
+ "execution_count": 6,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
"source": [
"prediction = fitted_model.predict(testing_table)\n",
"# For visualisation purposes we only print out the first 15 rows.\n",
@@ -183,11 +291,22 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 7,
"metadata": {
"collapsed": false
},
- "outputs": [],
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "92598.0054595909"
+ ]
+ },
+ "execution_count": 7,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
"source": [
"fitted_model.mean_absolute_error(testing_table)"
]
@@ -201,9 +320,20 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 8,
"metadata": {},
- "outputs": [],
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "92541.16556653795"
+ ]
+ },
+ "execution_count": 8,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
"source": [
"from safeds.data.tabular.containers import Table\n",
"from safeds.ml.classical.regression import DecisionTreeRegressor\n",
diff --git a/src/safeds/ml/nn/_internal_model.py b/src/safeds/ml/nn/_internal_model.py
index cb1105503..4c4f42d45 100644
--- a/src/safeds/ml/nn/_internal_model.py
+++ b/src/safeds/ml/nn/_internal_model.py
@@ -10,7 +10,7 @@
from safeds._config import _init_default_device
from safeds.ml.nn.converters._input_converter_image import _InputConversionImage
-from safeds.ml.nn.layers import FlattenLayer, Layer
+from safeds.ml.nn.layers import DropoutLayer, FlattenLayer, Layer
from safeds.ml.nn.layers._pooling2d_layer import _Pooling2DLayer
if TYPE_CHECKING:
@@ -36,7 +36,7 @@ def __init__(self, input_conversion: InputConversion, layers: list[Layer], is_fo
layer._set_input_size(previous_output_size)
elif isinstance(input_conversion, _InputConversionImage):
layer._set_input_size(input_conversion._data_size)
- if isinstance(layer, FlattenLayer | _Pooling2DLayer):
+ if isinstance(layer, FlattenLayer | _Pooling2DLayer | DropoutLayer):
internal_layers.append(layer._get_internal_layer())
else:
internal_layers.append(layer._get_internal_layer(activation_function="relu"))
diff --git a/src/safeds/ml/nn/_model.py b/src/safeds/ml/nn/_model.py
index 3d9086d61..aa2927af2 100644
--- a/src/safeds/ml/nn/_model.py
+++ b/src/safeds/ml/nn/_model.py
@@ -27,7 +27,7 @@
ForwardLayer,
)
from safeds.ml.nn.layers._pooling2d_layer import _Pooling2DLayer
-from safeds.ml.nn.typing import ConstantImageSize, ModelImageSize, VariableImageSize
+from safeds.ml.nn.typing import ConstantImageSize, ModelImageSize, TensorShape, VariableImageSize
if TYPE_CHECKING:
from collections.abc import Callable
@@ -108,6 +108,21 @@ def __init__(
self._total_number_of_batches_done = 0
self._total_number_of_epochs_done = 0
+ def get_parameter_count(self) -> int:
+ """Return the total number of trainable parameters of this model."""
+ if self._input_size is None:
+ raise ValueError("The input_size is not yet set.")
+
+ summand = 0
+ last_type = "int" if isinstance(self.input_size, int) else "ImageSize"
+ last_input_neurons = self.input_size if isinstance(self.input_size, int) else 0
+ last_input_channels = self.input_size.channel if isinstance(self.input_size, ModelImageSize) else 0
+ for layer in self._layers:
+ layer._set_input_size(last_input_neurons if last_type == "int" else last_input_channels)
+ summand += layer.get_parameter_count(TensorShape([last_input_neurons, last_input_channels]))
+ last_input_neurons = layer.output_size if isinstance(layer.output_size, int) else 0
+ last_input_channels = layer.output_size.channel if isinstance(layer.output_size, ModelImageSize) else 0
+ return summand
+
@staticmethod
def load_pretrained_model(huggingface_repo: str) -> NeuralNetworkRegressor: # pragma: no cover
"""
@@ -387,6 +402,21 @@ def __init__(
self._total_number_of_batches_done = 0
self._total_number_of_epochs_done = 0
+ def get_parameter_count(self) -> int:
+ """Return the total number of trainable parameters of this model."""
+ if self._input_size is None:
+ raise ValueError("The input_size is not yet set.")
+
+ summand = 0
+ last_type = "int" if isinstance(self.input_size, int) else "ImageSize"
+ last_input_neurons = self.input_size if isinstance(self.input_size, int) else 0
+ last_input_channels = self.input_size.channel if isinstance(self.input_size, ModelImageSize) else 0
+ for layer in self._layers:
+ layer._set_input_size(last_input_neurons if last_type == "int" else last_input_channels)
+ summand += layer.get_parameter_count(TensorShape([last_input_neurons, last_input_channels]))
+ last_input_neurons = layer.output_size if isinstance(layer.output_size, int) else 0
+ last_input_channels = layer.output_size.channel if isinstance(layer.output_size, ModelImageSize) else 0
+ return summand
+
@staticmethod
def load_pretrained_model(huggingface_repo: str) -> NeuralNetworkClassifier: # pragma: no cover
"""
diff --git a/src/safeds/ml/nn/layers/_convolutional2d_layer.py b/src/safeds/ml/nn/layers/_convolutional2d_layer.py
index dd42f2d97..c59f02480 100644
--- a/src/safeds/ml/nn/layers/_convolutional2d_layer.py
+++ b/src/safeds/ml/nn/layers/_convolutional2d_layer.py
@@ -11,7 +11,7 @@
if TYPE_CHECKING:
from torch import nn
- from safeds.ml.nn.typing import ModelImageSize
+ from safeds.ml.nn.typing import ModelImageSize, TensorShape
class Convolutional2DLayer(Layer):
@@ -157,6 +157,9 @@ def __sizeof__(self) -> int:
+ sys.getsizeof(self._output_size)
)
+ def get_parameter_count(self, input_size: TensorShape) -> int:
+ return int((self._kernel_size * self._kernel_size * input_size._dims[1] + 1) * self._output_channel)
+
class ConvolutionalTranspose2DLayer(Convolutional2DLayer):
"""
@@ -261,3 +264,6 @@ def __eq__(self, other: object) -> bool:
def __sizeof__(self) -> int:
return sys.getsizeof(self._output_padding) + super().__sizeof__()
+
+ def get_parameter_count(self, input_size: TensorShape) -> int:
+ return int((self._kernel_size * self._kernel_size * input_size._dims[1] + 1) * self._output_channel)
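The count above is the standard `(kernel_size * kernel_size * in_channels + 1) * out_channels`: one weight per kernel position and input channel, plus one bias per output channel. A small sanity-check sketch against PyTorch, assuming `torch` is available:

```python
import torch.nn as nn

in_channels, out_channels, kernel_size = 3, 16, 3
conv = nn.Conv2d(in_channels, out_channels, kernel_size, padding=1)

# 16 * 3 * 3 * 3 weights + 16 biases = 448
torch_count = sum(p.numel() for p in conv.parameters())
assert torch_count == (kernel_size * kernel_size * in_channels + 1) * out_channels == 448
```

The same arithmetic holds for `nn.ConvTranspose2d`, which is why `ConvolutionalTranspose2DLayer` reuses the formula unchanged.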
diff --git a/src/safeds/ml/nn/layers/_dropout_layer.py b/src/safeds/ml/nn/layers/_dropout_layer.py
index 1814e4383..af857ef90 100644
--- a/src/safeds/ml/nn/layers/_dropout_layer.py
+++ b/src/safeds/ml/nn/layers/_dropout_layer.py
@@ -4,7 +4,7 @@
from safeds._utils import _structural_hash
from safeds._validation import _check_bounds, _ClosedBound
-from safeds.ml.nn.typing import ModelImageSize
+from safeds.ml.nn.typing import ModelImageSize, TensorShape
from ._layer import Layer
@@ -104,3 +104,6 @@ def __sizeof__(self) -> int:
return int(self._input_size)
elif isinstance(self._input_size, ModelImageSize):
return self._input_size.__sizeof__()
+
+ def get_parameter_count(self, input_size: TensorShape) -> int: # noqa: ARG002
+ return 0
diff --git a/src/safeds/ml/nn/layers/_flatten_layer.py b/src/safeds/ml/nn/layers/_flatten_layer.py
index a84551c2b..63a9e9cd2 100644
--- a/src/safeds/ml/nn/layers/_flatten_layer.py
+++ b/src/safeds/ml/nn/layers/_flatten_layer.py
@@ -4,7 +4,7 @@
from typing import TYPE_CHECKING, Any
from safeds._utils import _structural_hash
-from safeds.ml.nn.typing import ConstantImageSize
+from safeds.ml.nn.typing import ConstantImageSize, TensorShape
from ._layer import Layer
@@ -86,3 +86,6 @@ def __eq__(self, other: object) -> bool:
def __sizeof__(self) -> int:
return sys.getsizeof(self._input_size) + sys.getsizeof(self._output_size)
+
+ def get_parameter_count(self, input_size: TensorShape) -> int: # noqa: ARG002
+ return 0
diff --git a/src/safeds/ml/nn/layers/_forward_layer.py b/src/safeds/ml/nn/layers/_forward_layer.py
index e420b78ec..9f9335e5a 100644
--- a/src/safeds/ml/nn/layers/_forward_layer.py
+++ b/src/safeds/ml/nn/layers/_forward_layer.py
@@ -4,7 +4,7 @@
from safeds._utils import _structural_hash
from safeds._validation import _check_bounds, _ClosedBound
-from safeds.ml.nn.typing import ModelImageSize
+from safeds.ml.nn.typing import ModelImageSize, TensorShape
from ._layer import Layer
@@ -96,3 +96,6 @@ def __sizeof__(self) -> int:
import sys
return sys.getsizeof(self._input_size) + sys.getsizeof(self._output_size)
+
+ def get_parameter_count(self, input_size: TensorShape) -> int:
+ return (input_size._dims[0] + 1) * self._output_size
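`(inputs + 1) * outputs` is the usual fully connected count: one weight per input for each neuron plus one bias per neuron. A quick cross-check against `torch.nn.Linear`, assuming `torch` is available:

```python
import torch.nn as nn

in_features, out_features = 3, 3
linear = nn.Linear(in_features, out_features)

# 3 * 3 weights + 3 biases = 12
assert sum(p.numel() for p in linear.parameters()) == (in_features + 1) * out_features == 12
```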
diff --git a/src/safeds/ml/nn/layers/_gru_layer.py b/src/safeds/ml/nn/layers/_gru_layer.py
index e74fec417..3ada7082b 100644
--- a/src/safeds/ml/nn/layers/_gru_layer.py
+++ b/src/safeds/ml/nn/layers/_gru_layer.py
@@ -5,7 +5,7 @@
from safeds._utils import _structural_hash
from safeds._validation import _check_bounds, _ClosedBound
-from safeds.ml.nn.typing import ModelImageSize
+from safeds.ml.nn.typing import ModelImageSize, TensorShape
from ._layer import Layer
@@ -95,3 +95,6 @@ def __eq__(self, other: object) -> bool:
def __sizeof__(self) -> int:
return sys.getsizeof(self._input_size) + sys.getsizeof(self._output_size)
+
+ def get_parameter_count(self, input_size: TensorShape) -> int:
+ return (input_size._dims[0] + self._output_size + 2) * self._output_size * 3
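The `+ 2` comes from PyTorch keeping two bias vectors per gate (`bias_ih` and `bias_hh`), and the factor 3 is the number of GRU gates. A cross-check sketch against `torch.nn.GRU`, assuming `torch` is available:

```python
import torch.nn as nn

input_size, hidden_size = 4, 16
gru = nn.GRU(input_size, hidden_size)

# 3 gates * (16*4 input weights + 16*16 recurrent weights + 16 + 16 biases) = 1056
torch_count = sum(p.numel() for p in gru.parameters())
assert torch_count == (input_size + hidden_size + 2) * hidden_size * 3 == 1056
```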
diff --git a/src/safeds/ml/nn/layers/_layer.py b/src/safeds/ml/nn/layers/_layer.py
index 058036688..a95155da1 100644
--- a/src/safeds/ml/nn/layers/_layer.py
+++ b/src/safeds/ml/nn/layers/_layer.py
@@ -6,7 +6,7 @@
if TYPE_CHECKING:
from torch import nn
- from safeds.ml.nn.typing import ModelImageSize
+ from safeds.ml.nn.typing import ModelImageSize, TensorShape
class Layer(ABC):
@@ -43,3 +43,7 @@ def __eq__(self, other: object) -> bool:
@abstractmethod
def __sizeof__(self) -> int:
pass # pragma: no cover
+
+ @abstractmethod
+ def get_parameter_count(self, input_size: TensorShape) -> int:
+ pass # pragma: no cover
diff --git a/src/safeds/ml/nn/layers/_lstm_layer.py b/src/safeds/ml/nn/layers/_lstm_layer.py
index 330809474..06825d8af 100644
--- a/src/safeds/ml/nn/layers/_lstm_layer.py
+++ b/src/safeds/ml/nn/layers/_lstm_layer.py
@@ -5,7 +5,7 @@
from safeds._utils import _structural_hash
from safeds._validation import _check_bounds, _ClosedBound
-from safeds.ml.nn.typing import ModelImageSize
+from safeds.ml.nn.typing import ModelImageSize, TensorShape
from ._layer import Layer
@@ -95,3 +95,6 @@ def __eq__(self, other: object) -> bool:
def __sizeof__(self) -> int:
return sys.getsizeof(self._input_size) + sys.getsizeof(self._output_size)
+
+ def get_parameter_count(self, input_size: TensorShape) -> int:
+ return (input_size._dims[0] + self._output_size + 2) * self._output_size * 4
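Same reasoning as for the GRU count, but with the LSTM's four gates. A cross-check sketch against `torch.nn.LSTM`, assuming `torch` is available:

```python
import torch.nn as nn

input_size, hidden_size = 4, 16
lstm = nn.LSTM(input_size, hidden_size)

# 4 gates * (16*4 input weights + 16*16 recurrent weights + 16 + 16 biases) = 1408
torch_count = sum(p.numel() for p in lstm.parameters())
assert torch_count == (input_size + hidden_size + 2) * hidden_size * 4 == 1408
```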
diff --git a/src/safeds/ml/nn/layers/_pooling2d_layer.py b/src/safeds/ml/nn/layers/_pooling2d_layer.py
index d658ed848..7792b0044 100644
--- a/src/safeds/ml/nn/layers/_pooling2d_layer.py
+++ b/src/safeds/ml/nn/layers/_pooling2d_layer.py
@@ -11,7 +11,7 @@
if TYPE_CHECKING:
from torch import nn
- from safeds.ml.nn.typing import ModelImageSize
+ from safeds.ml.nn.typing import ModelImageSize, TensorShape
class _Pooling2DLayer(Layer):
@@ -134,6 +134,9 @@ def __sizeof__(self) -> int:
+ sys.getsizeof(self._padding)
)
+ def get_parameter_count(self, input_size: TensorShape) -> int: # noqa: ARG002
+ return 0
+
class MaxPooling2DLayer(_Pooling2DLayer):
"""
diff --git a/src/safeds/ml/nn/typing/__init__.py b/src/safeds/ml/nn/typing/__init__.py
index 913ad6f65..f7cef6927 100644
--- a/src/safeds/ml/nn/typing/__init__.py
+++ b/src/safeds/ml/nn/typing/__init__.py
@@ -6,6 +6,7 @@
if TYPE_CHECKING:
from ._model_image_size import ConstantImageSize, ModelImageSize, VariableImageSize
+ from ._tensor_shape import TensorShape
apipkg.initpkg(
__name__,
@@ -13,6 +14,7 @@
"ConstantImageSize": "._model_image_size:ConstantImageSize",
"ModelImageSize": "._model_image_size:ModelImageSize",
"VariableImageSize": "._model_image_size:VariableImageSize",
+ "TensorShape": "._tensor_shape:TensorShape",
},
)
@@ -20,4 +22,5 @@
"ConstantImageSize",
"ModelImageSize",
"VariableImageSize",
+ "TensorShape",
]
diff --git a/src/safeds/ml/nn/typing/_tensor_shape.py b/src/safeds/ml/nn/typing/_tensor_shape.py
new file mode 100644
index 000000000..f6c4b9d4f
--- /dev/null
+++ b/src/safeds/ml/nn/typing/_tensor_shape.py
@@ -0,0 +1,59 @@
+from __future__ import annotations
+
+from safeds._utils import _structural_hash
+from safeds._validation import _check_bounds, _ClosedBound
+
+
+class TensorShape:
+ """
+ A TensorShape describes the size of a tensor in each of its dimensions.
+
+ Parameters
+ ----------
+ dims:
+ A list of integers where each integer represents
+ the size of the tensor in a particular dimension.
+ """
+
+ def __init__(self, dims: list[int]) -> None:
+ self._dims = dims
+
+ def get_size(self, dimension: int | None = None) -> int:
+ """
+ Return the size of the tensor in the specified dimension.
+
+ Parameters
+ ----------
+ dimension:
+ The dimension index for which the size is to be retrieved.
+
+ Returns
+ -------
+ int: The size of the tensor in the specified dimension.
+
+ Raises
+ ------
+ OutOfBoundsError:
+ If the actual value is outside its expected range.
+ """
+ _check_bounds("dimension", dimension, lower_bound=_ClosedBound(0))
+ if dimension is not None and dimension >= self.dimensionality:
+ # TODO maybe add error message indicating that the dimension is out of range
+ return 0
+ if dimension is None:
+ return self._dims[0]
+ return self._dims[dimension]
+
+ def __hash__(self) -> int:
+ return _structural_hash(self._dims)
+
+ @property
+ def dimensionality(self) -> int:
+ """
+ Return the number of dimensions of the tensor.
+
+ Returns
+ -------
+ int: The number of dimensions of the tensor.
+ """
+ return len(self._dims)
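A minimal usage sketch of the new `TensorShape`, based on the implementation above:

```python
from safeds.ml.nn.typing import TensorShape

shape = TensorShape([8, 3])  # e.g. 8 neurons, 3 channels

assert shape.dimensionality == 2
assert shape.get_size(0) == 8  # with no argument, get_size() falls back to the first dimension
assert shape.get_size(1) == 3
assert shape.get_size(5) == 0  # out-of-range dimensions currently return 0 (see the TODO above)
```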
diff --git a/tests/safeds/ml/nn/layers/test_convolutional2d_layer.py b/tests/safeds/ml/nn/layers/test_convolutional2d_layer.py
index 4b8ac362e..f79595919 100644
--- a/tests/safeds/ml/nn/layers/test_convolutional2d_layer.py
+++ b/tests/safeds/ml/nn/layers/test_convolutional2d_layer.py
@@ -4,6 +4,7 @@
import pytest
from safeds.data.image.typing import ImageSize
from safeds.ml.nn.layers import Convolutional2DLayer, ConvolutionalTranspose2DLayer
+from safeds.ml.nn.typing import TensorShape
from torch import nn
@@ -157,6 +158,22 @@ def test_should_raise_if_input_size_is_set_with_int(
with pytest.raises(TypeError, match=r"The input_size of a convolution layer has to be of type ImageSize."):
layer._set_input_size(1)
+ def test_conv_get_parameter_count_returns_right_amount(self) -> None:
+ kernel_size = 5
+ input_channels = 3
+ output_channels = 3
+ expected_output = int((kernel_size * kernel_size * input_channels + 1) * output_channels)
+ layer = Convolutional2DLayer(output_channels, kernel_size)
+ assert layer.get_parameter_count(TensorShape([1, input_channels])) == expected_output
+
+ def test_conv_transposed_get_parameter_count_returns_right_amount(self) -> None:
+ kernel_size = 5
+ input_channels = 3
+ output_channels = 3
+ expected_output = int((kernel_size * kernel_size * input_channels + 1) * output_channels)
+ layer = ConvolutionalTranspose2DLayer(output_channels, kernel_size)
+ assert layer.get_parameter_count(TensorShape([1, input_channels])) == expected_output
+
class TestEq:
@pytest.mark.parametrize(
("conv2dlayer1", "conv2dlayer2"),
diff --git a/tests/safeds/ml/nn/layers/test_dropout_layer.py b/tests/safeds/ml/nn/layers/test_dropout_layer.py
index 5dad5f3a8..b9e47477b 100644
--- a/tests/safeds/ml/nn/layers/test_dropout_layer.py
+++ b/tests/safeds/ml/nn/layers/test_dropout_layer.py
@@ -4,7 +4,7 @@
from safeds.data.tabular.containers import Table
from safeds.exceptions import OutOfBoundsError
from safeds.ml.nn.layers import DropoutLayer
-from safeds.ml.nn.typing import ConstantImageSize
+from safeds.ml.nn.typing import ConstantImageSize, TensorShape
from torch import nn
@@ -43,6 +43,10 @@ def test_input_size_should_be_set(self) -> None:
with pytest.raises(ValueError, match=r"The input_size is not yet set."):
layer.__sizeof__()
+ def test_get_parameter_count_right_output(self) -> None:
+ layer = DropoutLayer(0.5)
+ assert layer.get_parameter_count(TensorShape([1])) == 0
+
class TestEq:
def test_should_be_equal(self) -> None:
diff --git a/tests/safeds/ml/nn/layers/test_flatten_layer.py b/tests/safeds/ml/nn/layers/test_flatten_layer.py
index 64db6127c..3adb15236 100644
--- a/tests/safeds/ml/nn/layers/test_flatten_layer.py
+++ b/tests/safeds/ml/nn/layers/test_flatten_layer.py
@@ -4,7 +4,7 @@
from safeds.data.image.typing import ImageSize
from safeds.data.tabular.containers import Table
from safeds.ml.nn.layers import FlattenLayer
-from safeds.ml.nn.typing import VariableImageSize
+from safeds.ml.nn.typing import TensorShape, VariableImageSize
from torch import nn
@@ -37,6 +37,10 @@ def test_should_raise_if_input_size_is_set_with_variable_image_size(self) -> Non
with pytest.raises(TypeError, match=r"The input_size of a flatten layer has to be a ConstantImageSize."):
layer._set_input_size(VariableImageSize(1, 2, 3))
+ def test_get_parameter_count_right_output(self) -> None:
+ layer = FlattenLayer()
+ assert layer.get_parameter_count(TensorShape([1])) == 0
+
class TestEq:
def test_should_be_equal(self) -> None:
assert FlattenLayer() == FlattenLayer()
diff --git a/tests/safeds/ml/nn/layers/test_forward_layer.py b/tests/safeds/ml/nn/layers/test_forward_layer.py
index 0ecd3bd05..1b250f5b7 100644
--- a/tests/safeds/ml/nn/layers/test_forward_layer.py
+++ b/tests/safeds/ml/nn/layers/test_forward_layer.py
@@ -5,6 +5,7 @@
from safeds.data.image.typing import ImageSize
from safeds.exceptions import OutOfBoundsError
from safeds.ml.nn.layers import ForwardLayer
+from safeds.ml.nn.typing import TensorShape
from torch import nn
# TODO: Should be tested on a model, not a layer, since input size gets inferred
@@ -177,3 +178,11 @@ def test_should_assert_that_different_forward_layers_have_different_hash(
)
def test_should_assert_that_layer_size_is_greater_than_normal_object(layer: ForwardLayer) -> None:
assert sys.getsizeof(layer) > sys.getsizeof(object())
+
+
+ def test_forward_get_parameter_count_returns_right_amount() -> None:
+ input_neurons = 3
+ output_neurons = 3
+ expected_output = int((input_neurons + 1) * output_neurons)
+ layer = ForwardLayer(output_neurons)
+ assert layer.get_parameter_count(TensorShape([input_neurons])) == expected_output
diff --git a/tests/safeds/ml/nn/layers/test_gru_layer.py b/tests/safeds/ml/nn/layers/test_gru_layer.py
index 4a6f366e4..930805771 100644
--- a/tests/safeds/ml/nn/layers/test_gru_layer.py
+++ b/tests/safeds/ml/nn/layers/test_gru_layer.py
@@ -5,6 +5,7 @@
from safeds.data.image.typing import ImageSize
from safeds.exceptions import OutOfBoundsError
from safeds.ml.nn.layers import GRULayer
+from safeds.ml.nn.typing import TensorShape
from torch import nn
@@ -187,3 +188,11 @@ def test_internal_layer_should_raise_error() -> None:
layer = GRULayer(1)
with pytest.raises(ValueError, match="The input_size is not yet set."):
layer._get_internal_layer(activation_function="relu")
+
+
+ def test_gru_get_parameter_count_returns_right_amount() -> None:
+ input_neurons = 4
+ output_neurons = 16
+ expected_output = int((input_neurons + output_neurons + 2) * output_neurons * 3)
+ layer = GRULayer(output_neurons)
+ assert layer.get_parameter_count(TensorShape([input_neurons])) == expected_output
diff --git a/tests/safeds/ml/nn/layers/test_lstm_layer.py b/tests/safeds/ml/nn/layers/test_lstm_layer.py
index 8d58e5dd8..2230d1839 100644
--- a/tests/safeds/ml/nn/layers/test_lstm_layer.py
+++ b/tests/safeds/ml/nn/layers/test_lstm_layer.py
@@ -5,6 +5,7 @@
from safeds.data.image.typing import ImageSize
from safeds.exceptions import OutOfBoundsError
from safeds.ml.nn.layers import LSTMLayer
+from safeds.ml.nn.typing import TensorShape
from torch import nn
# TODO: Should be tested on a model, not a layer, since input size gets inferred
@@ -177,3 +178,11 @@ def test_should_assert_that_different_forward_layers_have_different_hash(
)
def test_should_assert_that_layer_size_is_greater_than_normal_object(layer: LSTMLayer) -> None:
assert sys.getsizeof(layer) > sys.getsizeof(object())
+
+
+ def test_lstm_get_parameter_count_returns_right_amount() -> None:
+ input_neurons = 4
+ output_neurons = 16
+ expected_output = int((input_neurons + output_neurons + 2) * output_neurons * 4)
+ layer = LSTMLayer(output_neurons)
+ assert layer.get_parameter_count(TensorShape([input_neurons])) == expected_output
diff --git a/tests/safeds/ml/nn/layers/test_pooling2d_layer.py b/tests/safeds/ml/nn/layers/test_pooling2d_layer.py
index 4c5fcb6e3..e50d34221 100644
--- a/tests/safeds/ml/nn/layers/test_pooling2d_layer.py
+++ b/tests/safeds/ml/nn/layers/test_pooling2d_layer.py
@@ -6,6 +6,7 @@
from safeds.data.tabular.containers import Table
from safeds.ml.nn.layers import AveragePooling2DLayer, MaxPooling2DLayer
from safeds.ml.nn.layers._pooling2d_layer import _Pooling2DLayer
+from safeds.ml.nn.typing import TensorShape
from torch import nn
@@ -56,6 +57,17 @@ def test_should_raise_if_input_size_is_set_with_int(self, strategy: Literal["max
with pytest.raises(TypeError, match=r"The input_size of a pooling layer has to be of type ImageSize."):
layer._set_input_size(1)
+ @pytest.mark.parametrize(
+ "strategy",
+ [
+ "max",
+ "avg",
+ ],
+ )
+ def test_get_parameter_count_right_output(self, strategy: Literal["max", "avg"]) -> None:
+ layer = _Pooling2DLayer(strategy, 2, stride=2, padding=2)
+ assert layer.get_parameter_count(TensorShape([1])) == 0
+
class TestEq:
@pytest.mark.parametrize(
("pooling_2d_layer_1", "pooling_2d_layer_2"),
diff --git a/tests/safeds/ml/nn/test_model.py b/tests/safeds/ml/nn/test_model.py
index 43fc67aa6..d14358e3f 100644
--- a/tests/safeds/ml/nn/test_model.py
+++ b/tests/safeds/ml/nn/test_model.py
@@ -27,6 +27,7 @@
AveragePooling2DLayer,
Convolutional2DLayer,
ConvolutionalTranspose2DLayer,
+ DropoutLayer,
FlattenLayer,
ForwardLayer,
Layer,
@@ -514,6 +515,27 @@ def test_should_be_pickleable(self, device: Device) -> None:
# Should not raise
pickle.dumps(fitted_model)
+ def test_parameters_model_not_fitted(self, device: Device) -> None:
+ configure_test_with_device(device)
+ model = NeuralNetworkClassifier(
+ InputConversionTable(),
+ [ForwardLayer(neuron_count=8), DropoutLayer(0.5), ForwardLayer(neuron_count=1)],
+ )
+ with pytest.raises(ValueError, match=r"The input_size is not yet set."):
+ model.get_parameter_count()
+
+ def test_should_sum_parameters(self, device: Device) -> None:
+ configure_test_with_device(device)
+ # (1 input + bias) * 8 neurons = 16, dropout = 0, (8 inputs + bias) * 1 neuron = 9
+ expected_output = 16 + 0 + 9
+ model_fitted = NeuralNetworkClassifier(
+ InputConversionTable(),
+ [ForwardLayer(neuron_count=8), DropoutLayer(0.5), ForwardLayer(neuron_count=1)],
+ ).fit(
+ Table.from_dict({"a": [1, 1, 1], "b": [2, 2, 2]}).to_tabular_dataset("a"),
+ epoch_size=3,
+ )
+ assert expected_output == model_fitted.get_parameter_count()
+
@pytest.mark.parametrize("device", get_devices(), ids=get_devices_ids())
class TestRegressionModel:
@@ -879,3 +901,24 @@ def test_should_be_pickleable(self, device: Device) -> None:
# Should not raise
pickle.dumps(fitted_model)
+
+ def test_parameters_model_not_fitted(self, device: Device) -> None:
+ configure_test_with_device(device)
+ model = NeuralNetworkRegressor(
+ InputConversionTable(),
+ [ForwardLayer(neuron_count=8), DropoutLayer(0.5), ForwardLayer(neuron_count=1)],
+ )
+ with pytest.raises(ValueError, match=r"The input_size is not yet set."):
+ model.get_parameter_count()
+
+ def test_should_sum_parameters(self, device: Device) -> None:
+ configure_test_with_device(device)
+ # (1 input + bias) * 8 neurons = 16, dropout = 0, (8 inputs + bias) * 1 neuron = 9
+ expected_output = 16 + 0 + 9
+ model_fitted = NeuralNetworkRegressor(
+ InputConversionTable(),
+ [ForwardLayer(neuron_count=8), DropoutLayer(0.5), ForwardLayer(neuron_count=1)],
+ ).fit(
+ Table.from_dict({"a": [1, 1, 1], "b": [2, 2, 2]}).to_tabular_dataset("a"),
+ epoch_size=3,
+ )
+ assert expected_output == model_fitted.get_parameter_count()