notebooks: rerun notebooks. (#1026)
nickfraser authored Sep 12, 2024
1 parent 10dcee3 · commit 86afd5f
Showing 7 changed files with 176 additions and 161 deletions.
95 changes: 51 additions & 44 deletions notebooks/01_quant_tensor_quant_conv2d_overview.ipynb

Large diffs are not rendered by default.

53 changes: 29 additions & 24 deletions notebooks/02_quant_activation_overview.ipynb

Large diffs are not rendered by default.

52 changes: 27 additions & 25 deletions notebooks/03_anatomy_of_a_quantizer.ipynb

Large diffs are not rendered by default.

57 changes: 31 additions & 26 deletions notebooks/Brevitas_TVMCon2021.ipynb

Large diffs are not rendered by default.

8 changes: 5 additions & 3 deletions notebooks/ONNX_export_tutorial.ipynb
@@ -32,7 +32,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
"Requirement already satisfied: netron in /scratch/fabian/miniforge3/envs/torchgpu/lib/python3.11/site-packages (7.4.5)\n",
"Requirement already satisfied: netron in /proj/xlabs/users/nfraser/opt/miniforge3/envs/20231115_brv_pt1.13.1/lib/python3.10/site-packages (7.2.9)\r\n",
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
@@ -555,7 +555,9 @@
"name": "stderr",
"output_type": "stream",
"text": [
"2024-03-06 02:12:47.492497092 [W:onnxruntime:, graph.cc:1283 Graph] Initializer linear.bias appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n"
"2024-09-12 12:18:03.405472924 [W:onnxruntime:, graph.cc:1283 Graph] Initializer linear.bias appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n",
"/proj/xlabs/users/nfraser/opt/miniforge3/envs/20231115_brv_pt1.13.1/lib/python3.10/site-packages/brevitas/nn/quant_linear.py:69: UserWarning: Defining your `__torch_function__` as a plain method is deprecated and will be an error in future, please define it as a classmethod. (Triggered internally at /opt/conda/conda-bld/pytorch_1670525541990/work/torch/csrc/utils/python_arg_parser.cpp:350.)\n",
" output_tensor = linear(x, quant_weight, quant_bias)\n"
]
}
],
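Note: the onnxruntime warning above also names its own fix. A minimal sketch of what the suggested onnxruntime/tools/python/remove_initializer_from_input.py tool does, assuming a standard `onnx` install (file paths here are placeholders):

```python
import onnx

def remove_initializers_from_input(in_path: str, out_path: str) -> None:
    model = onnx.load(in_path)
    init_names = {init.name for init in model.graph.initializer}
    # Initializers double-listed as graph inputs block constant folding;
    # keep only the true runtime inputs.
    runtime_inputs = [i for i in model.graph.input if i.name not in init_names]
    del model.graph.input[:]
    model.graph.input.extend(runtime_inputs)
    onnx.save(model, out_path)

remove_initializers_from_input("model.onnx", "model_clean.onnx")
```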
@@ -771,7 +773,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.5"
"version": "3.10.13"
},
"vscode": {
"interpreter": {
26 changes: 15 additions & 11 deletions notebooks/minifloat_mx_tutorial.ipynb
@@ -67,7 +67,10 @@
"name": "stderr",
"output_type": "stream",
"text": [
"/home/giuseppe/miniconda3/envs/brevitas_dev/lib/python3.11/site-packages/torch/nn/modules/conv.py:456: UserWarning: Defining your `__torch_function__` as a plain method is deprecated and will be an error in future, please define it as a classmethod. (Triggered internally at /opt/conda/conda-bld/pytorch_1712608853099/work/torch/csrc/utils/python_arg_parser.cpp:294.)\n",
"/proj/xlabs/users/nfraser/opt/miniforge3/envs/20231115_brv_pt1.13.1/lib/python3.10/site-packages/torch/_tensor.py:1255: UserWarning: Named tensors and all their associated APIs are an experimental feature and subject to change. Please do not use them for anything important until they are released as stable. (Triggered internally at /opt/conda/conda-bld/pytorch_1670525541990/work/c10/core/TensorImpl.h:1758.)\n",
" return super(Tensor, self).rename(names)\n",
"[W NNPACK.cpp:53] Could not initialize NNPACK! Reason: Unsupported hardware.\n",
"/proj/xlabs/users/nfraser/opt/miniforge3/envs/20231115_brv_pt1.13.1/lib/python3.10/site-packages/torch/nn/modules/conv.py:459: UserWarning: Defining your `__torch_function__` as a plain method is deprecated and will be an error in future, please define it as a classmethod. (Triggered internally at /opt/conda/conda-bld/pytorch_1670525541990/work/torch/csrc/utils/python_arg_parser.cpp:350.)\n",
" return F.conv2d(input, weight, bias, self.stride,\n"
]
}
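Note: the recurring `__torch_function__` deprecation warnings come from the older torch/brevitas pair in the rerun environment, not from the notebook code itself. For reference, the classmethod form PyTorch asks for looks like this (a sketch on a plain `torch.Tensor` subclass):

```python
import torch

class TracedTensor(torch.Tensor):
    # Declaring __torch_function__ as a classmethod, rather than a plain
    # method, is what avoids the UserWarning shown in the outputs above.
    @classmethod
    def __torch_function__(cls, func, types, args=(), kwargs=None):
        kwargs = kwargs or {}
        return super().__torch_function__(func, types, args, kwargs)
```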
@@ -152,7 +155,16 @@
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/proj/xlabs/users/nfraser/opt/miniforge3/envs/20231115_brv_pt1.13.1/lib/python3.10/site-packages/brevitas/quant/solver/act.py:132: UserWarning: Group dim is being selected assuming batched input. Using unbatched input will fail and requires manually specification of group_dim\n",
" warn(\n"
]
}
],
"source": [
"from brevitas.quant_tensor import GroupwiseFloatQuantTensor\n",
"\n",
@@ -204,14 +216,6 @@
"Non padding weights shape torch.Size([64, 8, 3, 3])\n",
"Padded weights shape torch.Size([64, 32, 3, 3])\n"
]
-},
-{
-"name": "stderr",
-"output_type": "stream",
-"text": [
-"/home/giuseppe/miniconda3/envs/brevitas_dev/lib/python3.11/site-packages/torch/nn/modules/conv.py:456: UserWarning: Defining your `__torch_function__` as a plain method is deprecated and will be an error in future, please define it as a classmethod. (Triggered internally at /opt/conda/conda-bld/pytorch_1712608853099/work/torch/csrc/utils/python_arg_parser.cpp:294.)\n",
-" return F.conv2d(input, weight, bias, self.stride,\n"
-]
}
],
"source": [
@@ -353,7 +357,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
"version": "3.10.13"
}
},
"nbformat": 4,
46 changes: 18 additions & 28 deletions notebooks/quantized_recurrent.ipynb
@@ -116,7 +116,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
"/scratch/fabian/brevitas/src/brevitas/nn/mixin/base.py:78: UserWarning: Keyword arguments are being passed but they not being used.\n",
"/proj/xlabs/users/nfraser/opt/miniforge3/envs/20231115_brv_pt1.13.1/lib/python3.10/site-packages/brevitas/nn/mixin/base.py:55: UserWarning: Keyword arguments are being passed but they not being used.\n",
" warn('Keyword arguments are being passed but they not being used.')\n"
]
}
@@ -384,14 +384,14 @@
"name": "stderr",
"output_type": "stream",
"text": [
"/scratch/fabian/brevitas/src/brevitas/nn/mixin/base.py:307: UserWarning: Defining your `__torch_function__` as a plain method is deprecated and will be an error in future, please define it as a classmethod. (Triggered internally at /opt/conda/conda-bld/pytorch_1699449183005/work/torch/csrc/utils/python_arg_parser.cpp:368.)\n",
"/proj/xlabs/users/nfraser/opt/miniforge3/envs/20231115_brv_pt1.13.1/lib/python3.10/site-packages/brevitas/nn/mixin/base.py:216: UserWarning: Defining your `__torch_function__` as a plain method is deprecated and will be an error in future, please define it as a classmethod. (Triggered internally at /opt/conda/conda-bld/pytorch_1670525541990/work/torch/csrc/utils/python_arg_parser.cpp:350.)\n",
" return torch.cat(outputs, dim=seq_dim)\n"
]
},
{
"data": {
"text/plain": [
"(QuantTensor(value=tensor([[[-0.0062, -0.2872, 0.7931, 0.4309, 0.5495, -0.4558, 0.2373,\n",
"(IntQuantTensor(value=tensor([[[-0.0062, -0.2872, 0.7931, 0.4309, 0.5495, -0.4558, 0.2373,\n",
" 0.6807, 0.4621, 0.6120, -0.1124, 0.3872, 0.3060, 0.7681,\n",
" -0.3684, 0.0437, -0.7369, -0.3247, 0.7743, 0.3372],\n",
" [ 0.5450, 0.2962, -0.3969, 0.3555, -0.5628, 0.2429, -0.4976,\n",
@@ -423,7 +423,7 @@
" -0.2664, 0.4923, 0.2143, -0.4170, 0.4112, 0.5502, 0.7066,\n",
" -0.6024, 0.7356, 0.0348, 0.1043, -0.1911, -0.4518]]],\n",
" grad_fn=<CatBackward0>), scale=tensor(0.0059, grad_fn=<DivBackward0>), zero_point=tensor(0.), bit_width=tensor(8.), signed_t=tensor(True), training_t=tensor(True)),\n",
" QuantTensor(value=tensor([[[ 0.0579, -0.0058, -0.4054, -0.1564, -0.5560, -0.3301, 0.3533,\n",
" IntQuantTensor(value=tensor([[[ 0.0579, -0.0058, -0.4054, -0.1564, -0.5560, -0.3301, 0.3533,\n",
" 0.0058, -0.1622, -0.3765, 0.1216, 0.0695, -0.4054, 0.0927,\n",
" 0.6139, -0.1390, 0.7066, 0.1274, 0.1622, -0.2896],\n",
" [-0.0290, -0.1738, 0.0695, 0.3765, 0.1738, 0.0579, -0.4054,\n",
@@ -462,14 +462,14 @@
"name": "stderr",
"output_type": "stream",
"text": [
"/scratch/fabian/miniforge3/envs/torchgpu/lib/python3.11/site-packages/torch/_tensor.py:1362: UserWarning: Named tensors and all their associated APIs are an experimental feature and subject to change. Please do not use them for anything important until they are released as stable. (Triggered internally at /opt/conda/conda-bld/pytorch_1699449183005/work/c10/core/TensorImpl.h:1900.)\n",
" return super().rename(names)\n"
"/proj/xlabs/users/nfraser/opt/miniforge3/envs/20231115_brv_pt1.13.1/lib/python3.10/site-packages/torch/_tensor.py:1255: UserWarning: Named tensors and all their associated APIs are an experimental feature and subject to change. Please do not use them for anything important until they are released as stable. (Triggered internally at /opt/conda/conda-bld/pytorch_1670525541990/work/c10/core/TensorImpl.h:1758.)\n",
" return super(Tensor, self).rename(names)\n"
]
},
{
"data": {
"text/plain": [
"(QuantTensor(value=tensor([[[ 0.2111, 0.1267, 0.0060, 0.6153, -0.7721, -0.3740, -0.5188,\n",
"(IntQuantTensor(value=tensor([[[ 0.2111, 0.1267, 0.0060, 0.6153, -0.7721, -0.3740, -0.5188,\n",
" 0.6273, 0.4162, 0.2051, 0.2292, 0.7239, 0.6032, 0.2533,\n",
" 0.5067, 0.6635, 0.1206, -0.5730, 0.0483, 0.3318],\n",
" [ 0.5742, 0.0194, -0.3807, -0.0710, -0.6000, 0.1807, 0.1355,\n",
@@ -501,7 +501,7 @@
" 0.4136, 0.5383, -0.3085, 0.4070, 0.4070, 0.6630, -0.0263,\n",
" 0.2823, -0.1510, 0.1313, -0.5186, 0.4464, -0.0066]]],\n",
" grad_fn=<CatBackward0>), scale=tensor(0.0062, grad_fn=<DivBackward0>), zero_point=tensor(0.), bit_width=tensor(8.), signed_t=tensor(True), training_t=tensor(True)),\n",
" QuantTensor(value=tensor([[[ 0.0000, -0.4004, 0.3151, -0.0263, -0.5842, -0.1641, -0.3939,\n",
" IntQuantTensor(value=tensor([[[ 0.0000, -0.4004, 0.3151, -0.0263, -0.5842, -0.1641, -0.3939,\n",
" 0.0263, -0.2429, 0.6499, -0.5186, 0.1247, -0.2101, 0.8337,\n",
" -0.1444, 0.6762, -0.1641, -0.5317, -0.1707, -0.0197],\n",
" [ 0.3479, 0.5974, -0.3939, 0.1444, -0.6762, 0.1969, -0.6499,\n",
@@ -538,7 +538,7 @@
{
"data": {
"text/plain": [
"(QuantTensor(value=tensor([[[-0.3777, -0.2074, 0.7184, 0.9110, 0.0148, -0.1926, -0.7110,\n",
"(IntQuantTensor(value=tensor([[[-0.3777, -0.2074, 0.7184, 0.9110, 0.0148, -0.1926, -0.7110,\n",
" 0.1926, -0.4222, -0.9480, 0.2592, 0.2222, -0.2370, -0.5407,\n",
" 0.5851, -0.2370, 0.3555, 0.1703, 0.4444, -0.2222],\n",
" [ 0.4814, -0.7355, -0.1605, 0.3878, -0.5282, 0.2073, 0.0000,\n",
@@ -570,7 +570,7 @@
" -0.3422, 0.8028, 0.0855, -0.7238, -0.6317, 0.2764, -0.0461,\n",
" -0.4211, -0.5988, 0.2632, 0.4014, -0.7501, -0.5659]]],\n",
" grad_fn=<CatBackward0>), scale=tensor(0.0069, grad_fn=<DivBackward0>), zero_point=tensor(0.), bit_width=tensor(8.), signed_t=tensor(True), training_t=tensor(True)),\n",
" QuantTensor(value=tensor([[[-0.0066, 0.4804, 0.0066, -0.1184, 0.6843, -0.0197, 0.1448,\n",
" IntQuantTensor(value=tensor([[[-0.0066, 0.4804, 0.0066, -0.1184, 0.6843, -0.0197, 0.1448,\n",
" 0.1842, 0.6383, -0.1908, -0.0066, -0.1053, -0.1316, 0.0461,\n",
" -0.0066, -0.2764, 0.3751, 0.3619, 0.5001, -0.1316],\n",
" [-0.6449, 0.5856, -0.0263, -0.0197, 0.8357, -0.5856, 0.0395,\n",
@@ -985,7 +985,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
"2024-02-28 05:30:11.212034979 [W:onnxruntime:, graph.cc:1283 Graph] Initializer onnx::LSTM_93 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n"
"2024-09-12 12:18:52.692518968 [W:onnxruntime:, graph.cc:1283 Graph] Initializer onnx::LSTM_93 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n"
]
}
],
@@ -1084,7 +1084,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
"2024-02-28 05:30:18.857576114 [W:onnxruntime:, graph.cc:1283 Graph] Initializer onnx::LSTM_87 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n"
"2024-09-12 12:18:53.086326293 [W:onnxruntime:, graph.cc:1283 Graph] Initializer onnx::LSTM_87 appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.\n"
]
}
],
@@ -1115,7 +1115,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
"/scratch/fabian/brevitas/src/brevitas/nn/mixin/base.py:78: UserWarning: Keyword arguments are being passed but they not being used.\n",
"/proj/xlabs/users/nfraser/opt/miniforge3/envs/20231115_brv_pt1.13.1/lib/python3.10/site-packages/brevitas/nn/mixin/base.py:55: UserWarning: Keyword arguments are being passed but they not being used.\n",
" warn('Keyword arguments are being passed but they not being used.')\n"
]
}
@@ -1191,7 +1191,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
"/scratch/fabian/brevitas/src/brevitas/nn/mixin/base.py:78: UserWarning: Keyword arguments are being passed but they not being used.\n",
"/proj/xlabs/users/nfraser/opt/miniforge3/envs/20231115_brv_pt1.13.1/lib/python3.10/site-packages/brevitas/nn/mixin/base.py:55: UserWarning: Keyword arguments are being passed but they not being used.\n",
" warn('Keyword arguments are being passed but they not being used.')\n"
]
}
@@ -1268,7 +1268,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
"/scratch/fabian/brevitas/src/brevitas/nn/mixin/base.py:78: UserWarning: Keyword arguments are being passed but they not being used.\n",
"/proj/xlabs/users/nfraser/opt/miniforge3/envs/20231115_brv_pt1.13.1/lib/python3.10/site-packages/brevitas/nn/mixin/base.py:55: UserWarning: Keyword arguments are being passed but they not being used.\n",
" warn('Keyword arguments are being passed but they not being used.')\n"
]
}
@@ -1345,7 +1345,7 @@
"name": "stderr",
"output_type": "stream",
"text": [
"/scratch/fabian/brevitas/src/brevitas/nn/mixin/base.py:78: UserWarning: Keyword arguments are being passed but they not being used.\n",
"/proj/xlabs/users/nfraser/opt/miniforge3/envs/20231115_brv_pt1.13.1/lib/python3.10/site-packages/brevitas/nn/mixin/base.py:55: UserWarning: Keyword arguments are being passed but they not being used.\n",
" warn('Keyword arguments are being passed but they not being used.')\n"
]
}
@@ -1420,17 +1420,7 @@
"cell_type": "code",
"execution_count": 32,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"[W shape_type_inference.cpp:1974] Warning: The shape inference of onnx.brevitas::QuantLSTMCell type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function. (function UpdateReliable)\n",
"[W shape_type_inference.cpp:1974] Warning: The shape inference of onnx.brevitas::QuantLSTMCell type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function. (function UpdateReliable)\n",
"[W shape_type_inference.cpp:1974] Warning: The shape inference of onnx.brevitas::QuantLSTMCell type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function. (function UpdateReliable)\n"
]
}
],
"outputs": [],
"source": [
"import torch\n",
"from brevitas.nn import QuantLSTM\n",
@@ -1566,7 +1556,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.5"
"version": "3.10.13"
},
"vscode": {
"interpreter": {
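Note: the dropped QuantLSTMCell warnings came from exporting QuantLSTM through Brevitas's custom onnx.brevitas domain, whose QuantLSTMCell op has no ONNX shape inference. A hedged sketch of that kind of export (sizes and the output path are placeholders, and export_qonnx is assumed to be the entry point for the custom-op flow):

```python
import torch
from brevitas.nn import QuantLSTM
from brevitas.export import export_qonnx  # custom-op (QONNX) export path

model = QuantLSTM(input_size=20, hidden_size=20)  # sizes are placeholders
# Emits onnx.brevitas::QuantLSTMCell nodes, which lack ONNX shape
# inference and previously triggered the warnings removed above.
export_qonnx(model, args=torch.randn(5, 1, 20), export_path="quant_lstm_qonnx.onnx")
```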
