From 00ed4a7373fba9610d751dedb235f6153fec6d86 Mon Sep 17 00:00:00 2001 From: liuxinwei Date: Wed, 4 Dec 2024 15:33:52 +0800 Subject: [PATCH] =?UTF-8?q?=E6=B7=BB=E5=8A=A0=20pytorch=20=E7=94=9F?= =?UTF-8?q?=E6=80=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .github/workflows/pages.yml | 2 +- .gitignore | 1 + doc/datasets/cv/DIV2K.ipynb | 71 ++ doc/datasets/cv/index.md | 5 + doc/datasets/index.md | 5 + doc/ecosystem/Captum/index.md | 11 + doc/ecosystem/ExecuTorch/index.md | 5 + doc/ecosystem/LightlySSL/index.md | 5 + doc/ecosystem/OpenMMLab/index.md | 9 + .../OpenMMLab}/mmagic/a.py | 0 .../OpenMMLab}/mmagic/images/output.png | Bin .../OpenMMLab}/mmagic/index.md | 0 .../OpenMMLab}/mmagic/quick.ipynb | 0 .../OpenMMLab}/mmdesign/SRCNN.ipynb | 0 .../OpenMMLab}/mmdesign/env.py | 0 .../OpenMMLab}/mmdesign/index.md | 0 .../OpenMMLab}/mmengine/index.md | 0 .../OpenMMLab}/mmengine/set_env.py | 0 .../OpenMMLab}/mmengine/start.ipynb | 0 .../mmengine/tutorials/config.ipynb | 0 .../OpenMMLab}/mmengine/tutorials/index.md | 0 .../mmengine/tutorials/registry.ipynb | 0 .../mmengine/tutorials/text-config.ipynb | 0 doc/ecosystem/PyTorchVideo/index.md | 3 + doc/ecosystem/Ray/index.md | 7 + doc/ecosystem/Renate/index.md | 3 + doc/ecosystem/TorchDrug/index.md | 3 + doc/ecosystem/TorchIO/index.md | 3 + doc/ecosystem/TorchOpt/index.md | 7 + doc/ecosystem/depyf/index.md | 57 ++ doc/ecosystem/depyf/walk-through.ipynb | 415 +++++++++++ doc/ecosystem/flower/index.md | 3 + doc/ecosystem/index.md | 23 + doc/ecosystem/neural-compressor/index.md | 3 + doc/ecosystem/torchao/index.md | 7 + doc/ecosystem/torchtune/index.md | 5 + .../ultralytics/.gitignore | 0 .../ultralytics/guides/analytics.md | 0 .../ultralytics/guides/azureml-quickstart.md | 0 .../ultralytics/guides/conda-quickstart.md | 0 .../guides/coral-edge-tpu-on-raspberry-pi.md | 0 .../guides/data-collection-and-annotation.md | 0 .../guides/deepstream-nvidia-jetson.md | 0 .../guides/defining-project-goals.md | 0 .../ultralytics/guides/deploy.md | 0 .../guides/distance-calculation.md | 0 .../ultralytics/guides/docker-quickstart.md | 0 .../ultralytics/guides/edge.md | 0 .../ultralytics/guides/features.md | 0 .../ultralytics/guides/heatmaps.md | 0 .../guides/hyperparameter-tuning.md | 0 .../ultralytics/guides/index.md | 0 .../instance-segmentation-and-tracking.md | 0 .../ultralytics/guides/intro.md | 0 .../guides/isolating-segmentation-objects.md | 0 .../guides/kfold-cross-validation.md | 0 .../guides/model-deployment-options.md | 0 .../guides/model-deployment-practices.md | 0 .../guides/model-evaluation-insights.md | 0 .../model-monitoring-and-maintenance.md | 0 .../ultralytics/guides/model-testing.md | 0 .../ultralytics/guides/model-training-tips.md | 0 .../ultralytics/guides/nvidia-jetson.md | 0 .../ultralytics/guides/object-blurring.md | 0 .../ultralytics/guides/object-counting.md | 0 .../ultralytics/guides/object-cropping.md | 0 ...ng-openvino-latency-vs-throughput-modes.md | 0 .../ultralytics/guides/parking-management.md | 0 .../guides/preprocessing_annotated_data.md | 0 .../ultralytics/guides/project.md | 0 .../ultralytics/guides/queue-management.md | 0 .../ultralytics/guides/raspberry-pi.md | 0 .../ultralytics/guides/region-counting.md | 0 .../ultralytics/guides/ros-quickstart.md | 0 .../guides/sahi-tiled-inference.md | 0 .../guides/security-alarm-system.md | 0 .../ultralytics/guides/speed-estimation.md | 0 .../guides/steps-of-a-cv-project.md | 0 .../guides/streamlit-live-inference.md | 0 
.../guides/triton-inference-server.md | 0 .../guides/view-results-in-terminal.md | 0 .../ultralytics/guides/vision-eye.md | 0 .../ultralytics/guides/workouts-monitoring.md | 0 .../ultralytics/guides/yolo-common-issues.md | 0 .../guides/yolo-performance-metrics.md | 0 .../guides/yolo-thread-safe-inference.md | 0 .../ultralytics/images/bus.jpg | Bin doc/{tools => ecosystem}/ultralytics/index.md | 0 .../ultralytics/intro.ipynb | 0 .../ultralytics/modes/benchmark.ipynb | 0 .../ultralytics/modes/export.ipynb | 0 .../ultralytics/modes/index.md | 0 .../ultralytics/modes/predict.ipynb | 0 .../ultralytics/modes/set_env.py | 0 .../ultralytics/modes/track.ipynb | 0 .../ultralytics/modes/train.ipynb | 0 .../ultralytics/modes/val.ipynb | 0 .../ultralytics/solutions/index.md | 0 .../ultralytics/tasks/classify.ipynb | 0 .../ultralytics/tasks/detect.ipynb | 0 .../ultralytics/tasks/index.md | 0 .../ultralytics/tasks/obb.ipynb | 0 .../ultralytics/tasks/pose.ipynb | 0 .../ultralytics/tasks/segment.ipynb | 0 .../ultralytics/tasks/set_env.py | 0 .../ultralytics/test.ipynb | 0 doc/ecosystem/usb/index.md | 3 + doc/index.md | 6 +- doc/news/index.md | 3 + doc/sr/SRCNN/code.ipynb | 680 +++++++++++++++++- doc/tools/index.md | 8 - pyproject.toml | 25 +- tests/env.py | 2 +- 113 files changed, 1338 insertions(+), 42 deletions(-) create mode 100644 doc/datasets/cv/DIV2K.ipynb create mode 100644 doc/datasets/cv/index.md create mode 100644 doc/datasets/index.md create mode 100644 doc/ecosystem/Captum/index.md create mode 100644 doc/ecosystem/ExecuTorch/index.md create mode 100644 doc/ecosystem/LightlySSL/index.md create mode 100644 doc/ecosystem/OpenMMLab/index.md rename doc/{tools => ecosystem/OpenMMLab}/mmagic/a.py (100%) rename doc/{tools => ecosystem/OpenMMLab}/mmagic/images/output.png (100%) rename doc/{tools => ecosystem/OpenMMLab}/mmagic/index.md (100%) rename doc/{tools => ecosystem/OpenMMLab}/mmagic/quick.ipynb (100%) rename doc/{tools => ecosystem/OpenMMLab}/mmdesign/SRCNN.ipynb (100%) rename doc/{tools => ecosystem/OpenMMLab}/mmdesign/env.py (100%) rename doc/{tools => ecosystem/OpenMMLab}/mmdesign/index.md (100%) rename doc/{tools => ecosystem/OpenMMLab}/mmengine/index.md (100%) rename doc/{tools => ecosystem/OpenMMLab}/mmengine/set_env.py (100%) rename doc/{tools => ecosystem/OpenMMLab}/mmengine/start.ipynb (100%) rename doc/{tools => ecosystem/OpenMMLab}/mmengine/tutorials/config.ipynb (100%) rename doc/{tools => ecosystem/OpenMMLab}/mmengine/tutorials/index.md (100%) rename doc/{tools => ecosystem/OpenMMLab}/mmengine/tutorials/registry.ipynb (100%) rename doc/{tools => ecosystem/OpenMMLab}/mmengine/tutorials/text-config.ipynb (100%) create mode 100644 doc/ecosystem/PyTorchVideo/index.md create mode 100644 doc/ecosystem/Ray/index.md create mode 100644 doc/ecosystem/Renate/index.md create mode 100644 doc/ecosystem/TorchDrug/index.md create mode 100644 doc/ecosystem/TorchIO/index.md create mode 100644 doc/ecosystem/TorchOpt/index.md create mode 100644 doc/ecosystem/depyf/index.md create mode 100644 doc/ecosystem/depyf/walk-through.ipynb create mode 100644 doc/ecosystem/flower/index.md create mode 100644 doc/ecosystem/index.md create mode 100644 doc/ecosystem/neural-compressor/index.md create mode 100644 doc/ecosystem/torchao/index.md create mode 100644 doc/ecosystem/torchtune/index.md rename doc/{tools => ecosystem}/ultralytics/.gitignore (100%) rename doc/{tools => ecosystem}/ultralytics/guides/analytics.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/azureml-quickstart.md (100%) rename 
doc/{tools => ecosystem}/ultralytics/guides/conda-quickstart.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/coral-edge-tpu-on-raspberry-pi.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/data-collection-and-annotation.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/deepstream-nvidia-jetson.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/defining-project-goals.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/deploy.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/distance-calculation.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/docker-quickstart.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/edge.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/features.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/heatmaps.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/hyperparameter-tuning.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/index.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/instance-segmentation-and-tracking.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/intro.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/isolating-segmentation-objects.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/kfold-cross-validation.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/model-deployment-options.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/model-deployment-practices.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/model-evaluation-insights.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/model-monitoring-and-maintenance.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/model-testing.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/model-training-tips.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/nvidia-jetson.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/object-blurring.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/object-counting.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/object-cropping.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/optimizing-openvino-latency-vs-throughput-modes.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/parking-management.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/preprocessing_annotated_data.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/project.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/queue-management.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/raspberry-pi.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/region-counting.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/ros-quickstart.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/sahi-tiled-inference.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/security-alarm-system.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/speed-estimation.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/steps-of-a-cv-project.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/streamlit-live-inference.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/triton-inference-server.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/view-results-in-terminal.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/vision-eye.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/workouts-monitoring.md (100%) 
rename doc/{tools => ecosystem}/ultralytics/guides/yolo-common-issues.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/yolo-performance-metrics.md (100%) rename doc/{tools => ecosystem}/ultralytics/guides/yolo-thread-safe-inference.md (100%) rename doc/{tools => ecosystem}/ultralytics/images/bus.jpg (100%) rename doc/{tools => ecosystem}/ultralytics/index.md (100%) rename doc/{tools => ecosystem}/ultralytics/intro.ipynb (100%) rename doc/{tools => ecosystem}/ultralytics/modes/benchmark.ipynb (100%) rename doc/{tools => ecosystem}/ultralytics/modes/export.ipynb (100%) rename doc/{tools => ecosystem}/ultralytics/modes/index.md (100%) rename doc/{tools => ecosystem}/ultralytics/modes/predict.ipynb (100%) rename doc/{tools => ecosystem}/ultralytics/modes/set_env.py (100%) rename doc/{tools => ecosystem}/ultralytics/modes/track.ipynb (100%) rename doc/{tools => ecosystem}/ultralytics/modes/train.ipynb (100%) rename doc/{tools => ecosystem}/ultralytics/modes/val.ipynb (100%) rename doc/{tools => ecosystem}/ultralytics/solutions/index.md (100%) rename doc/{tools => ecosystem}/ultralytics/tasks/classify.ipynb (100%) rename doc/{tools => ecosystem}/ultralytics/tasks/detect.ipynb (100%) rename doc/{tools => ecosystem}/ultralytics/tasks/index.md (100%) rename doc/{tools => ecosystem}/ultralytics/tasks/obb.ipynb (100%) rename doc/{tools => ecosystem}/ultralytics/tasks/pose.ipynb (100%) rename doc/{tools => ecosystem}/ultralytics/tasks/segment.ipynb (100%) rename doc/{tools => ecosystem}/ultralytics/tasks/set_env.py (100%) rename doc/{tools => ecosystem}/ultralytics/test.ipynb (100%) create mode 100644 doc/ecosystem/usb/index.md create mode 100644 doc/news/index.md delete mode 100644 doc/tools/index.md diff --git a/.github/workflows/pages.yml b/.github/workflows/pages.yml index 8b6d63b..d122b5b 100644 --- a/.github/workflows/pages.yml +++ b/.github/workflows/pages.yml @@ -42,7 +42,7 @@ jobs: sudo apt-get install graphviz pip install -U openmim mim install mmcv - pip install .[doc] + pip install .[doc,dev] pip install sphinxcontrib-websupport conda install -c conda-forge pandoc invoke doc diff --git a/.gitignore b/.gitignore index 1538987..c20b6e2 100644 --- a/.gitignore +++ b/.gitignore @@ -161,3 +161,4 @@ __pycache__/ *.npz *.torchscript *.engine +work_dirs/ \ No newline at end of file diff --git a/doc/datasets/cv/DIV2K.ipynb b/doc/datasets/cv/DIV2K.ipynb new file mode 100644 index 0000000..c1da47f --- /dev/null +++ b/doc/datasets/cv/DIV2K.ipynb @@ -0,0 +1,71 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# DIV2K 数据\n", + "\n", + "[DIV2K 数据集](https://data.vision.ee.ethz.ch/cvl/DIV2K/)是广泛用于图像超分辨率(Super-Resolution, SR)研究的高质量图像数据集。它由2K分辨率的图像组成,提供了高分辨率(HR)图像和相应的低分辨率(LR)图像,用于训练和测试超分辨率算法。\n", + "\n", + "## 数据集概述\n", + "\n", + "- **名称**:DIV2K\n", + "- **全称**:DIVerse 2K resolution high quality images\n", + "- **发布者**:NTIRE(New Trends in Image Restoration and Enhancement)Workshop\n", + "- **发布年份**:2017\n", + "- **用途**:图像超分辨率研究\n", + "\n", + "### 数据集组成\n", + "\n", + "DIV2K 数据集包含以下部分:\n", + "\n", + "1. **训练集**:\n", + " - 800张高分辨率(HR)图像。\n", + " - 对应的低分辨率(LR)图像,通过不同的降采样方法生成(如双三次插值、双线性插值等)。\n", + "\n", + "2. **验证集**:\n", + " - 100张高分辨率(HR)图像。\n", + " - 对应的低分辨率(LR)图像。\n", + "\n", + "3. 
**测试集**:\n", + " - 100张高分辨率(HR)图像。\n", + " - 对应的低分辨率(LR)图像。\n", + "\n", + "### 数据集特点\n", + "\n", + "- **高分辨率**:所有图像的分辨率为2K(2048x1080或类似分辨率)。\n", + "- **多样性**:图像内容多样,包括自然场景、人像、建筑、纹理等。\n", + "- **高质量**:图像质量高,适合用于训练和评估超分辨率算法。" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "ai", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.7" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/doc/datasets/cv/index.md b/doc/datasets/cv/index.md new file mode 100644 index 0000000..50cc151 --- /dev/null +++ b/doc/datasets/cv/index.md @@ -0,0 +1,5 @@ +# 计算机视觉数据 + +```{toctree} +DIV2K +``` diff --git a/doc/datasets/index.md b/doc/datasets/index.md new file mode 100644 index 0000000..0246a4b --- /dev/null +++ b/doc/datasets/index.md @@ -0,0 +1,5 @@ +# 数据集 + +```{toctree} +cv/index +``` diff --git a/doc/ecosystem/Captum/index.md b/doc/ecosystem/Captum/index.md new file mode 100644 index 0000000..792c8df --- /dev/null +++ b/doc/ecosystem/Captum/index.md @@ -0,0 +1,11 @@ +# Captum + +[Captum](https://captum.ai/)(拉丁语中意为“理解”)是开源的、可扩展的库,基于 PyTorch 构建模型的解释性。随着模型复杂度的增加以及由此带来的透明度缺失,模型解释方法变得越来越重要。无论是在研究领域还是在使用机器学习的各个行业中的实际应用方面,理解模型都成为了活跃的研究主题和关注焦点。Captum 提供了最先进的算法,包括集成梯度法,以便研究人员和开发者能够轻松理解哪些特征对模型输出有所贡献。 + +Captum 帮助机器学习研究者更容易地实现可以与 PyTorch 模型交互的解释性算法。它还允许研究人员快速将他们的工作与库中其他现有算法进行基准对比。对于模型开发者来说,Captum 可以用来通过促进识别不同特征来改进和排查模型,这些特征有助于模型的输出,从而设计出更好的模型并解决意外的模型输出问题。 + +## 目标受众 + +Captum 的主要受众是那些希望改进其模型并了解哪些特征重要的模型开发者,以及专注于识别能够更好地解释多种类型模型的算法的解释性研究者。 + +Captum 也可以被使用训练有素模型进行生产的应用程序工程师所使用。Captum 通过提高模型的解释性,提供了更简单的故障排除方式,并有可能向最终用户更好地解释为什么他们会看到特定的内容,例如电影推荐。 diff --git a/doc/ecosystem/ExecuTorch/index.md b/doc/ecosystem/ExecuTorch/index.md new file mode 100644 index 0000000..d52229b --- /dev/null +++ b/doc/ecosystem/ExecuTorch/index.md @@ -0,0 +1,5 @@ +# ExecuTorch + +[ExecuTorch](https://pytorch.org/executorch/stable/index.html) 平台提供了基础设施,使 PyTorch 程序能够从增强现实/虚拟现实可穿戴设备到标准的 iOS 和 Android 移动设备上运行。 ExecuTorch 的主要目标之一是增强 PyTorch 程序的自定义和部署能力。 + +ExecuTorch 高度依赖于 PyTorch 技术,如 {func}`torch.compile` 和 {func}`torch.export`。 diff --git a/doc/ecosystem/LightlySSL/index.md b/doc/ecosystem/LightlySSL/index.md new file mode 100644 index 0000000..0a7ca73 --- /dev/null +++ b/doc/ecosystem/LightlySSL/index.md @@ -0,0 +1,5 @@ +# LightlySSL + +[LightlySSL](https://docs.lightly.ai/self-supervised-learning/) 是用于自监督学习的计算机视觉框架。 + +通过 LightlySSL,您可以使用自监督的方式训练深度学习模型。这意味着您无需任何标签即可进行模型训练。LightlySSL 旨在帮助您理解和处理大型无标注数据集。它是基于 PyTorch 构建的,因此与 Fast.ai 等其他框架完全兼容。 \ No newline at end of file diff --git a/doc/ecosystem/OpenMMLab/index.md b/doc/ecosystem/OpenMMLab/index.md new file mode 100644 index 0000000..6884a5b --- /dev/null +++ b/doc/ecosystem/OpenMMLab/index.md @@ -0,0 +1,9 @@ +# OpenMMLab + +项目初始化 `cd ../../tests && . 
init_openmmlab.sh` + +```{toctree} +mmengine/index +mmagic/index +mmdesign/index +``` diff --git a/doc/tools/mmagic/a.py b/doc/ecosystem/OpenMMLab/mmagic/a.py similarity index 100% rename from doc/tools/mmagic/a.py rename to doc/ecosystem/OpenMMLab/mmagic/a.py diff --git a/doc/tools/mmagic/images/output.png b/doc/ecosystem/OpenMMLab/mmagic/images/output.png similarity index 100% rename from doc/tools/mmagic/images/output.png rename to doc/ecosystem/OpenMMLab/mmagic/images/output.png diff --git a/doc/tools/mmagic/index.md b/doc/ecosystem/OpenMMLab/mmagic/index.md similarity index 100% rename from doc/tools/mmagic/index.md rename to doc/ecosystem/OpenMMLab/mmagic/index.md diff --git a/doc/tools/mmagic/quick.ipynb b/doc/ecosystem/OpenMMLab/mmagic/quick.ipynb similarity index 100% rename from doc/tools/mmagic/quick.ipynb rename to doc/ecosystem/OpenMMLab/mmagic/quick.ipynb diff --git a/doc/tools/mmdesign/SRCNN.ipynb b/doc/ecosystem/OpenMMLab/mmdesign/SRCNN.ipynb similarity index 100% rename from doc/tools/mmdesign/SRCNN.ipynb rename to doc/ecosystem/OpenMMLab/mmdesign/SRCNN.ipynb diff --git a/doc/tools/mmdesign/env.py b/doc/ecosystem/OpenMMLab/mmdesign/env.py similarity index 100% rename from doc/tools/mmdesign/env.py rename to doc/ecosystem/OpenMMLab/mmdesign/env.py diff --git a/doc/tools/mmdesign/index.md b/doc/ecosystem/OpenMMLab/mmdesign/index.md similarity index 100% rename from doc/tools/mmdesign/index.md rename to doc/ecosystem/OpenMMLab/mmdesign/index.md diff --git a/doc/tools/mmengine/index.md b/doc/ecosystem/OpenMMLab/mmengine/index.md similarity index 100% rename from doc/tools/mmengine/index.md rename to doc/ecosystem/OpenMMLab/mmengine/index.md diff --git a/doc/tools/mmengine/set_env.py b/doc/ecosystem/OpenMMLab/mmengine/set_env.py similarity index 100% rename from doc/tools/mmengine/set_env.py rename to doc/ecosystem/OpenMMLab/mmengine/set_env.py diff --git a/doc/tools/mmengine/start.ipynb b/doc/ecosystem/OpenMMLab/mmengine/start.ipynb similarity index 100% rename from doc/tools/mmengine/start.ipynb rename to doc/ecosystem/OpenMMLab/mmengine/start.ipynb diff --git a/doc/tools/mmengine/tutorials/config.ipynb b/doc/ecosystem/OpenMMLab/mmengine/tutorials/config.ipynb similarity index 100% rename from doc/tools/mmengine/tutorials/config.ipynb rename to doc/ecosystem/OpenMMLab/mmengine/tutorials/config.ipynb diff --git a/doc/tools/mmengine/tutorials/index.md b/doc/ecosystem/OpenMMLab/mmengine/tutorials/index.md similarity index 100% rename from doc/tools/mmengine/tutorials/index.md rename to doc/ecosystem/OpenMMLab/mmengine/tutorials/index.md diff --git a/doc/tools/mmengine/tutorials/registry.ipynb b/doc/ecosystem/OpenMMLab/mmengine/tutorials/registry.ipynb similarity index 100% rename from doc/tools/mmengine/tutorials/registry.ipynb rename to doc/ecosystem/OpenMMLab/mmengine/tutorials/registry.ipynb diff --git a/doc/tools/mmengine/tutorials/text-config.ipynb b/doc/ecosystem/OpenMMLab/mmengine/tutorials/text-config.ipynb similarity index 100% rename from doc/tools/mmengine/tutorials/text-config.ipynb rename to doc/ecosystem/OpenMMLab/mmengine/tutorials/text-config.ipynb diff --git a/doc/ecosystem/PyTorchVideo/index.md b/doc/ecosystem/PyTorchVideo/index.md new file mode 100644 index 0000000..9ab667c --- /dev/null +++ b/doc/ecosystem/PyTorchVideo/index.md @@ -0,0 +1,3 @@ +# PyTorchVideo + +[PyTorchVideo](https://pytorchvideo.org/) 是开源的视频理解库,它提供了最新的构建器,用于最先进的视频理解主干、层、头部和损失函数,以应对不同的任务。这些任务包括声学事件检测、动作识别(视频分类)、动作检测(视频检测)、多模态理解(声音视觉分类)以及自监督学习。 \ No newline at end of file diff --git 
a/doc/ecosystem/Ray/index.md b/doc/ecosystem/Ray/index.md new file mode 100644 index 0000000..7c04110 --- /dev/null +++ b/doc/ecosystem/Ray/index.md @@ -0,0 +1,7 @@ +# Ray + +[Ray](https://docs.ray.io/en/latest/index.html) 是开源的统一框架,用于扩展 AI 和 Python 应用程序,例如机器学习。它提供了并行处理的计算层,因此您无需成为分布式系统专家。Ray 通过以下组件最小化了运行分布式个体和端到端机器学习工作流程的复杂性: + +- 可扩展的库,用于常见的机器学习任务,如数据预处理、分布式训练、超参数调整、强化学习和模型服务。 +- Pythonic 分布式计算原语,用于并行化和扩展 Python 应用程序。 +- 集成和实用工具,用于将 Ray 集群与现有工具和基础设施(如 Kubernetes、AWS、GCP 和 Azure)集成和部署。 diff --git a/doc/ecosystem/Renate/index.md b/doc/ecosystem/Renate/index.md new file mode 100644 index 0000000..eaf0c77 --- /dev/null +++ b/doc/ecosystem/Renate/index.md @@ -0,0 +1,3 @@ +# Renate + +[Renate](https://renate.readthedocs.io/en/latest/) Python 包,用于自动重新训练神经网络模型。它采用先进的持续学习和终身学习算法来实现这一目标。其实现基于 PyTorch 和 Lightning 进行深度学习,以及使用 [Syne Tune](https://github.com/awslabs/syne-tune) 进行超参数优化。 diff --git a/doc/ecosystem/TorchDrug/index.md b/doc/ecosystem/TorchDrug/index.md new file mode 100644 index 0000000..d46c93a --- /dev/null +++ b/doc/ecosystem/TorchDrug/index.md @@ -0,0 +1,3 @@ +# TorchDrug + +[TorchDrug](https://torchdrug.ai/) 是专为药物发现设计的机器学习平台,它融合了图机器学习(包括图神经网络、几何深度学习和知识图谱)、深度生成模型以及强化学习等多种技术。该平台提供了全面而灵活的接口,支持在 PyTorch 中快速构建药物发现模型原型。 diff --git a/doc/ecosystem/TorchIO/index.md b/doc/ecosystem/TorchIO/index.md new file mode 100644 index 0000000..e1c09eb --- /dev/null +++ b/doc/ecosystem/TorchIO/index.md @@ -0,0 +1,3 @@ +# TorchIO + +[TorchIO](https://torchio.readthedocs.io/index.html) 是开源的 Python 库,用于深度学习中3D医学图像的有效加载、预处理、增强和基于区块的采样,该库遵循 PyTorch 的设计。 \ No newline at end of file diff --git a/doc/ecosystem/TorchOpt/index.md b/doc/ecosystem/TorchOpt/index.md new file mode 100644 index 0000000..fa8c758 --- /dev/null +++ b/doc/ecosystem/TorchOpt/index.md @@ -0,0 +1,7 @@ +# TorchOpt + +[TorchOpt](https://torchopt.readthedocs.io/en/latest/index.html) 是基于 PyTorch 构建的高效可微分优化库。TorchOpt 具有以下特点: + +- 全面性:TorchOpt 提供了三种不同的微分模式——显式微分、隐式微分和零阶微分,以应对不同的可微分优化情境。 +- 灵活性:TorchOpt 为用户提供了函数式和面向对象两种 API 风格,以满足用户的不同偏好。用户可以选择类似 JAX 或 PyTorch 的风格来实现可微分优化。 +- 高效性:TorchOpt 提供了(1)CPU/GPU加速的可微分优化器;(2)基于RPC的分布式训练框架;(3)快速树操作,这些功能极大地提高了双层优化问题的训练效率。 \ No newline at end of file diff --git a/doc/ecosystem/depyf/index.md b/doc/ecosystem/depyf/index.md new file mode 100644 index 0000000..e266b9c --- /dev/null +++ b/doc/ecosystem/depyf/index.md @@ -0,0 +1,57 @@ +# depyf + +在了解 [`depyf`](https://depyf.readthedocs.io/en/latest/) 的使用方法之前,推荐您先阅读 [`torch.compile` 示例教程](walk-through),以便理解 `depyf` 如何帮助您。 + +`depyf` 旨在解决 {func}`torch.compile` 的两个痛点: + +1. {func}`torch.compile` 转换 Python 字节码,但很少有开发者能读懂 Python 字节码(除非你的大脑里有一台堆栈机……),从而理解发生了什么。`depyf` 帮助将转换后的字节码反编译回 Python 源代码,使开发者能够理解 {func}`torch.compile` 是如何转换他们的代码的。这极大地帮助用户调整他们的代码以适应 {func}`torch.compile`,使他们能够编写对 {func}`torch.compile` 友好的代码。 +2. 
{func}`torch.compile` 动态生成许多函数,这些函数只能作为黑盒子运行。用户无法逐行调试代码。`depyf` 帮助将源代码导出到文件中,并将这些函数与源代码文件链接起来,这样用户就可以使用调试器逐行调试这些函数了。这极大地帮助用户理解 {func}`torch.compile` 并在训练过程中调试如 `NaN` 等问题。 + +采用从教程示例中的工作流程:![](https://depyf.readthedocs.io/en/latest/_images/dynamo-workflow-with-depyf.svg) + +`depyf` 有助于: + +- 提供上述工作流程的源代码描述,以便用户能够轻松理解。(实际的工作流程发生在 C 语言中,并在 CPython 解释器内进行,提供 Python 源代码描述的工作流程,以便用户可以更容易地理解。) +- 生成转换后的字节码和恢复函数的源代码。 +- 将计算图计算函数与磁盘上的代码链接起来,以便调试器可以逐步执行代码。 + +`depyf` 的主要用途涉及两个上下文管理器,建议在调试器中启动脚本: + +```python +import torch + +@torch.compile +def function(inputs): + x = inputs["x"] + y = inputs["y"] + x = x.cos().cos() + if x.mean() > 0.5: + x = x / 1.1 + return x * y + +shape_10_inputs = {"x": torch.randn(10, requires_grad=True), "y": torch.randn(10, requires_grad=True)} +shape_8_inputs = {"x": torch.randn(8, requires_grad=True), "y": torch.randn(8, requires_grad=True)} + +import depyf +with depyf.prepare_debug("./debug_dir"): + # warmup + for i in range(100): + output = function(shape_10_inputs) + output = function(shape_8_inputs) +# the program will pause here for you to set breakpoints +# then you can hit breakpoints when running the function +with depyf.debug(): + output = function(shape_10_inputs) +``` + +第一个上下文管理器 {func}`depyf.prepare_debug` 接受一个目录路径作为参数,用于将所有源代码转储至该目录。在这个上下文管理器中,PyTorch 的所有内部细节将被 `depyf` 挂钩,它会自动为你转储必要的源代码。 + +第二个上下文管理器 {func}`depyf.debug` 无需任何参数,它仅禁用新的编译条目。一旦进入此上下文管理器,程序将会暂停,你可以浏览指定目录下(本例中为 `"./debug_dir"`)的所有源代码。入口文件是 `full_code_for_xxx.py`。你可以在这些文件中设置断点。最重要的是,你在这个上下文管理器下设置的断点可以被命中。你可以逐行调试代码,以排查可能的 `NaN` 值或理解你的代码发生了什么。 + +下图展示了 `depyf` 的两个典型用法,并列出了所有生成的文件。![](https://depyf.readthedocs.io/en/latest/_images/usage.svg) + +```{toctree} +:hidden: + +walk-through +``` \ No newline at end of file diff --git a/doc/ecosystem/depyf/walk-through.ipynb b/doc/ecosystem/depyf/walk-through.ipynb new file mode 100644 index 0000000..93455a1 --- /dev/null +++ b/doc/ecosystem/depyf/walk-through.ipynb @@ -0,0 +1,415 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# PyTorch编译器示例教程\n", + "\n", + "本教程旨在涵盖 PyTorch 编译器的以下几个方面:\n", + "\n", + "- 基本概念(即时(Just-In-Time)编译器,提前(Ahead-of-time)编译器)\n", + "- Dynamo(图捕获,将用户的代码分为纯 Python 代码和纯 PyTorch 相关代码)\n", + "- AOTAutograd(从正向计算图中生成反向计算图)\n", + "- Inductor/其他后端(给定计算图,如何在不同的设备上更快地运行它)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "这些组件将根据不同的后端选项被调用:\n", + "\n", + "- 当只使用 Dynamo 时,使用 `torch.compile(backend=\"eager\")`。\n", + "- 当使用 Dynamo 和 AOTAutograd 时,使用 `torch.compile(backend=\"aot_eager\")`。\n", + "- 默认情况下,使用 `torch.compile(backend=\"inductor\")`,这意味着同时使用 Dynamo、AOTAutograd 以及 PyTorch 内置的图优化后端 Inductor。" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## PyTorch 编译器是即时编译器\n", + "\n", + "首先需要了解的概念是,PyTorch 编译器是一种即时编译器(Just-In-Time)。那么,即时编译器是什么意思呢?来看例子:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "\n", + "class A(torch.nn.Module):\n", + " def forward(self, x):\n", + " return torch.exp(2 * x)\n", + "\n", + "class B(torch.nn.Module):\n", + " def forward(self, x):\n", + " return torch.exp(-x)\n", + "\n", + "def f(x, mod):\n", + " y = mod(x)\n", + " z = torch.log(y)\n", + " return z\n", + "\n", + "# users might use\n", + "# mod = A()\n", + "# x = torch.randn(5, 5, 5)\n", + "# output = f(x, mod)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "编写了函数 `f`,它包含模块调用,该调用将执行 `mod.forward`,以及 `torch.log` 调用。由于众所周知的代数简化恒等式 $\\log(\\exp(a\\times x))=a\\times 
x$,迫不及待地想要优化代码如下:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "def f(x, mod):\n", + " if isinstance(mod, A):\n", + " return 2 * x\n", + " elif isinstance(mod, B):\n", + " return -x" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "可以将其称为我们的第一个编译器,尽管它是由我们的大脑而不是自动化程序编译的。\n", + "\n", + "如果希望更加严谨,那么编译器示例应该更新如下:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "def f(x, mod):\n", + " if isinstance(x, torch.Tensor) and isinstance(mod, A):\n", + " return 2 * x\n", + " elif isinstance(x, torch.Tensor) and isinstance(mod, B):\n", + " return -x\n", + " else:\n", + " y = mod(x)\n", + " z = torch.log(y)\n", + " return z" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "需要检查每个参数,以确保优化条件是合理的,如果未能优化代码,还需要回退到原始代码。\n", + "\n", + "这引出了即时编译器中的两个基本概念:守卫和转换代码。**守卫**是函数可以被优化的条件,而 **转换代码** 则是在满足守卫条件下的函数优化版本。在上面简单的编译器示例中,`isinstance(mod, A)` 就是守卫,而 `return 2 * x` 则是相应的转换代码,它在守卫条件下与原始代码等效,但执行速度要快得多。\n", + "\n", + "上述例子是提前编译的编译器:检查所有可用的源代码,并在运行任何函数(即提前)之前,根据所有可能的守卫和转换代码编写优化后的函数。\n", + "\n", + "另一类编译器是即时编译器:就在函数执行之前,它会分析是否可以对执行进行优化,以及在什么条件下可以对函数执行进行优化。希望这个条件足够通用,以适应新的输入,从而使即时编译的好处大于成本。如果所有条件都失败,它将尝试在新的条件下优化代码。\n", + "\n", + "即时编译器的基本工作流程应该如下所示:\n", + "\n", + "```python\n", + "def f(x, mod):\n", + " for guard, transformed_code in f.compiled_entries:\n", + " if guard(x, mod):\n", + " return transformed_code(x, mod)\n", + " try:\n", + " guard, transformed_code = compile_and_optimize(x, mod)\n", + " f.compiled_entries.append([guard, transformed_code])\n", + " return transformed_code(x, mod)\n", + " except FailToCompileError:\n", + " y = mod(x)\n", + " z = torch.log(y)\n", + " return z\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "即时编译器(Just-In-Time Compiler)仅针对其已经观察到的情况进行优化。每当它遇到新的输入,而这个输入不满足任何现有的保护条件时,它就会为这个新输入编译出新的保护条件和转换后的代码。\n", + "\n", + "逐步解释编译器的状态(就保护条件和转换后的代码而言):\n", + "```python\n", + "import torch\n", + "\n", + "class A(torch.nn.Module):\n", + " def __init__(self):\n", + " super().__init__()\n", + "\n", + " def forward(self, x):\n", + " return torch.exp(2 * x)\n", + "\n", + "class B(torch.nn.Module):\n", + " def __init__(self):\n", + " super().__init__()\n", + "\n", + " def forward(self, x):\n", + " return torch.exp(-x)\n", + "\n", + "@just_in_time_compile # an imaginary compiler function\n", + "def f(x, mod):\n", + " y = mod(x)\n", + " z = torch.log(y)\n", + " return z\n", + "\n", + "a = A()\n", + "b = B()\n", + "x = torch.randn((5, 5, 5))\n", + "\n", + "# before executing f(x, a), f.compiled_entries == [] is empty.\n", + "f(x, a)\n", + "# after executing f(x, a), f.compiled_entries == [Guard(\"isinstance(x, torch.Tensor) and isinstance(mod, A)\"), TransformedCode(\"return 2 * x\")]\n", + "\n", + "# the second call of f(x, a) hit a condition, so we can just execute the transformed code\n", + "f(x, a)\n", + "\n", + "# f(x, b) will trigger compilation and add a new compiled entry\n", + "# before executing f(x, b), f.compiled_entries == [Guard(\"isinstance(x, torch.Tensor) and isinstance(mod, A)\"), TransformedCode(\"return 2 * x\")]\n", + "f(x, b)\n", + "# after executing f(x, b), f.compiled_entries == [Guard(\"isinstance(x, torch.Tensor) and isinstance(mod, A)\"), TransformedCode(\"return 2 * x\"), Guard(\"isinstance(x, torch.Tensor) and isinstance(mod, B)\"), TransformedCode(\"return -x\")]\n", + "\n", + "# the second call of f(x, b) hit a condition, so we can just execute the transformed 
code\n", + "f(x, b)\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "在这个示例中,我们对类类型进行防护检查,例如使用 `isinstance(mod, A)` 语句,而且转换后的代码仍然是 Python 代码;对于 torch.compile 来说,它需要对更多的条件进行防护,比如设备(CPU/GPU)、数据类型(int32, float32)、形状([10], [8]),而它的转换代码则是 Python 字节码。我们可以从函数中提取这些编译条目,更多细节请参阅 [PyTorch 文档](https://pytorch.org/docs/stable/torch.compiler_dynamo_deepdive.html)。尽管在防护和转换代码方面有所不同,但 `torch.compile` 的基本工作流程与本例相同,即它充当即时编译器。" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 超越代数简化的优化\n", + "上述例子是关于代数简化的。然而,这样的优化在实践中相当罕见。让我们来看更实际的例子,并了解 PyTorch 编译器是如何对以下代码进行优化的:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/media/pc/data/lxw/envs/anaconda3x/envs/xxx/lib/python3.12/site-packages/onnxscript/converter.py:820: FutureWarning: 'onnxscript.values.Op.param_schemas' is deprecated in version 0.1 and will be removed in the future. Please use '.op_signature' instead.\n", + " param_schemas = callee.param_schemas()\n", + "/media/pc/data/lxw/envs/anaconda3x/envs/xxx/lib/python3.12/site-packages/onnxscript/converter.py:820: FutureWarning: 'onnxscript.values.OnnxFunction.param_schemas' is deprecated in version 0.1 and will be removed in the future. Please use '.op_signature' instead.\n", + " param_schemas = callee.param_schemas()\n" + ] + } + ], + "source": [ + "import torch\n", + "\n", + "@torch.compile\n", + "def function(inputs):\n", + " x = inputs[\"x\"]\n", + " y = inputs[\"y\"]\n", + " x = x.cos().cos()\n", + " if x.mean() > 0.5:\n", + " x = x / 1.1\n", + " return x * y\n", + "\n", + "shape_10_inputs = {\"x\": torch.randn(10, requires_grad=True), \"y\": torch.randn(10, requires_grad=True)}\n", + "shape_8_inputs = {\"x\": torch.randn(8, requires_grad=True), \"y\": torch.randn(8, requires_grad=True)}\n", + "# warmup\n", + "for i in range(100):\n", + " output = function(shape_10_inputs)\n", + " output = function(shape_8_inputs)\n", + "\n", + "# execution of compiled functions\n", + "output = function(shape_10_inputs)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "代码尝试实现 $\\text{cos}(\\text{cos}(x))$ 激活函数,并根据其激活值调整输出的大小,然后将输出与另一个张量 `y` 相乘。" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Dynamo是如何转换和修改函数功能的?\n", + "\n", + "当理解 {func}`torch.compile` 作为即时编译器的整体图景后,可以更深入地探究其工作原理。与 `gcc` 或 `llvm` 这样的通用编译器不同,{func}`torch.compile` 是特定领域的编译器:它只专注于 PyTorch 相关的计算图。因此,需要工具来将用户的代码分为两部分:纯 Python 代码和计算图代码。\n", + "\n", + "Dynamo 就位于 {mod}`torch._dynamo` 模块内,是完成此任务的工具。通常不直接与这个模块交互。它是在 {func}`torch.compile` 函数内部被调用的。\n", + "\n", + "从概念上讲,Dynamo 执行以下操作:\n", + "\n", + "- 找到第一个无法在计算图中表示但需要计算图中计算值的算子(例如,打印张量的值,使用张量的值来决定 Python 中的 `if` 语句控制流)。\n", + "- 将前面的算子分成两部分:一个是纯粹关于张量计算的计算图,另一个是一些关于操纵 Python 对象的 Python 代码。\n", + "- 将剩余的算子保留为一两个新函数(称为 `resume` 函数),并再次触发上述分析。\n", + "\n", + "为了能够对函数进行这种细粒度的操作,Dynamo 在低于 Python 源代码级别的 Python 字节码层面运作。\n", + "\n", + "以下过程描述了 Dynamo 对函数所做的处理。![](https://depyf.readthedocs.io/en/latest/_images/dynamo-workflow.svg)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "`Dynamo` 的显著特性是它能够分析函数内部调用的所有函数。如果函数可以完全用计算图表示,那么这个函数的调用将被内联,从而消除该函数调用。\n", + "\n", + "Dynamo 的使命是以安全稳妥的方式从 Python 代码中提取计算图。一旦获得了计算图,就可以进入计算图优化的世界。" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "```{note}\n", + "上述工作流程包含许多难以理解的字节码。对于那些无法阅读 Python 字节码的人来说,`depyf` 可以提供帮助!\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 
动态形状支持来自 `Dynamo`\n", + "\n", + "深度学习编译器通常倾向于静态形状输入。这就是为什么上述保护条件包括形状保护的原因。第一次函数调用使用形状 `[10]` 的输入,但第二次函数调用使用的是形状 `[8]` 的输入。这将无法通过形状保护,因此触发新的代码转换。\n", + "\n", + "默认情况下,`Dynamo` 支持动态形状。当形状保护失败时,它会分析和比较形状,并尝试将形状泛化。在这种情况下,看到形状为 `[8]` 的输入后,它将尝试泛化为任意一维形状 `[s0]`,这被称为动态形状或符号形状。\n", + "\n", + "## `AOTAutograd`:从前向图生成反向计算图\n", + "\n", + "上述代码仅处理前向计算图。重要的缺失部分是如何获取反向计算图来计算梯度。\n", + "\n", + "在纯 PyTorch 代码中,反向计算是通过对某个标量损失值调用 `backward` 函数来触发的。每个 PyTorch 函数在前向计算期间存储了反向所需的信息。\n", + "\n", + "为了解释急切模式下反向期间发生了什么,有下面的实现,它模拟了 {func}`torch.cos` 函数的内置行为(需要一些关于如何在 PyTorch 中编写带有自动梯度支持的自定义函数的[背景知识](https://pytorch.org/docs/main/notes/extending.html#extending-torch-autograd)):" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "class Cosine(torch.autograd.Function):\n", + " @staticmethod\n", + " def forward(x0):\n", + " x1 = torch.cos(x0)\n", + " return x1, x0\n", + "\n", + " @staticmethod\n", + " def setup_context(ctx, inputs, output):\n", + " x1, x0 = output\n", + " print(f\"saving tensor of size {x0.shape}\")\n", + " ctx.save_for_backward(x0)\n", + "\n", + " @staticmethod\n", + " def backward(ctx, grad_output):\n", + " x0, = ctx.saved_tensors\n", + " result = (-torch.sin(x0)) * grad_output\n", + " return result\n", + "\n", + "# Wrap Cosine in a function so that it is clearer what the output is\n", + "def cosine(x):\n", + " # `apply` will call `forward` and `setup_context`\n", + " y, x= Cosine.apply(x)\n", + " return y\n", + "\n", + "def naive_two_cosine(x0):\n", + " x1 = cosine(x0)\n", + " x2 = cosine(x1)\n", + " return x2" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "在执行上述函数时,如果输入需要计算梯度,可以观察到有两个张量被保存下来:" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "saving tensor of size torch.Size([5, 5, 5])\n" + ] + }, + { + "ename": "RuntimeError", + "evalue": "A input that has been returned as-is as output is being saved for backward. This is not supported if you override setup_context. You should return and save a view of the input instead, e.g. 
with x.view_as(x) or setup ctx inside the forward function itself.", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mRuntimeError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[7], line 2\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;28minput\u001b[39m \u001b[38;5;241m=\u001b[39m torch\u001b[38;5;241m.\u001b[39mrandn((\u001b[38;5;241m5\u001b[39m, \u001b[38;5;241m5\u001b[39m, \u001b[38;5;241m5\u001b[39m), requires_grad\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m)\n\u001b[0;32m----> 2\u001b[0m output \u001b[38;5;241m=\u001b[39m naive_two_cosine(\u001b[38;5;28minput\u001b[39m)\n", + "Cell \u001b[0;32mIn[6], line 27\u001b[0m, in \u001b[0;36mnaive_two_cosine\u001b[0;34m(x0)\u001b[0m\n\u001b[1;32m 26\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mnaive_two_cosine\u001b[39m(x0):\n\u001b[0;32m---> 27\u001b[0m x1 \u001b[38;5;241m=\u001b[39m cosine(x0)\n\u001b[1;32m 28\u001b[0m x2 \u001b[38;5;241m=\u001b[39m cosine(x1)\n\u001b[1;32m 29\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m x2\n", + "Cell \u001b[0;32mIn[6], line 23\u001b[0m, in \u001b[0;36mcosine\u001b[0;34m(x)\u001b[0m\n\u001b[1;32m 21\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mcosine\u001b[39m(x):\n\u001b[1;32m 22\u001b[0m \u001b[38;5;66;03m# `apply` will call `forward` and `setup_context`\u001b[39;00m\n\u001b[0;32m---> 23\u001b[0m y, x\u001b[38;5;241m=\u001b[39m Cosine\u001b[38;5;241m.\u001b[39mapply(x)\n\u001b[1;32m 24\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m y\n", + "File \u001b[0;32m/media/pc/data/lxw/envs/anaconda3x/envs/xxx/lib/python3.12/site-packages/torch/autograd/function.py:575\u001b[0m, in \u001b[0;36mFunction.apply\u001b[0;34m(cls, *args, **kwargs)\u001b[0m\n\u001b[1;32m 572\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m torch\u001b[38;5;241m.\u001b[39m_C\u001b[38;5;241m.\u001b[39m_are_functorch_transforms_active():\n\u001b[1;32m 573\u001b[0m \u001b[38;5;66;03m# See NOTE: [functorch vjp and autograd interaction]\u001b[39;00m\n\u001b[1;32m 574\u001b[0m args \u001b[38;5;241m=\u001b[39m _functorch\u001b[38;5;241m.\u001b[39mutils\u001b[38;5;241m.\u001b[39munwrap_dead_wrappers(args)\n\u001b[0;32m--> 575\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28msuper\u001b[39m()\u001b[38;5;241m.\u001b[39mapply(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs) \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m 577\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m is_setup_ctx_defined:\n\u001b[1;32m 578\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mRuntimeError\u001b[39;00m(\n\u001b[1;32m 579\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mIn order to use an autograd.Function with functorch transforms \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 580\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m(vmap, grad, jvp, jacrev, ...), it must override the setup_context \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 581\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mstaticmethod. 
For more details, please see \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 582\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mhttps://pytorch.org/docs/main/notes/extending.func.html\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 583\u001b[0m )\n", + "\u001b[0;31mRuntimeError\u001b[0m: A input that has been returned as-is as output is being saved for backward. This is not supported if you override setup_context. You should return and save a view of the input instead, e.g. with x.view_as(x) or setup ctx inside the forward function itself." + ] + } + ], + "source": [ + "input = torch.randn((5, 5, 5), requires_grad=True)\n", + "output = naive_two_cosine(input)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "xxx", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.2" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/doc/ecosystem/flower/index.md b/doc/ecosystem/flower/index.md new file mode 100644 index 0000000..2202a2e --- /dev/null +++ b/doc/ecosystem/flower/index.md @@ -0,0 +1,3 @@ +# Flower 联邦学习框架 + +[Flower](https://flower.ai/docs/framework/main/zh_Hans/index.html) 将现有机器学习工作负载引入联邦环境的研究人员和开发人员。 diff --git a/doc/ecosystem/index.md b/doc/ecosystem/index.md new file mode 100644 index 0000000..5a65c9f --- /dev/null +++ b/doc/ecosystem/index.md @@ -0,0 +1,23 @@ +# PyTorch 生态 + +在 [ecosystem](https://pytorch.org/ecosystem/) 中,PyTorch 社区有很多优质开源项目,可以帮助你更好地利用 PyTorch 进行深度学习。 + +```{toctree} +ExecuTorch/index +torchao/index +ultralytics/index +torchtune/index +Captum/index +usb/index +Ray/index +Renate/index +TorchOpt/index +neural-compressor/index +OpenMMLab/index +flower/index +LightlySSL/index +TorchDrug/index +PyTorchVideo/index +TorchIO/index +depyf/index +``` diff --git a/doc/ecosystem/neural-compressor/index.md b/doc/ecosystem/neural-compressor/index.md new file mode 100644 index 0000000..14fea41 --- /dev/null +++ b/doc/ecosystem/neural-compressor/index.md @@ -0,0 +1,3 @@ +# Intel® Neural Compressor + +[Intel® Neural Compressor](https://intel.github.io/neural-compressor/latest/index.html) 目标是提供流行的模型压缩技术,如量化、剪枝(稀疏性)、蒸馏和神经架构搜索,在主流框架如 TensorFlow、PyTorch 和 ONNX Runtime 上,以及英特尔扩展,如 [TensorFlow 的英特尔扩展](https://github.com/intel/intel-extension-for-tensorflow)和 [PyTorch 的英特尔扩展](https://github.com/intel/intel-extension-for-pytorch)。 \ No newline at end of file diff --git a/doc/ecosystem/torchao/index.md b/doc/ecosystem/torchao/index.md new file mode 100644 index 0000000..67e3eb5 --- /dev/null +++ b/doc/ecosystem/torchao/index.md @@ -0,0 +1,7 @@ +# `torchao` + +PyTorch 原生架构优化:[`torchao`](https://github.com/pytorch/ao) + +原生的 PyTorch 库 `torchao`,通过利用低比特数据类型、量化和稀疏性技术,使得模型运行速度更快且体积更小。`torchao` 是易于访问的工具包,其中包含的技术大多以容易阅读的 PyTorch 代码编写,涵盖推断和训练两个方面。 + +详细介绍见博文:[PyTorch Native Architecture Optimization: torchao](https://pytorch.org/blog/pytorch-native-architecture-optimization/)。 diff --git a/doc/ecosystem/torchtune/index.md b/doc/ecosystem/torchtune/index.md new file mode 100644 index 0000000..152a5e6 --- /dev/null +++ b/doc/ecosystem/torchtune/index.md @@ -0,0 +1,5 @@ +# `torchtune` + +参考 [torchtune: Easily fine-tune LLMs using PyTorch](https://pytorch.org/blog/torchtune-fine-tune-llms/) + 
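+在命令行配方(recipe)之外,也可以从 Python 侧直接使用其模型构建器。下面是极简草图(假设性示例:`llama2_7b` 构建器名取自 torchtune 文档,随版本可能变化;真实微调通常通过 `tune` 命令行加 YAML 配置完成,见上面的博文):
+
+```python
+import torch
+from torchtune.models.llama2 import llama2_7b  # assumed builder name; check your installed version
+
+# Build the Llama-2-7B architecture on the "meta" device so no real memory is
+# allocated; actual runs load downloaded checkpoint weights via torchtune recipes.
+with torch.device("meta"):
+    model = llama2_7b()
+print(f"{sum(p.numel() for p in model.parameters()) / 1e9:.1f}B parameters")
+```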
+[`torchtune`](https://pytorch.org/torchtune) \ No newline at end of file diff --git a/doc/tools/ultralytics/.gitignore b/doc/ecosystem/ultralytics/.gitignore similarity index 100% rename from doc/tools/ultralytics/.gitignore rename to doc/ecosystem/ultralytics/.gitignore diff --git a/doc/tools/ultralytics/guides/analytics.md b/doc/ecosystem/ultralytics/guides/analytics.md similarity index 100% rename from doc/tools/ultralytics/guides/analytics.md rename to doc/ecosystem/ultralytics/guides/analytics.md diff --git a/doc/tools/ultralytics/guides/azureml-quickstart.md b/doc/ecosystem/ultralytics/guides/azureml-quickstart.md similarity index 100% rename from doc/tools/ultralytics/guides/azureml-quickstart.md rename to doc/ecosystem/ultralytics/guides/azureml-quickstart.md diff --git a/doc/tools/ultralytics/guides/conda-quickstart.md b/doc/ecosystem/ultralytics/guides/conda-quickstart.md similarity index 100% rename from doc/tools/ultralytics/guides/conda-quickstart.md rename to doc/ecosystem/ultralytics/guides/conda-quickstart.md diff --git a/doc/tools/ultralytics/guides/coral-edge-tpu-on-raspberry-pi.md b/doc/ecosystem/ultralytics/guides/coral-edge-tpu-on-raspberry-pi.md similarity index 100% rename from doc/tools/ultralytics/guides/coral-edge-tpu-on-raspberry-pi.md rename to doc/ecosystem/ultralytics/guides/coral-edge-tpu-on-raspberry-pi.md diff --git a/doc/tools/ultralytics/guides/data-collection-and-annotation.md b/doc/ecosystem/ultralytics/guides/data-collection-and-annotation.md similarity index 100% rename from doc/tools/ultralytics/guides/data-collection-and-annotation.md rename to doc/ecosystem/ultralytics/guides/data-collection-and-annotation.md diff --git a/doc/tools/ultralytics/guides/deepstream-nvidia-jetson.md b/doc/ecosystem/ultralytics/guides/deepstream-nvidia-jetson.md similarity index 100% rename from doc/tools/ultralytics/guides/deepstream-nvidia-jetson.md rename to doc/ecosystem/ultralytics/guides/deepstream-nvidia-jetson.md diff --git a/doc/tools/ultralytics/guides/defining-project-goals.md b/doc/ecosystem/ultralytics/guides/defining-project-goals.md similarity index 100% rename from doc/tools/ultralytics/guides/defining-project-goals.md rename to doc/ecosystem/ultralytics/guides/defining-project-goals.md diff --git a/doc/tools/ultralytics/guides/deploy.md b/doc/ecosystem/ultralytics/guides/deploy.md similarity index 100% rename from doc/tools/ultralytics/guides/deploy.md rename to doc/ecosystem/ultralytics/guides/deploy.md diff --git a/doc/tools/ultralytics/guides/distance-calculation.md b/doc/ecosystem/ultralytics/guides/distance-calculation.md similarity index 100% rename from doc/tools/ultralytics/guides/distance-calculation.md rename to doc/ecosystem/ultralytics/guides/distance-calculation.md diff --git a/doc/tools/ultralytics/guides/docker-quickstart.md b/doc/ecosystem/ultralytics/guides/docker-quickstart.md similarity index 100% rename from doc/tools/ultralytics/guides/docker-quickstart.md rename to doc/ecosystem/ultralytics/guides/docker-quickstart.md diff --git a/doc/tools/ultralytics/guides/edge.md b/doc/ecosystem/ultralytics/guides/edge.md similarity index 100% rename from doc/tools/ultralytics/guides/edge.md rename to doc/ecosystem/ultralytics/guides/edge.md diff --git a/doc/tools/ultralytics/guides/features.md b/doc/ecosystem/ultralytics/guides/features.md similarity index 100% rename from doc/tools/ultralytics/guides/features.md rename to doc/ecosystem/ultralytics/guides/features.md diff --git a/doc/tools/ultralytics/guides/heatmaps.md 
b/doc/ecosystem/ultralytics/guides/heatmaps.md similarity index 100% rename from doc/tools/ultralytics/guides/heatmaps.md rename to doc/ecosystem/ultralytics/guides/heatmaps.md diff --git a/doc/tools/ultralytics/guides/hyperparameter-tuning.md b/doc/ecosystem/ultralytics/guides/hyperparameter-tuning.md similarity index 100% rename from doc/tools/ultralytics/guides/hyperparameter-tuning.md rename to doc/ecosystem/ultralytics/guides/hyperparameter-tuning.md diff --git a/doc/tools/ultralytics/guides/index.md b/doc/ecosystem/ultralytics/guides/index.md similarity index 100% rename from doc/tools/ultralytics/guides/index.md rename to doc/ecosystem/ultralytics/guides/index.md diff --git a/doc/tools/ultralytics/guides/instance-segmentation-and-tracking.md b/doc/ecosystem/ultralytics/guides/instance-segmentation-and-tracking.md similarity index 100% rename from doc/tools/ultralytics/guides/instance-segmentation-and-tracking.md rename to doc/ecosystem/ultralytics/guides/instance-segmentation-and-tracking.md diff --git a/doc/tools/ultralytics/guides/intro.md b/doc/ecosystem/ultralytics/guides/intro.md similarity index 100% rename from doc/tools/ultralytics/guides/intro.md rename to doc/ecosystem/ultralytics/guides/intro.md diff --git a/doc/tools/ultralytics/guides/isolating-segmentation-objects.md b/doc/ecosystem/ultralytics/guides/isolating-segmentation-objects.md similarity index 100% rename from doc/tools/ultralytics/guides/isolating-segmentation-objects.md rename to doc/ecosystem/ultralytics/guides/isolating-segmentation-objects.md diff --git a/doc/tools/ultralytics/guides/kfold-cross-validation.md b/doc/ecosystem/ultralytics/guides/kfold-cross-validation.md similarity index 100% rename from doc/tools/ultralytics/guides/kfold-cross-validation.md rename to doc/ecosystem/ultralytics/guides/kfold-cross-validation.md diff --git a/doc/tools/ultralytics/guides/model-deployment-options.md b/doc/ecosystem/ultralytics/guides/model-deployment-options.md similarity index 100% rename from doc/tools/ultralytics/guides/model-deployment-options.md rename to doc/ecosystem/ultralytics/guides/model-deployment-options.md diff --git a/doc/tools/ultralytics/guides/model-deployment-practices.md b/doc/ecosystem/ultralytics/guides/model-deployment-practices.md similarity index 100% rename from doc/tools/ultralytics/guides/model-deployment-practices.md rename to doc/ecosystem/ultralytics/guides/model-deployment-practices.md diff --git a/doc/tools/ultralytics/guides/model-evaluation-insights.md b/doc/ecosystem/ultralytics/guides/model-evaluation-insights.md similarity index 100% rename from doc/tools/ultralytics/guides/model-evaluation-insights.md rename to doc/ecosystem/ultralytics/guides/model-evaluation-insights.md diff --git a/doc/tools/ultralytics/guides/model-monitoring-and-maintenance.md b/doc/ecosystem/ultralytics/guides/model-monitoring-and-maintenance.md similarity index 100% rename from doc/tools/ultralytics/guides/model-monitoring-and-maintenance.md rename to doc/ecosystem/ultralytics/guides/model-monitoring-and-maintenance.md diff --git a/doc/tools/ultralytics/guides/model-testing.md b/doc/ecosystem/ultralytics/guides/model-testing.md similarity index 100% rename from doc/tools/ultralytics/guides/model-testing.md rename to doc/ecosystem/ultralytics/guides/model-testing.md diff --git a/doc/tools/ultralytics/guides/model-training-tips.md b/doc/ecosystem/ultralytics/guides/model-training-tips.md similarity index 100% rename from doc/tools/ultralytics/guides/model-training-tips.md rename to 
doc/ecosystem/ultralytics/guides/model-training-tips.md diff --git a/doc/tools/ultralytics/guides/nvidia-jetson.md b/doc/ecosystem/ultralytics/guides/nvidia-jetson.md similarity index 100% rename from doc/tools/ultralytics/guides/nvidia-jetson.md rename to doc/ecosystem/ultralytics/guides/nvidia-jetson.md diff --git a/doc/tools/ultralytics/guides/object-blurring.md b/doc/ecosystem/ultralytics/guides/object-blurring.md similarity index 100% rename from doc/tools/ultralytics/guides/object-blurring.md rename to doc/ecosystem/ultralytics/guides/object-blurring.md diff --git a/doc/tools/ultralytics/guides/object-counting.md b/doc/ecosystem/ultralytics/guides/object-counting.md similarity index 100% rename from doc/tools/ultralytics/guides/object-counting.md rename to doc/ecosystem/ultralytics/guides/object-counting.md diff --git a/doc/tools/ultralytics/guides/object-cropping.md b/doc/ecosystem/ultralytics/guides/object-cropping.md similarity index 100% rename from doc/tools/ultralytics/guides/object-cropping.md rename to doc/ecosystem/ultralytics/guides/object-cropping.md diff --git a/doc/tools/ultralytics/guides/optimizing-openvino-latency-vs-throughput-modes.md b/doc/ecosystem/ultralytics/guides/optimizing-openvino-latency-vs-throughput-modes.md similarity index 100% rename from doc/tools/ultralytics/guides/optimizing-openvino-latency-vs-throughput-modes.md rename to doc/ecosystem/ultralytics/guides/optimizing-openvino-latency-vs-throughput-modes.md diff --git a/doc/tools/ultralytics/guides/parking-management.md b/doc/ecosystem/ultralytics/guides/parking-management.md similarity index 100% rename from doc/tools/ultralytics/guides/parking-management.md rename to doc/ecosystem/ultralytics/guides/parking-management.md diff --git a/doc/tools/ultralytics/guides/preprocessing_annotated_data.md b/doc/ecosystem/ultralytics/guides/preprocessing_annotated_data.md similarity index 100% rename from doc/tools/ultralytics/guides/preprocessing_annotated_data.md rename to doc/ecosystem/ultralytics/guides/preprocessing_annotated_data.md diff --git a/doc/tools/ultralytics/guides/project.md b/doc/ecosystem/ultralytics/guides/project.md similarity index 100% rename from doc/tools/ultralytics/guides/project.md rename to doc/ecosystem/ultralytics/guides/project.md diff --git a/doc/tools/ultralytics/guides/queue-management.md b/doc/ecosystem/ultralytics/guides/queue-management.md similarity index 100% rename from doc/tools/ultralytics/guides/queue-management.md rename to doc/ecosystem/ultralytics/guides/queue-management.md diff --git a/doc/tools/ultralytics/guides/raspberry-pi.md b/doc/ecosystem/ultralytics/guides/raspberry-pi.md similarity index 100% rename from doc/tools/ultralytics/guides/raspberry-pi.md rename to doc/ecosystem/ultralytics/guides/raspberry-pi.md diff --git a/doc/tools/ultralytics/guides/region-counting.md b/doc/ecosystem/ultralytics/guides/region-counting.md similarity index 100% rename from doc/tools/ultralytics/guides/region-counting.md rename to doc/ecosystem/ultralytics/guides/region-counting.md diff --git a/doc/tools/ultralytics/guides/ros-quickstart.md b/doc/ecosystem/ultralytics/guides/ros-quickstart.md similarity index 100% rename from doc/tools/ultralytics/guides/ros-quickstart.md rename to doc/ecosystem/ultralytics/guides/ros-quickstart.md diff --git a/doc/tools/ultralytics/guides/sahi-tiled-inference.md b/doc/ecosystem/ultralytics/guides/sahi-tiled-inference.md similarity index 100% rename from doc/tools/ultralytics/guides/sahi-tiled-inference.md rename to 
doc/ecosystem/ultralytics/guides/sahi-tiled-inference.md diff --git a/doc/tools/ultralytics/guides/security-alarm-system.md b/doc/ecosystem/ultralytics/guides/security-alarm-system.md similarity index 100% rename from doc/tools/ultralytics/guides/security-alarm-system.md rename to doc/ecosystem/ultralytics/guides/security-alarm-system.md diff --git a/doc/tools/ultralytics/guides/speed-estimation.md b/doc/ecosystem/ultralytics/guides/speed-estimation.md similarity index 100% rename from doc/tools/ultralytics/guides/speed-estimation.md rename to doc/ecosystem/ultralytics/guides/speed-estimation.md diff --git a/doc/tools/ultralytics/guides/steps-of-a-cv-project.md b/doc/ecosystem/ultralytics/guides/steps-of-a-cv-project.md similarity index 100% rename from doc/tools/ultralytics/guides/steps-of-a-cv-project.md rename to doc/ecosystem/ultralytics/guides/steps-of-a-cv-project.md diff --git a/doc/tools/ultralytics/guides/streamlit-live-inference.md b/doc/ecosystem/ultralytics/guides/streamlit-live-inference.md similarity index 100% rename from doc/tools/ultralytics/guides/streamlit-live-inference.md rename to doc/ecosystem/ultralytics/guides/streamlit-live-inference.md diff --git a/doc/tools/ultralytics/guides/triton-inference-server.md b/doc/ecosystem/ultralytics/guides/triton-inference-server.md similarity index 100% rename from doc/tools/ultralytics/guides/triton-inference-server.md rename to doc/ecosystem/ultralytics/guides/triton-inference-server.md diff --git a/doc/tools/ultralytics/guides/view-results-in-terminal.md b/doc/ecosystem/ultralytics/guides/view-results-in-terminal.md similarity index 100% rename from doc/tools/ultralytics/guides/view-results-in-terminal.md rename to doc/ecosystem/ultralytics/guides/view-results-in-terminal.md diff --git a/doc/tools/ultralytics/guides/vision-eye.md b/doc/ecosystem/ultralytics/guides/vision-eye.md similarity index 100% rename from doc/tools/ultralytics/guides/vision-eye.md rename to doc/ecosystem/ultralytics/guides/vision-eye.md diff --git a/doc/tools/ultralytics/guides/workouts-monitoring.md b/doc/ecosystem/ultralytics/guides/workouts-monitoring.md similarity index 100% rename from doc/tools/ultralytics/guides/workouts-monitoring.md rename to doc/ecosystem/ultralytics/guides/workouts-monitoring.md diff --git a/doc/tools/ultralytics/guides/yolo-common-issues.md b/doc/ecosystem/ultralytics/guides/yolo-common-issues.md similarity index 100% rename from doc/tools/ultralytics/guides/yolo-common-issues.md rename to doc/ecosystem/ultralytics/guides/yolo-common-issues.md diff --git a/doc/tools/ultralytics/guides/yolo-performance-metrics.md b/doc/ecosystem/ultralytics/guides/yolo-performance-metrics.md similarity index 100% rename from doc/tools/ultralytics/guides/yolo-performance-metrics.md rename to doc/ecosystem/ultralytics/guides/yolo-performance-metrics.md diff --git a/doc/tools/ultralytics/guides/yolo-thread-safe-inference.md b/doc/ecosystem/ultralytics/guides/yolo-thread-safe-inference.md similarity index 100% rename from doc/tools/ultralytics/guides/yolo-thread-safe-inference.md rename to doc/ecosystem/ultralytics/guides/yolo-thread-safe-inference.md diff --git a/doc/tools/ultralytics/images/bus.jpg b/doc/ecosystem/ultralytics/images/bus.jpg similarity index 100% rename from doc/tools/ultralytics/images/bus.jpg rename to doc/ecosystem/ultralytics/images/bus.jpg diff --git a/doc/tools/ultralytics/index.md b/doc/ecosystem/ultralytics/index.md similarity index 100% rename from doc/tools/ultralytics/index.md rename to 
doc/ecosystem/ultralytics/index.md diff --git a/doc/tools/ultralytics/intro.ipynb b/doc/ecosystem/ultralytics/intro.ipynb similarity index 100% rename from doc/tools/ultralytics/intro.ipynb rename to doc/ecosystem/ultralytics/intro.ipynb diff --git a/doc/tools/ultralytics/modes/benchmark.ipynb b/doc/ecosystem/ultralytics/modes/benchmark.ipynb similarity index 100% rename from doc/tools/ultralytics/modes/benchmark.ipynb rename to doc/ecosystem/ultralytics/modes/benchmark.ipynb diff --git a/doc/tools/ultralytics/modes/export.ipynb b/doc/ecosystem/ultralytics/modes/export.ipynb similarity index 100% rename from doc/tools/ultralytics/modes/export.ipynb rename to doc/ecosystem/ultralytics/modes/export.ipynb diff --git a/doc/tools/ultralytics/modes/index.md b/doc/ecosystem/ultralytics/modes/index.md similarity index 100% rename from doc/tools/ultralytics/modes/index.md rename to doc/ecosystem/ultralytics/modes/index.md diff --git a/doc/tools/ultralytics/modes/predict.ipynb b/doc/ecosystem/ultralytics/modes/predict.ipynb similarity index 100% rename from doc/tools/ultralytics/modes/predict.ipynb rename to doc/ecosystem/ultralytics/modes/predict.ipynb diff --git a/doc/tools/ultralytics/modes/set_env.py b/doc/ecosystem/ultralytics/modes/set_env.py similarity index 100% rename from doc/tools/ultralytics/modes/set_env.py rename to doc/ecosystem/ultralytics/modes/set_env.py diff --git a/doc/tools/ultralytics/modes/track.ipynb b/doc/ecosystem/ultralytics/modes/track.ipynb similarity index 100% rename from doc/tools/ultralytics/modes/track.ipynb rename to doc/ecosystem/ultralytics/modes/track.ipynb diff --git a/doc/tools/ultralytics/modes/train.ipynb b/doc/ecosystem/ultralytics/modes/train.ipynb similarity index 100% rename from doc/tools/ultralytics/modes/train.ipynb rename to doc/ecosystem/ultralytics/modes/train.ipynb diff --git a/doc/tools/ultralytics/modes/val.ipynb b/doc/ecosystem/ultralytics/modes/val.ipynb similarity index 100% rename from doc/tools/ultralytics/modes/val.ipynb rename to doc/ecosystem/ultralytics/modes/val.ipynb diff --git a/doc/tools/ultralytics/solutions/index.md b/doc/ecosystem/ultralytics/solutions/index.md similarity index 100% rename from doc/tools/ultralytics/solutions/index.md rename to doc/ecosystem/ultralytics/solutions/index.md diff --git a/doc/tools/ultralytics/tasks/classify.ipynb b/doc/ecosystem/ultralytics/tasks/classify.ipynb similarity index 100% rename from doc/tools/ultralytics/tasks/classify.ipynb rename to doc/ecosystem/ultralytics/tasks/classify.ipynb diff --git a/doc/tools/ultralytics/tasks/detect.ipynb b/doc/ecosystem/ultralytics/tasks/detect.ipynb similarity index 100% rename from doc/tools/ultralytics/tasks/detect.ipynb rename to doc/ecosystem/ultralytics/tasks/detect.ipynb diff --git a/doc/tools/ultralytics/tasks/index.md b/doc/ecosystem/ultralytics/tasks/index.md similarity index 100% rename from doc/tools/ultralytics/tasks/index.md rename to doc/ecosystem/ultralytics/tasks/index.md diff --git a/doc/tools/ultralytics/tasks/obb.ipynb b/doc/ecosystem/ultralytics/tasks/obb.ipynb similarity index 100% rename from doc/tools/ultralytics/tasks/obb.ipynb rename to doc/ecosystem/ultralytics/tasks/obb.ipynb diff --git a/doc/tools/ultralytics/tasks/pose.ipynb b/doc/ecosystem/ultralytics/tasks/pose.ipynb similarity index 100% rename from doc/tools/ultralytics/tasks/pose.ipynb rename to doc/ecosystem/ultralytics/tasks/pose.ipynb diff --git a/doc/tools/ultralytics/tasks/segment.ipynb b/doc/ecosystem/ultralytics/tasks/segment.ipynb similarity index 100% rename from 
doc/tools/ultralytics/tasks/segment.ipynb rename to doc/ecosystem/ultralytics/tasks/segment.ipynb diff --git a/doc/tools/ultralytics/tasks/set_env.py b/doc/ecosystem/ultralytics/tasks/set_env.py similarity index 100% rename from doc/tools/ultralytics/tasks/set_env.py rename to doc/ecosystem/ultralytics/tasks/set_env.py diff --git a/doc/tools/ultralytics/test.ipynb b/doc/ecosystem/ultralytics/test.ipynb similarity index 100% rename from doc/tools/ultralytics/test.ipynb rename to doc/ecosystem/ultralytics/test.ipynb diff --git a/doc/ecosystem/usb/index.md b/doc/ecosystem/usb/index.md new file mode 100644 index 0000000..280b0ff --- /dev/null +++ b/doc/ecosystem/usb/index.md @@ -0,0 +1,3 @@ +# Unified Semi-supervised Learning Benchmark + +[The Unified Semi-supervised Learning Benchmark](https://usb.readthedocs.io/en/main/index.html) (USB) is a modular, extensible codebase that bundles data pipelines and popular semi-supervised learning (SSL) algorithms to standardize SSL ablation studies. It also provides pre-trained versions of state-of-the-art neural models for computer vision (CV) tasks. It is easy to use and extend, affordable to run, and comprehensive, making it well suited for developing and evaluating SSL algorithms. USB ships implementations of 14 consistency-regularization-based SSL algorithms and 15 evaluation tasks drawn from computer vision, natural language processing, and audio. diff --git a/doc/index.md b/doc/index.md index 9f2a13e..c7d4771 100644 --- a/doc/index.md +++ b/doc/index.md @@ -7,12 +7,14 @@ ## 导航 ```{toctree} -:maxdepth: 1 +:maxdepth: 3 +datasets/index sr/index tips/index -tools/index +ecosystem/index appendix/index +news/index ``` ## 目录和索引 diff --git a/doc/news/index.md b/doc/news/index.md new file mode 100644 index 0000000..5b699bb --- /dev/null +++ b/doc/news/index.md @@ -0,0 +1,3 @@ +# News + +- [FlexAttention: The Flexibility of PyTorch with the Performance of FlashAttention](https://pytorch.org/blog/flexattention/) \ No newline at end of file diff --git a/doc/sr/SRCNN/code.ipynb b/doc/sr/SRCNN/code.ipynb index c3e4da9..8a15e36 100644 --- a/doc/sr/SRCNN/code.ipynb +++ b/doc/sr/SRCNN/code.ipynb @@ -71,37 +71,679 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 2, "metadata": {}, "outputs": [], "source": [ - "model = SRCNNNet()" + "import sys\n", + "from pathlib import Path\n", + "root_dir = Path(\".\").resolve().parents[2]\n", + "sys.path.extend([\n", + " f\"{root_dir}/src\",\n", + " f\"{root_dir}/tests\"\n", + "])\n", + "from env import temp_dir\n", + "(temp_dir/\"output\").mkdir(exist_ok=True)" ] }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 3, "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "SRCNNNet(\n", - " (img_upsampler): Upsample(scale_factor=4.0, mode='bicubic')\n", - " (conv1): Conv2d(3, 64, kernel_size=(9, 9), stride=(1, 1), padding=(4, 4))\n", - " (conv2): Conv2d(64, 32, kernel_size=(1, 1), stride=(1, 1))\n", - " (conv3): Conv2d(32, 3, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))\n", - " (relu): ReLU()\n", - ")" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" + "name": "stderr", + "output_type": "stream", + "text": [ + "/media/pc/data/lxw/envs/anaconda3x/envs/xxx/lib/python3.12/site-packages/mmengine/optim/optimizer/zero_optimizer.py:11: DeprecationWarning: `TorchScript` support for functional optimizers is deprecated and will be removed in a future PyTorch release. 
Consider using the `torch.compile` optimizer instead.\n", + " from torch.distributed.optim import \\\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "12/04 08:55:47 - mmengine - \u001b[4m\u001b[97mINFO\u001b[0m - \n", + "------------------------------------------------------------\n", + "System environment:\n", + " sys.platform: linux\n", + " Python: 3.12.2 | packaged by conda-forge | (main, Feb 16 2024, 20:50:58) [GCC 12.3.0]\n", + " CUDA available: True\n", + " MUSA available: False\n", + " numpy_random_seed: 1803196251\n", + " GPU 0: NVIDIA GeForce RTX 3090\n", + " GPU 1: NVIDIA GeForce RTX 2080 Ti\n", + " CUDA_HOME: /media/pc/data/lxw/envs/anaconda3x/envs/xxx\n", + " NVCC: Cuda compilation tools, release 12.6, V12.6.20\n", + " GCC: gcc (conda-forge gcc 12.4.0-0) 12.4.0\n", + " PyTorch: 2.5.0\n", + " PyTorch compiling details: PyTorch built with:\n", + " - GCC 9.3\n", + " - C++ Version: 201703\n", + " - Intel(R) oneAPI Math Kernel Library Version 2023.1-Product Build 20230303 for Intel(R) 64 architecture applications\n", + " - Intel(R) MKL-DNN v3.5.3 (Git Hash 66f0cb9eb66affd2da3bf5f8d897376f04aae6af)\n", + " - OpenMP 201511 (a.k.a. OpenMP 4.5)\n", + " - LAPACK is enabled (usually provided by MKL)\n", + " - NNPACK is enabled\n", + " - CPU capability usage: AVX2\n", + " - CUDA Runtime 12.4\n", + " - NVCC architecture flags: -gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_61,code=sm_61;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86;-gencode;arch=compute_90,code=sm_90\n", + " - CuDNN 90.1\n", + " - Magma 2.6.1\n", + " - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=12.4, CUDNN_VERSION=9.1.0, CXX_COMPILER=/opt/rh/devtoolset-9/root/usr/bin/c++, CXX_FLAGS= -D_GLIBCXX_USE_CXX11_ABI=0 -fabi-version=11 -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -DNDEBUG -DUSE_KINETO -DLIBKINETO_NOROCTRACER -DLIBKINETO_NOXPUPTI=ON -DUSE_FBGEMM -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -O2 -fPIC -Wall -Wextra -Werror=return-type -Werror=non-virtual-dtor -Werror=bool-operation -Wnarrowing -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-unused-parameter -Wno-strict-overflow -Wno-strict-aliasing -Wno-stringop-overflow -Wsuggest-override -Wno-psabi -Wno-error=old-style-cast -Wno-missing-braces -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, TORCH_VERSION=2.5.0, USE_CUDA=ON, USE_CUDNN=ON, USE_CUSPARSELT=1, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_GLOO=ON, USE_MKL=ON, USE_MKLDNN=ON, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, USE_ROCM=OFF, USE_ROCM_KERNEL_ASSERT=OFF, \n", + "\n", + " TorchVision: 0.20.0\n", + " OpenCV: 4.10.0\n", + " MMEngine: 0.10.5\n", + "\n", + "Runtime environment:\n", + " cudnn_benchmark: False\n", + " mp_cfg: {'mp_start_method': 'fork', 'opencv_num_threads': 4}\n", + " dist_cfg: {'backend': 'nccl'}\n", + " seed: 1803196251\n", + " Distributed launcher: none\n", + " Distributed training: False\n", + " GPU number: 1\n", + "------------------------------------------------------------\n", + "\n", + "12/04 08:55:48 - mmengine - \u001b[4m\u001b[97mINFO\u001b[0m - Config:\n", + "custom_hooks = [\n", + " dict(interval=1, 
type='BasicVisualizationHook'),\n", + "]\n", + "data_root = 'data'\n", + "dataset_type = 'BasicImageDataset'\n", + "default_hooks = dict(\n", + " checkpoint=dict(\n", + " by_epoch=False,\n", + " interval=5000,\n", + " max_keep_ckpts=10,\n", + " out_dir='./work_dirs/',\n", + " rule='greater',\n", + " save_best='PSNR',\n", + " save_optimizer=True,\n", + " type='CheckpointHook'),\n", + " logger=dict(interval=100, type='LoggerHook'),\n", + " param_scheduler=dict(type='ParamSchedulerHook'),\n", + " sampler_seed=dict(type='DistSamplerSeedHook'),\n", + " timer=dict(type='IterTimerHook'))\n", + "default_scope = 'mmagic'\n", + "div2k_data_root = 'data/DIV2K'\n", + "div2k_dataloader = dict(\n", + " dataset=dict(\n", + " ann_file='meta_info_DIV2K100sub_GT.txt',\n", + " data_prefix=dict(\n", + " gt='DIV2K_train_HR_sub', img='DIV2K_train_LR_bicubic/X4_sub'),\n", + " data_root='data/DIV2K',\n", + " metainfo=dict(dataset_type='div2k', task_name='sisr'),\n", + " pipeline=[\n", + " dict(\n", + " channel_order='rgb',\n", + " color_type='color',\n", + " imdecode_backend='cv2',\n", + " key='img',\n", + " type='LoadImageFromFile'),\n", + " dict(\n", + " channel_order='rgb',\n", + " color_type='color',\n", + " imdecode_backend='cv2',\n", + " key='gt',\n", + " type='LoadImageFromFile'),\n", + " dict(type='PackInputs'),\n", + " ],\n", + " type='BasicImageDataset'),\n", + " drop_last=False,\n", + " num_workers=4,\n", + " persistent_workers=False,\n", + " sampler=dict(shuffle=False, type='DefaultSampler'))\n", + "div2k_evaluator = dict(\n", + " metrics=[\n", + " dict(crop_border=4, prefix='DIV2K', type='PSNR'),\n", + " dict(crop_border=4, prefix='DIV2K', type='SSIM'),\n", + " ],\n", + " type='Evaluator')\n", + "env_cfg = dict(\n", + " cudnn_benchmark=False,\n", + " dist_cfg=dict(backend='nccl'),\n", + " mp_cfg=dict(mp_start_method='fork', opencv_num_threads=4))\n", + "experiment_name = 'srcnn_x4k915_1xb16-1000k_div2k'\n", + "load_from = None\n", + "log_level = 'INFO'\n", + "log_processor = dict(by_epoch=False, type='LogProcessor', window_size=100)\n", + "model = dict(\n", + " data_preprocessor=dict(\n", + " mean=[\n", + " 0.0,\n", + " 0.0,\n", + " 0.0,\n", + " ],\n", + " std=[\n", + " 255.0,\n", + " 255.0,\n", + " 255.0,\n", + " ],\n", + " type='DataPreprocessor'),\n", + " generator=dict(\n", + " channels=(\n", + " 3,\n", + " 64,\n", + " 32,\n", + " 3,\n", + " ),\n", + " kernel_sizes=(\n", + " 9,\n", + " 1,\n", + " 5,\n", + " ),\n", + " type='SRCNNNet',\n", + " upscale_factor=4),\n", + " pixel_loss=dict(loss_weight=1.0, reduction='mean', type='L1Loss'),\n", + " test_cfg=dict(crop_border=4, metrics=[\n", + " 'PSNR',\n", + " ]),\n", + " train_cfg=dict(),\n", + " type='BaseEditModel')\n", + "optim_wrapper = dict(\n", + " constructor='DefaultOptimWrapperConstructor',\n", + " optimizer=dict(betas=(\n", + " 0.9,\n", + " 0.999,\n", + " ), lr=0.0002, type='Adam'),\n", + " type='OptimWrapper')\n", + "param_scheduler = dict(\n", + " by_epoch=False,\n", + " eta_min=1e-07,\n", + " periods=[\n", + " 250000,\n", + " 250000,\n", + " 250000,\n", + " 250000,\n", + " ],\n", + " restart_weights=[\n", + " 1,\n", + " 1,\n", + " 1,\n", + " 1,\n", + " ],\n", + " type='CosineRestartLR')\n", + "resume = False\n", + "save_dir = './work_dirs/'\n", + "scale = 4\n", + "set14_data_root = 'data/Set14'\n", + "set14_dataloader = dict(\n", + " dataset=dict(\n", + " data_prefix=dict(gt='GTmod12', img='LRbicx4'),\n", + " data_root='data/Set14',\n", + " metainfo=dict(dataset_type='set14', task_name='sisr'),\n", + " pipeline=[\n", + " dict(\n", + " 
channel_order='rgb',\n", + " color_type='color',\n", + " imdecode_backend='cv2',\n", + " key='img',\n", + " type='LoadImageFromFile'),\n", + " dict(\n", + " channel_order='rgb',\n", + " color_type='color',\n", + " imdecode_backend='cv2',\n", + " key='gt',\n", + " type='LoadImageFromFile'),\n", + " dict(type='PackInputs'),\n", + " ],\n", + " type='BasicImageDataset'),\n", + " drop_last=False,\n", + " num_workers=4,\n", + " persistent_workers=False,\n", + " sampler=dict(shuffle=False, type='DefaultSampler'))\n", + "set14_evaluator = dict(\n", + " metrics=[\n", + " dict(crop_border=4, prefix='Set14', type='PSNR'),\n", + " dict(crop_border=4, prefix='Set14', type='SSIM'),\n", + " ],\n", + " type='Evaluator')\n", + "set5_data_root = 'data/Set5'\n", + "set5_dataloader = dict(\n", + " dataset=dict(\n", + " data_prefix=dict(gt='GTmod12', img='LRbicx4'),\n", + " data_root='data/Set5',\n", + " metainfo=dict(dataset_type='set5', task_name='sisr'),\n", + " pipeline=[\n", + " dict(\n", + " channel_order='rgb',\n", + " color_type='color',\n", + " imdecode_backend='cv2',\n", + " key='img',\n", + " type='LoadImageFromFile'),\n", + " dict(\n", + " channel_order='rgb',\n", + " color_type='color',\n", + " imdecode_backend='cv2',\n", + " key='gt',\n", + " type='LoadImageFromFile'),\n", + " dict(type='PackInputs'),\n", + " ],\n", + " type='BasicImageDataset'),\n", + " drop_last=False,\n", + " num_workers=4,\n", + " persistent_workers=False,\n", + " sampler=dict(shuffle=False, type='DefaultSampler'))\n", + "set5_evaluator = dict(\n", + " metrics=[\n", + " dict(crop_border=4, prefix='Set5', type='PSNR'),\n", + " dict(crop_border=4, prefix='Set5', type='SSIM'),\n", + " ],\n", + " type='Evaluator')\n", + "test_cfg = dict(type='MultiTestLoop')\n", + "test_dataloader = [\n", + " dict(\n", + " dataset=dict(\n", + " data_prefix=dict(gt='GTmod12', img='LRbicx4'),\n", + " data_root='data/Set5',\n", + " metainfo=dict(dataset_type='set5', task_name='sisr'),\n", + " pipeline=[\n", + " dict(\n", + " channel_order='rgb',\n", + " color_type='color',\n", + " imdecode_backend='cv2',\n", + " key='img',\n", + " type='LoadImageFromFile'),\n", + " dict(\n", + " channel_order='rgb',\n", + " color_type='color',\n", + " imdecode_backend='cv2',\n", + " key='gt',\n", + " type='LoadImageFromFile'),\n", + " dict(type='PackInputs'),\n", + " ],\n", + " type='BasicImageDataset'),\n", + " drop_last=False,\n", + " num_workers=4,\n", + " persistent_workers=False,\n", + " sampler=dict(shuffle=False, type='DefaultSampler')),\n", + " dict(\n", + " dataset=dict(\n", + " data_prefix=dict(gt='GTmod12', img='LRbicx4'),\n", + " data_root='data/Set14',\n", + " metainfo=dict(dataset_type='set14', task_name='sisr'),\n", + " pipeline=[\n", + " dict(\n", + " channel_order='rgb',\n", + " color_type='color',\n", + " imdecode_backend='cv2',\n", + " key='img',\n", + " type='LoadImageFromFile'),\n", + " dict(\n", + " channel_order='rgb',\n", + " color_type='color',\n", + " imdecode_backend='cv2',\n", + " key='gt',\n", + " type='LoadImageFromFile'),\n", + " dict(type='PackInputs'),\n", + " ],\n", + " type='BasicImageDataset'),\n", + " drop_last=False,\n", + " num_workers=4,\n", + " persistent_workers=False,\n", + " sampler=dict(shuffle=False, type='DefaultSampler')),\n", + " dict(\n", + " dataset=dict(\n", + " ann_file='meta_info_DIV2K100sub_GT.txt',\n", + " data_prefix=dict(\n", + " gt='DIV2K_train_HR_sub', img='DIV2K_train_LR_bicubic/X4_sub'),\n", + " data_root='data/DIV2K',\n", + " metainfo=dict(dataset_type='div2k', task_name='sisr'),\n", + " pipeline=[\n", + 
" dict(\n", + " channel_order='rgb',\n", + " color_type='color',\n", + " imdecode_backend='cv2',\n", + " key='img',\n", + " type='LoadImageFromFile'),\n", + " dict(\n", + " channel_order='rgb',\n", + " color_type='color',\n", + " imdecode_backend='cv2',\n", + " key='gt',\n", + " type='LoadImageFromFile'),\n", + " dict(type='PackInputs'),\n", + " ],\n", + " type='BasicImageDataset'),\n", + " drop_last=False,\n", + " num_workers=4,\n", + " persistent_workers=False,\n", + " sampler=dict(shuffle=False, type='DefaultSampler')),\n", + "]\n", + "test_evaluator = [\n", + " dict(\n", + " metrics=[\n", + " dict(crop_border=4, prefix='Set5', type='PSNR'),\n", + " dict(crop_border=4, prefix='Set5', type='SSIM'),\n", + " ],\n", + " type='Evaluator'),\n", + " dict(\n", + " metrics=[\n", + " dict(crop_border=4, prefix='Set14', type='PSNR'),\n", + " dict(crop_border=4, prefix='Set14', type='SSIM'),\n", + " ],\n", + " type='Evaluator'),\n", + " dict(\n", + " metrics=[\n", + " dict(crop_border=4, prefix='DIV2K', type='PSNR'),\n", + " dict(crop_border=4, prefix='DIV2K', type='SSIM'),\n", + " ],\n", + " type='Evaluator'),\n", + "]\n", + "test_pipeline = [\n", + " dict(\n", + " channel_order='rgb',\n", + " color_type='color',\n", + " imdecode_backend='cv2',\n", + " key='img',\n", + " type='LoadImageFromFile'),\n", + " dict(\n", + " channel_order='rgb',\n", + " color_type='color',\n", + " imdecode_backend='cv2',\n", + " key='gt',\n", + " type='LoadImageFromFile'),\n", + " dict(type='PackInputs'),\n", + "]\n", + "train_cfg = dict(\n", + " max_iters=1000000, type='IterBasedTrainLoop', val_interval=5000)\n", + "train_dataloader = dict(\n", + " batch_size=16,\n", + " dataset=dict(\n", + " ann_file='meta_info_DIV2K800sub_GT.txt',\n", + " data_prefix=dict(\n", + " gt='DIV2K_train_HR_sub', img='DIV2K_train_LR_bicubic/X4_sub'),\n", + " data_root='data/DIV2K',\n", + " filename_tmpl=dict(gt='{}', img='{}'),\n", + " metainfo=dict(dataset_type='div2k', task_name='sisr'),\n", + " pipeline=[\n", + " dict(\n", + " channel_order='rgb',\n", + " color_type='color',\n", + " imdecode_backend='cv2',\n", + " key='img',\n", + " type='LoadImageFromFile'),\n", + " dict(\n", + " channel_order='rgb',\n", + " color_type='color',\n", + " imdecode_backend='cv2',\n", + " key='gt',\n", + " type='LoadImageFromFile'),\n", + " dict(dictionary=dict(scale=4), type='SetValues'),\n", + " dict(gt_patch_size=128, type='PairedRandomCrop'),\n", + " dict(\n", + " direction='horizontal',\n", + " flip_ratio=0.5,\n", + " keys=[\n", + " 'img',\n", + " 'gt',\n", + " ],\n", + " type='Flip'),\n", + " dict(\n", + " direction='vertical',\n", + " flip_ratio=0.5,\n", + " keys=[\n", + " 'img',\n", + " 'gt',\n", + " ],\n", + " type='Flip'),\n", + " dict(\n", + " keys=[\n", + " 'img',\n", + " 'gt',\n", + " ],\n", + " transpose_ratio=0.5,\n", + " type='RandomTransposeHW'),\n", + " dict(type='PackInputs'),\n", + " ],\n", + " type='BasicImageDataset'),\n", + " num_workers=4,\n", + " persistent_workers=False,\n", + " sampler=dict(shuffle=True, type='InfiniteSampler'))\n", + "train_pipeline = [\n", + " dict(\n", + " channel_order='rgb',\n", + " color_type='color',\n", + " imdecode_backend='cv2',\n", + " key='img',\n", + " type='LoadImageFromFile'),\n", + " dict(\n", + " channel_order='rgb',\n", + " color_type='color',\n", + " imdecode_backend='cv2',\n", + " key='gt',\n", + " type='LoadImageFromFile'),\n", + " dict(dictionary=dict(scale=4), type='SetValues'),\n", + " dict(gt_patch_size=128, type='PairedRandomCrop'),\n", + " dict(\n", + " direction='horizontal',\n", + " 
flip_ratio=0.5,\n", + " keys=[\n", + " 'img',\n", + " 'gt',\n", + " ],\n", + " type='Flip'),\n", + " dict(\n", + " direction='vertical',\n", + " flip_ratio=0.5,\n", + " keys=[\n", + " 'img',\n", + " 'gt',\n", + " ],\n", + " type='Flip'),\n", + " dict(keys=[\n", + " 'img',\n", + " 'gt',\n", + " ], transpose_ratio=0.5, type='RandomTransposeHW'),\n", + " dict(type='PackInputs'),\n", + "]\n", + "val_cfg = dict(type='MultiValLoop')\n", + "val_dataloader = dict(\n", + " dataset=dict(\n", + " data_prefix=dict(gt='GTmod12', img='LRbicx4'),\n", + " data_root='data/Set5',\n", + " metainfo=dict(dataset_type='set5', task_name='sisr'),\n", + " pipeline=[\n", + " dict(\n", + " channel_order='rgb',\n", + " color_type='color',\n", + " imdecode_backend='cv2',\n", + " key='img',\n", + " type='LoadImageFromFile'),\n", + " dict(\n", + " channel_order='rgb',\n", + " color_type='color',\n", + " imdecode_backend='cv2',\n", + " key='gt',\n", + " type='LoadImageFromFile'),\n", + " dict(type='PackInputs'),\n", + " ],\n", + " type='BasicImageDataset'),\n", + " drop_last=False,\n", + " num_workers=4,\n", + " persistent_workers=False,\n", + " sampler=dict(shuffle=False, type='DefaultSampler'))\n", + "val_evaluator = dict(\n", + " metrics=[\n", + " dict(type='MAE'),\n", + " dict(crop_border=4, type='PSNR'),\n", + " dict(crop_border=4, type='SSIM'),\n", + " ],\n", + " type='Evaluator')\n", + "val_pipeline = [\n", + " dict(\n", + " channel_order='rgb',\n", + " color_type='color',\n", + " imdecode_backend='cv2',\n", + " key='img',\n", + " type='LoadImageFromFile'),\n", + " dict(\n", + " channel_order='rgb',\n", + " color_type='color',\n", + " imdecode_backend='cv2',\n", + " key='gt',\n", + " type='LoadImageFromFile'),\n", + " dict(type='PackInputs'),\n", + "]\n", + "vis_backends = [\n", + " dict(type='LocalVisBackend'),\n", + "]\n", + "visualizer = dict(\n", + " bgr2rgb=True,\n", + " fn_key='gt_path',\n", + " img_keys=[\n", + " 'gt_img',\n", + " 'input',\n", + " 'pred_img',\n", + " ],\n", + " type='ConcatImageVisualizer',\n", + " vis_backends=[\n", + " dict(type='LocalVisBackend'),\n", + " ])\n", + "work_dir = './work_dirs/srcnn_x4k915_1xb16-1000k_div2k'\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2024-12-04 08:56:04.341525: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:485] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n", + "2024-12-04 08:56:04.843636: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:8454] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n", + "2024-12-04 08:56:05.001130: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1452] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n", + "2024-12-04 08:56:06.543236: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n", + "To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n", + "2024-12-04 08:56:23.014523: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "12/04 08:57:32 - mmengine - \u001b[4m\u001b[97mINFO\u001b[0m - Distributed training is not used, all SyncBatchNorm (SyncBN) 
layers in the model will be automatically reverted to BatchNormXd layers if they are used.\n", + "12/04 08:57:32 - mmengine - \u001b[4m\u001b[97mINFO\u001b[0m - Hooks will be executed in the following order:\n", + "before_run:\n", + "(VERY_HIGH ) RuntimeInfoHook \n", + "(BELOW_NORMAL) LoggerHook \n", + " -------------------- \n", + "before_train:\n", + "(VERY_HIGH ) RuntimeInfoHook \n", + "(NORMAL ) IterTimerHook \n", + "(VERY_LOW ) CheckpointHook \n", + " -------------------- \n", + "before_train_epoch:\n", + "(VERY_HIGH ) RuntimeInfoHook \n", + "(NORMAL ) IterTimerHook \n", + "(NORMAL ) DistSamplerSeedHook \n", + " -------------------- \n", + "before_train_iter:\n", + "(VERY_HIGH ) RuntimeInfoHook \n", + "(NORMAL ) IterTimerHook \n", + " -------------------- \n", + "after_train_iter:\n", + "(VERY_HIGH ) RuntimeInfoHook \n", + "(NORMAL ) IterTimerHook \n", + "(NORMAL ) BasicVisualizationHook \n", + "(BELOW_NORMAL) LoggerHook \n", + "(LOW ) ParamSchedulerHook \n", + "(VERY_LOW ) CheckpointHook \n", + " -------------------- \n", + "after_train_epoch:\n", + "(NORMAL ) IterTimerHook \n", + "(LOW ) ParamSchedulerHook \n", + "(VERY_LOW ) CheckpointHook \n", + " -------------------- \n", + "before_val:\n", + "(VERY_HIGH ) RuntimeInfoHook \n", + " -------------------- \n", + "before_val_epoch:\n", + "(NORMAL ) IterTimerHook \n", + " -------------------- \n", + "before_val_iter:\n", + "(NORMAL ) IterTimerHook \n", + " -------------------- \n", + "after_val_iter:\n", + "(NORMAL ) IterTimerHook \n", + "(NORMAL ) BasicVisualizationHook \n", + "(BELOW_NORMAL) LoggerHook \n", + " -------------------- \n", + "after_val_epoch:\n", + "(VERY_HIGH ) RuntimeInfoHook \n", + "(NORMAL ) IterTimerHook \n", + "(BELOW_NORMAL) LoggerHook \n", + "(LOW ) ParamSchedulerHook \n", + "(VERY_LOW ) CheckpointHook \n", + " -------------------- \n", + "after_val:\n", + "(VERY_HIGH ) RuntimeInfoHook \n", + " -------------------- \n", + "after_train:\n", + "(VERY_HIGH ) RuntimeInfoHook \n", + "(VERY_LOW ) CheckpointHook \n", + " -------------------- \n", + "before_test:\n", + "(VERY_HIGH ) RuntimeInfoHook \n", + " -------------------- \n", + "before_test_epoch:\n", + "(NORMAL ) IterTimerHook \n", + " -------------------- \n", + "before_test_iter:\n", + "(NORMAL ) IterTimerHook \n", + " -------------------- \n", + "after_test_iter:\n", + "(NORMAL ) IterTimerHook \n", + "(NORMAL ) BasicVisualizationHook \n", + "(BELOW_NORMAL) LoggerHook \n", + " -------------------- \n", + "after_test_epoch:\n", + "(VERY_HIGH ) RuntimeInfoHook \n", + "(NORMAL ) IterTimerHook \n", + "(BELOW_NORMAL) LoggerHook \n", + " -------------------- \n", + "after_test:\n", + "(VERY_HIGH ) RuntimeInfoHook \n", + " -------------------- \n", + "after_run:\n", + "(BELOW_NORMAL) LoggerHook \n", + " -------------------- \n" + ] + } + ], + "source": [ + "from torch import nn\n", + "from mmengine.config import Config\n", + "# from mmengine.registry import OPTIMIZERS\n", + "cfg_path = temp_dir/\"mmagic/configs/srcnn/srcnn_x4k915_1xb16-1000k_div2k.py\"\n", + "cfg = Config.fromfile(cfg_path)\n", + "# model = nn.Conv2d(1, 1, 1)\n", + "# cfg.optimizer.params = model.parameters()\n", + "# optimizer = OPTIMIZERS.build(cfg.optimizer)\n", + "from mmengine.runner import Runner\n", + "\n", + "runner = Runner.from_cfg(cfg)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "ename": "AttributeError", + "evalue": "'Runner' object has no attribute 'plot_learning_curves'", + "output_type": "error", + "traceback": [ + 
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[5], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m runner\u001b[38;5;241m.\u001b[39mplot_learning_curves(model)\n", + "\u001b[0;31mAttributeError\u001b[0m: 'Runner' object has no attribute 'plot_learning_curves'" + ] } ], "source": [ - "model" + "runner.plot_learning_curves(model)" ] }, { @@ -114,7 +756,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "xxx", "language": "python", "name": "python3" }, diff --git a/doc/tools/index.md b/doc/tools/index.md deleted file mode 100644 index ae194ee..0000000 --- a/doc/tools/index.md +++ /dev/null @@ -1,8 +0,0 @@ -# 工具箱 - -```{toctree} -mmengine/index -mmagic/index -mmdesign/index -ultralytics/index -``` diff --git a/pyproject.toml b/pyproject.toml index 41ecd3b..172b5ac 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,6 +16,8 @@ dependencies = [ "onnx", "onnxscript", "onnxruntime", + "tables", + "imageio", ] maintainers = [ @@ -60,6 +62,18 @@ doc = [ "timm", "av", "click", # required by mmagic/utils/io_utils.py +] + +coverage = [ + "pytest-cov", + "pytest-regressions", + "codecov", + "torch_book[test]", +] + +dev = [ + "pre-commit", + "torch_book[coverage]", "controlnet_aux", "diffusers>=0.23.0", "einops", @@ -85,17 +99,6 @@ doc = [ "tensorflow", "onnx", ] - -coverage = [ - "pytest-cov", - "pytest-regressions", - "codecov", - "torch_book[test]", -] -dev = [ - "pre-commit", - "torch_book[coverage]", -] test = [ "pytest", "torch_book[doc]", diff --git a/tests/env.py b/tests/env.py index f4c4f82..e566111 100644 --- a/tests/env.py +++ b/tests/env.py @@ -1,6 +1,6 @@ from pathlib import Path import sys -import tools.set_tensorflow +# import tools.set_tensorflow root_dir = Path(__file__).resolve().parent temp_dir = root_dir/".temp/tasks" temp_dir.mkdir(exist_ok=True)