diff --git a/.gitignore b/.gitignore index c7081c4e2e..410875fa26 100644 --- a/.gitignore +++ b/.gitignore @@ -66,4 +66,4 @@ typings/ # Python cache files __pycache__ build -*.egg-info \ No newline at end of file +*.egg-info diff --git a/docs/HowToChooseTuner.md b/docs/HowToChooseTuner.md index c162febdbd..c23edf8b71 100644 --- a/docs/HowToChooseTuner.md +++ b/docs/HowToChooseTuner.md @@ -10,6 +10,7 @@ For now, NNI has supported the following tuner algorithms. Note that NNI install - [Batch Tuner](#Batch) - [Grid Search](#Grid) - [Hyperband](#Hyperband) + - [Network Morphism](#NetworkMorphism) ## Supported tuner algorithms @@ -22,11 +23,11 @@ We will introduce some basic knowledge about the tuner algorithms, suggested sce The Tree-structured Parzen Estimator (TPE) is a sequential model-based optimization (SMBO) approach. SMBO methods sequentially construct models to approximate the performance of hyperparameters based on historical measurements, and then subsequently choose new hyperparameters to test based on this model. The TPE approach models P(x|y) and P(y) where x represents hyperparameters and y the associated evaluate matric. P(x|y) is modeled by transforming the generative process of hyperparameters, replacing the distributions of the configuration prior with non-parametric densities. This optimization approach is described in detail in [Algorithms for Hyper-Parameter Optimization][1]. - +​ _Suggested scenario_: TPE, as a black-box optimization, can be used in various scenarios, and shows good performance in general. Especially when you have limited computation resource and can only try a small number of trials. From a large amount of experiments, we could found that TPE is far better than Random Search. 
_Usage_: -``` +```yaml # config.yaml tuner: builtinTunerName: TPE @@ -43,7 +44,7 @@ In [Random Search for Hyper-Parameter Optimization][2] show that Random Search m _Suggested scenario_: Random search is suggested when each trial does not take too long (e.g., each trial can be completed very soon, or early stopped by assessor quickly), and you have enough computation resource. Or you want to uniformly explore the search space. Random Search could be considered as baseline of search algorithm. _Usage_: -``` +```yaml # config.yaml tuner: builtinTunerName: Random @@ -57,7 +58,7 @@ This simple annealing algorithm begins by sampling from the prior, but tends ove _Suggested scenario_: Anneal is suggested when each trial does not take too long, and you have enough computation resource(almost same with Random Search). Or the variables in search space could be sample from some prior distribution. _Usage_: -``` +```yaml # config.yaml tuner: builtinTunerName: Anneal @@ -74,7 +75,7 @@ Naive Evolution comes from [Large-Scale Evolution of Image Classifiers][3]. It r _Suggested scenario_: Its requirement of computation resource is relatively high. Specifically, it requires large inital population to avoid falling into local optimum. If your trial is short or leverages assessor, this tuner is a good choice. And, it is more suggested when your trial code supports weight transfer, that is, the trial could inherit the converged weights from its parent(s). This can greatly speed up the training progress. _Usage_: -``` +```yaml # config.yaml tuner: builtinTunerName: Evolution @@ -93,7 +94,7 @@ Note that SMAC on nni only supports a subset of the types in [search space spec] _Suggested scenario_: Similar to TPE, SMAC is also a black-box tuner which can be tried in various scenarios, and is suggested when computation resource is limited. It is optimized for discrete hyperparameters, thus, suggested when most of your hyperparameters are discrete. 
_Usage_: -``` +```yaml # config.yaml tuner: builtinTunerName: SMAC @@ -110,14 +111,14 @@ Batch tuner allows users to simply provide several configurations (i.e., choices _Suggested sceanrio_: If the configurations you want to try have been decided, you can list them in searchspace file (using `choice`) and run them using batch tuner. _Usage_: -``` +```yaml # config.yaml tuner: builtinTunerName: BatchTuner ``` Note that the search space that BatchTuner supported like: -``` +```json { "combine_params": { @@ -133,7 +134,6 @@ Note that the search space that BatchTuner supported like: ``` The search space file including the high-level key `combine_params`. The type of params in search space must be `choice` and the `values` including all the combined-params value. - **Grid Search** @@ -143,7 +143,7 @@ Note that the only acceptable types of search space are `choice`, `quniform`, `q _Suggested scenario_: It is suggested when search space is small, it is feasible to exhaustively sweeping the whole search space. _Usage_: -``` +```yaml # config.yaml tuner: builtinTunerName: GridSearch @@ -152,12 +152,12 @@ _Usage_: **Hyperband** -[Hyperband][6] tries to use limited resource to explore as many configurations as possible, and finds out the promising ones to get the final result. The basic idea is generating many configurations and to run them for small number of STEPs to find out promising one, then further training those promising ones to select several more promising one. More detail can be refered to [here](../src/sdk/pynni/nni/hyperband_advisor/README.md) +[Hyperband][6] tries to use limited resource to explore as many configurations as possible, and finds out the promising ones to get the final result. The basic idea is generating many configurations and to run them for small number of STEPs to find out promising one, then further training those promising ones to select several more promising one. 
More detail can be referred to [here](../src/sdk/pynni/nni/hyperband_advisor/README.md). _Suggested scenario_: It is suggested when you have limited computation resource but have relatively large search space. It performs good in the scenario that intermediate result (e.g., accuracy) can reflect good or bad of final result (e.g., accuracy) to some extent. _Usage_: -``` +```yaml # config.yaml advisor: builtinAdvisorName: Hyperband @@ -171,6 +171,31 @@ _Usage_: eta: 3 ``` + +**Network Morphism** + +[Network Morphism](7) provides functions to automatically search for architecture of deep learning models. Every child network inherits the knowledge from its parent network and morphs into diverse types of networks, including changes of depth, width and skip-connection. Next, it estimates the value of child network using the history architecture and metric pairs. Then it selects the most promising one to train. More detail can be referred to [here](../src/sdk/pynni/nni/networkmorphism_tuner/README.md). + +_Suggested scenario_: It is suggested that you want to apply deep learning methods to your task (your own dataset) but you have no idea of how to choose or design a network. You modify the [example](../examples/trials/network_morphism/cifar10/cifar10_keras.py) to fit your own dataset and your own data augmentation method. Also you can change the batch size, learning rate or optimizer. It is feasible for different tasks to find a good network architecture. Now this tuner only supports the cv domain. + +_Usage_: +```yaml + # config.yaml + tuner: + builtinTunerName: NetworkMorphism + classArgs: + #choice: maximize, minimize + optimize_mode: maximize + #for now, this tuner only supports cv domain + task: cv + #input image width + input_width: 32 + #input image channel + input_channel: 3 + #number of classes + n_output_node: 10 +``` + # How to use Assessor that NNI supports? @@ -184,12 +209,12 @@ For now, NNI has supported the following assessor algorithms. 
**Medianstop** -Medianstop is a simple early stopping rule mentioned in the [paper][7]. It stops a pending trial X at step S if the trial’s best objective value by step S is strictly worse than the median value of the running averages of all completed trials’ objectives reported up to step S. +Medianstop is a simple early stopping rule mentioned in the [paper][8]. It stops a pending trial X at step S if the trial’s best objective value by step S is strictly worse than the median value of the running averages of all completed trials’ objectives reported up to step S. _Suggested scenario_: It is applicable in a wide range of performance curves, thus, can be used in various scenarios to speed up the tuning progress. _Usage_: -``` +```yaml assessor: builtinAssessorName: Medianstop classArgs: @@ -201,10 +226,11 @@ _Usage_: start_step: 5 ``` - [1]: https://papers.nips.cc/paper/4443-algorithms-for-hyper-parameter-optimization.pdf - [2]: http://www.jmlr.org/papers/volume13/bergstra12a/bergstra12a.pdf - [3]: https://arxiv.org/pdf/1703.01041.pdf - [4]: https://www.cs.ubc.ca/~hutter/papers/10-TR-SMAC.pdf - [5]: https://github.com/automl/SMAC3 - [6]: https://arxiv.org/pdf/1603.06560.pdf - [7]: https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/46180.pdf \ No newline at end of file +[1]: https://papers.nips.cc/paper/4443-algorithms-for-hyper-parameter-optimization.pdf +[2]: http://www.jmlr.org/papers/volume13/bergstra12a/bergstra12a.pdf +[3]: https://arxiv.org/pdf/1703.01041.pdf +[4]: https://www.cs.ubc.ca/~hutter/papers/10-TR-SMAC.pdf +[5]: https://github.com/automl/SMAC3 +[6]: https://arxiv.org/pdf/1603.06560.pdf +[7]: https://arxiv.org/abs/1806.10282 +[8]: https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/46180.pdf \ No newline at end of file diff --git a/docs/KubeflowMode.md b/docs/KubeflowMode.md index 3034fe8593..3b2e18d4d1 100644 --- a/docs/KubeflowMode.md +++ b/docs/KubeflowMode.md @@ -1,115 +1,115 @@ -**Run an 
Experiment on Kubeflow** -=== -Now NNI supports running experiment on [Kubeflow](https://github.com/kubeflow/kubeflow), called kubeflow mode. Before starting to use NNI kubeflow mode, you should have a kubernetes cluster, either on-prem or [Azure Kubernetes Service(AKS)](https://azure.microsoft.com/en-us/services/kubernetes-service/), a Ubuntu machine on which [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) is installed and configured to connect to your kubernetes cluster. If you are not familiar with kubernetes, [here](https://kubernetes.io/docs/tutorials/kubernetes-basics/) is a goot start. In kubeflow mode, your trial program will run as kubeflow job in kubernetes cluster. - -## Prerequisite for on-premises Kubernetes Service -1. A **Kubernetes** cluster using Kubernetes 1.8 or later. Follow this [guideline](https://kubernetes.io/docs/setup/) to set up Kubernetes -2. Download, set up, and deploy **Kubelow** to your Kubernetes cluster. Follow this [guideline](https://www.kubeflow.org/docs/started/getting-started/) to set up Kubeflow -3. Install **kubectl**, and configure to connect to your Kubernetes API server. Follow this [guideline](https://kubernetes.io/docs/tasks/tools/install-kubectl/) to install kubectl on Ubuntu -4. If your NNI trial job needs GPU resource, you should follow this [guideline](https://github.com/NVIDIA/k8s-device-plugin) to configure **Nvidia device plugin for Kubernetes**. -5. Install **NFS server** and export a general purpose mount (we recommend to map your NFS server path in `root_squash option`, otherwise permission issue may raise when nni copy files to NFS. Refer this [page](https://linux.die.net/man/5/exports) to learn what root_squash option is), or **Azure File Storage**. -6. Install **NFS client** on the machine where you install NNI and run nnictl to create experiment. Run this command to install NFSv4 client: - ``` - apt-get install nfs-common - ``` - -7. 
Install **NNI**, follow the install guide [here](GetStarted.md). - -## Prerequisite for Azure Kubernetes Service -1. NNI support kubeflow based on Azure Kubernetes Service, follow the [guideline](https://azure.microsoft.com/en-us/services/kubernetes-service/) to set up Azure Kubernetes Service. -2. Install [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest) and __kubectl__. Use `az login` to set azure account, and connect kubectl client to AKS, [refer](https://docs.microsoft.com/en-us/azure/aks/kubernetes-walkthrough#connect-to-the-cluster). -3. Deploy kubeflow on Azure Kubernetes Service, follow the [guideline](https://www.kubeflow.org/docs/started/getting-started/). -4. Follow the [guideline](https://docs.microsoft.com/en-us/azure/storage/common/storage-quickstart-create-account?tabs=portal) to create azure file storage account. If you use Azure Kubernetes Service, nni need Azure Storage Service to store code files and the output files. -5. To access Azure storage service, nni need the access key of the storage account, and nni use [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/) Service to protect your private key. Set up Azure Key Vault Service, add a secret to Key Vault to store the access key of Azure storage account. Follow this [guideline](https://docs.microsoft.com/en-us/azure/key-vault/quick-create-cli) to store the access key. - -## Design -TODO - -## Run an experiment -Use `examples/trials/mnist` as an example. 
The nni config yaml file's content is like: -``` -authorName: your_name -experimentName: example_mnist -# how many trials could be concurrently running -trialConcurrency: 4 -# maximum experiment running duration -maxExecDuration: 3h -# empty means never stop -maxTrialNum: 100 -# choice: local, remote, pai, kubeflow -trainingServicePlatform: kubeflow -# choice: true, false -useAnnotation: false -tuner: - builtinTunerName: TPE - classArgs: - #choice: maximize, minimize - optimize_mode: maximize -trial: - codeDir: ~/nni/examples/trials/mnist - ps: - replicas: 1 - command: python mnist-keras.py - gpuNum: 0 - cpuNum: 1 - memoryMB: 8196 - image: {your_docker_image_for_tensorflow_ps} - worker: - replicas: 1 - command: python mnist-keras.py - gpuNum: 2 - cpuNum: 1 - memoryMB: 8196 - image: {your_docker_image_for_tensorflow_worker} -kubeflowConfig: - operator: tf-operator - storage: nfs - nfs: - server: {your_nfs_server} - path: {your_nfs_server_exported_path} -``` -If you use Azure Kubernetes Service, you should set `kubeflowConfig` in your config yaml file as follows: -``` -kubeflowConfig: - operator: tf-operator - storage: azureStorage - keyVault: - vaultName: {your_vault_name} - name: {your_secert_name} - azureStorage: - accountName: {your_storage_account_name} - azureShare: {your_azure_share_name} -``` - -Note: You should explicitly set `trainingServicePlatform: kubeflow` in nni config yaml file if you want to start experiment in kubeflow mode. - -Trial configuration in kubeflow mode have the following configuration keys: -* codeDir - * code directory, where you put training code and config files -* worker (required). This config section is used to configure tensorflow worker role - * replicas - * Required key. Should be positive number depends on how many replication your want to run for tensorflow worker role. - * command - * Required key. Command to launch your trial job, like ```python mnist.py``` - * memoryMB - * Required key. 
Should be positive number based on your trial program's memory requirement - * cpuNum - * gpuNum - * image - * Required key. In kubeflow mode, your trial program will be scheduled by Kubernetes to run in [Pod](https://kubernetes.io/docs/concepts/workloads/pods/pod/). This key is used to specify the Docker image used to create the pod where your trail program will run. - * We already build a docker image [nnimsra/nni](https://hub.docker.com/r/msranni/nni/) on [Docker Hub](https://hub.docker.com/). It contains NNI python packages, Node modules and javascript artifact files required to start experiment, and all of NNI dependencies. The docker file used to build this image can be found at [here](../deployment/Dockerfile.build.base). You can either use this image directly in your config file, or build your own image based on it. -* ps (optional). This config section is used to configure tensorflow parameter server role. - -Once complete to fill nni experiment config file and save (for example, save as exp_kubeflow.yaml), then run the following command -``` -nnictl create --config exp_kubeflow.yaml -``` -to start the experiment in kubeflow mode. NNI will create Kubeflow tfjob for each trial, and the job name format is something like `nni_exp_{experiment_id}_trial_{trial_id}`. -You can see the kubeflow tfjob created by NNI in your Kubernetes dashboard. - -Notice: In kubeflow mode, NNIManager will start a rest server and listen on a port which is your NNI WebUI's port plus 1. For example, if your WebUI port is `8080`, the rest server will listen on `8081`, to receive metrics from trial job running in Kubernetes. So you should `enable 8081` TCP port in your firewall rule to allow incoming traffic. - -Once a trial job is completed, you can goto NNI WebUI's overview page (like http://localhost:8080/oview) to check trial's information. 
- -Any problems when using NNI in kubeflow mode, plesae create issues on [NNI github repo](https://github.com/Microsoft/nni), or send mail to nni@microsoft.com - +**Run an Experiment on Kubeflow** +=== +Now NNI supports running experiment on [Kubeflow](https://github.com/kubeflow/kubeflow), called kubeflow mode. Before starting to use NNI kubeflow mode, you should have a kubernetes cluster, either on-prem or [Azure Kubernetes Service(AKS)](https://azure.microsoft.com/en-us/services/kubernetes-service/), a Ubuntu machine on which [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) is installed and configured to connect to your kubernetes cluster. If you are not familiar with kubernetes, [here](https://kubernetes.io/docs/tutorials/kubernetes-basics/) is a goot start. In kubeflow mode, your trial program will run as kubeflow job in kubernetes cluster. + +## Prerequisite for on-premises Kubernetes Service +1. A **Kubernetes** cluster using Kubernetes 1.8 or later. Follow this [guideline](https://kubernetes.io/docs/setup/) to set up Kubernetes +2. Download, set up, and deploy **Kubelow** to your Kubernetes cluster. Follow this [guideline](https://www.kubeflow.org/docs/started/getting-started/) to set up Kubeflow +3. Install **kubectl**, and configure to connect to your Kubernetes API server. Follow this [guideline](https://kubernetes.io/docs/tasks/tools/install-kubectl/) to install kubectl on Ubuntu +4. If your NNI trial job needs GPU resource, you should follow this [guideline](https://github.com/NVIDIA/k8s-device-plugin) to configure **Nvidia device plugin for Kubernetes**. +5. Install **NFS server** and export a general purpose mount (we recommend to map your NFS server path in `root_squash option`, otherwise permission issue may raise when nni copy files to NFS. Refer this [page](https://linux.die.net/man/5/exports) to learn what root_squash option is), or **Azure File Storage**. +6. 
Install **NFS client** on the machine where you install NNI and run nnictl to create experiment. Run this command to install NFSv4 client: + ``` + apt-get install nfs-common + ``` + +7. Install **NNI**, follow the install guide [here](GetStarted.md). + +## Prerequisite for Azure Kubernetes Service +1. NNI support kubeflow based on Azure Kubernetes Service, follow the [guideline](https://azure.microsoft.com/en-us/services/kubernetes-service/) to set up Azure Kubernetes Service. +2. Install [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest) and __kubectl__. Use `az login` to set azure account, and connect kubectl client to AKS, [refer](https://docs.microsoft.com/en-us/azure/aks/kubernetes-walkthrough#connect-to-the-cluster). +3. Deploy kubeflow on Azure Kubernetes Service, follow the [guideline](https://www.kubeflow.org/docs/started/getting-started/). +4. Follow the [guideline](https://docs.microsoft.com/en-us/azure/storage/common/storage-quickstart-create-account?tabs=portal) to create azure file storage account. If you use Azure Kubernetes Service, nni need Azure Storage Service to store code files and the output files. +5. To access Azure storage service, nni need the access key of the storage account, and nni use [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/) Service to protect your private key. Set up Azure Key Vault Service, add a secret to Key Vault to store the access key of Azure storage account. Follow this [guideline](https://docs.microsoft.com/en-us/azure/key-vault/quick-create-cli) to store the access key. + +## Design +TODO + +## Run an experiment +Use `examples/trials/mnist` as an example. 
The nni config yaml file's content is like: +``` +authorName: your_name +experimentName: example_mnist +# how many trials could be concurrently running +trialConcurrency: 4 +# maximum experiment running duration +maxExecDuration: 3h +# empty means never stop +maxTrialNum: 100 +# choice: local, remote, pai, kubeflow +trainingServicePlatform: kubeflow +# choice: true, false +useAnnotation: false +tuner: + builtinTunerName: TPE + classArgs: + #choice: maximize, minimize + optimize_mode: maximize +trial: + codeDir: ~/nni/examples/trials/mnist + ps: + replicas: 1 + command: python mnist-keras.py + gpuNum: 0 + cpuNum: 1 + memoryMB: 8196 + image: {your_docker_image_for_tensorflow_ps} + worker: + replicas: 1 + command: python mnist-keras.py + gpuNum: 2 + cpuNum: 1 + memoryMB: 8196 + image: {your_docker_image_for_tensorflow_worker} +kubeflowConfig: + operator: tf-operator + storage: nfs + nfs: + server: {your_nfs_server} + path: {your_nfs_server_exported_path} +``` +If you use Azure Kubernetes Service, you should set `kubeflowConfig` in your config yaml file as follows: +``` +kubeflowConfig: + operator: tf-operator + storage: azureStorage + keyVault: + vaultName: {your_vault_name} + name: {your_secert_name} + azureStorage: + accountName: {your_storage_account_name} + azureShare: {your_azure_share_name} +``` + +Note: You should explicitly set `trainingServicePlatform: kubeflow` in nni config yaml file if you want to start experiment in kubeflow mode. + +Trial configuration in kubeflow mode have the following configuration keys: +* codeDir + * code directory, where you put training code and config files +* worker (required). This config section is used to configure tensorflow worker role + * replicas + * Required key. Should be positive number depends on how many replication your want to run for tensorflow worker role. + * command + * Required key. Command to launch your trial job, like ```python mnist.py``` + * memoryMB + * Required key. 
Should be positive number based on your trial program's memory requirement + * cpuNum + * gpuNum + * image + * Required key. In kubeflow mode, your trial program will be scheduled by Kubernetes to run in [Pod](https://kubernetes.io/docs/concepts/workloads/pods/pod/). This key is used to specify the Docker image used to create the pod where your trail program will run. + * We already build a docker image [nnimsra/nni](https://hub.docker.com/r/msranni/nni/) on [Docker Hub](https://hub.docker.com/). It contains NNI python packages, Node modules and javascript artifact files required to start experiment, and all of NNI dependencies. The docker file used to build this image can be found at [here](../deployment/Dockerfile.build.base). You can either use this image directly in your config file, or build your own image based on it. +* ps (optional). This config section is used to configure tensorflow parameter server role. + +Once complete to fill nni experiment config file and save (for example, save as exp_kubeflow.yaml), then run the following command +``` +nnictl create --config exp_kubeflow.yaml +``` +to start the experiment in kubeflow mode. NNI will create Kubeflow tfjob for each trial, and the job name format is something like `nni_exp_{experiment_id}_trial_{trial_id}`. +You can see the kubeflow tfjob created by NNI in your Kubernetes dashboard. + +Notice: In kubeflow mode, NNIManager will start a rest server and listen on a port which is your NNI WebUI's port plus 1. For example, if your WebUI port is `8080`, the rest server will listen on `8081`, to receive metrics from trial job running in Kubernetes. So you should `enable 8081` TCP port in your firewall rule to allow incoming traffic. + +Once a trial job is completed, you can goto NNI WebUI's overview page (like http://localhost:8080/oview) to check trial's information. 
+ +Any problems when using NNI in kubeflow mode, plesae create issues on [NNI github repo](https://github.com/Microsoft/nni), or send mail to nni@microsoft.com + diff --git a/examples/trials/cifar10_pytorch/search_space.json b/examples/trials/cifar10_pytorch/search_space.json index eec00bdadf..723e49b6b9 100644 --- a/examples/trials/cifar10_pytorch/search_space.json +++ b/examples/trials/cifar10_pytorch/search_space.json @@ -1,5 +1,5 @@ -{ - "lr":{"_type":"choice", "_value":[0.1, 0.01, 0.001, 0.0001]}, - "optimizer":{"_type":"choice", "_value":["SGD", "Adadelta", "Adagrad", "Adam", "Adamax"]}, - "model":{"_type":"choice", "_value":["vgg", "resnet18", "googlenet", "densenet121", "mobilenet", "dpn92", "senet18"]} +{ + "lr":{"_type":"choice", "_value":[0.1, 0.01, 0.001, 0.0001]}, + "optimizer":{"_type":"choice", "_value":["SGD", "Adadelta", "Adagrad", "Adam", "Adamax"]}, + "model":{"_type":"choice", "_value":["vgg", "resnet18", "googlenet", "densenet121", "mobilenet", "dpn92", "senet18"]} } diff --git a/examples/trials/ga_squad/README.md b/examples/trials/ga_squad/README.md index 1766f56d2a..6a6e1806fa 100644 --- a/examples/trials/ga_squad/README.md +++ b/examples/trials/ga_squad/README.md @@ -159,7 +159,7 @@ The trial has a lot of different files, functions and classes. Here we will only Among those files, `trial.py` and `graph_to_tf.py` is special. -`graph_to_tf.py` has a function named as `graph_to_network`, here is its skelton code: +`graph_to_tf.py` has a function named as `graph_to_network`, here is its skeleton code: ``` def graph_to_network(input1, diff --git a/examples/trials/network_morphism/FashionMNIST/FashionMNIST_keras.py b/examples/trials/network_morphism/FashionMNIST/FashionMNIST_keras.py new file mode 100644 index 0000000000..01c77c4405 --- /dev/null +++ b/examples/trials/network_morphism/FashionMNIST/FashionMNIST_keras.py @@ -0,0 +1,200 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. 
+# +# MIT License +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +import argparse +import logging +import os + +import tensorflow as tf +import keras +from keras.callbacks import EarlyStopping, TensorBoard +from keras.datasets import fashion_mnist +from keras.optimizers import SGD, Adadelta, Adagrad, Adam, Adamax, RMSprop +from keras.utils import multi_gpu_model, to_categorical +import keras.backend.tensorflow_backend as KTF + +import nni +from nni.networkmorphism_tuner.graph import json_to_graph + +# set the logger format +log_format = "%(asctime)s %(message)s" +logging.basicConfig( + filename="networkmorphism.log", + filemode="a", + level=logging.INFO, + format=log_format, + datefmt="%m/%d %I:%M:%S %p", +) +# set the logger format +logger = logging.getLogger("fashion_mnist-network-morphism-keras") + + +# restrict gpu usage background +config = tf.ConfigProto() +# pylint: disable=E1101,W0603 +config.gpu_options.allow_growth = True +sess = tf.Session(config=config) + +KTF.set_session(sess) + + +def get_args(): + """ get args from command line + """ + parser = argparse.ArgumentParser("fashion_mnist") + parser.add_argument("--batch_size", type=int, default=128, help="batch size") + parser.add_argument("--optimizer", type=str, default="SGD", help="optimizer") + parser.add_argument("--epochs", type=int, default=200, help="epoch limit") + parser.add_argument( + "--learning_rate", type=float, default=0.001, help="learning rate" + ) + parser.add_argument( + "--weight_decay", + type=float, + default=1e-5, + help="weight decay of the learning rate", + ) + return parser.parse_args() + + +trainloader = None +testloader = None +net = None +args = get_args() +TENSORBOARD_DIR = os.environ["NNI_OUTPUT_DIR"] + + +def build_graph_from_json(ir_model_json): + """build model from json representation + """ + graph = json_to_graph(ir_model_json) + logging.debug(graph.operation_history) + model = graph.produce_keras_model() + return model + + +def parse_rev_args(receive_msg): + """ parse reveive msgs to global variable + """ + global trainloader + 
global testloader + global net + + # Loading Data + logger.debug("Preparing data..") + + (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data() + y_train = to_categorical(y_train, 10) + y_test = to_categorical(y_test, 10) + x_train = x_train.reshape(x_train.shape+(1,)).astype("float32") + x_test = x_test.reshape(x_test.shape+(1,)).astype("float32") + x_train /= 255.0 + x_test /= 255.0 + trainloader = (x_train, y_train) + testloader = (x_test, y_test) + + # Model + logger.debug("Building model..") + net = build_graph_from_json(receive_msg) + + # parallel model + try: + available_devices = os.environ["CUDA_VISIBLE_DEVICES"] + gpus = len(available_devices.split(",")) + if gpus > 1: + net = multi_gpu_model(net, gpus) + except KeyError: + logger.debug("parallel model not support in this config settings") + + if args.optimizer == "SGD": + optimizer = SGD(lr=args.learning_rate, momentum=0.9, decay=args.weight_decay) + if args.optimizer == "Adadelta": + optimizer = Adadelta(lr=args.learning_rate, decay=args.weight_decay) + if args.optimizer == "Adagrad": + optimizer = Adagrad(lr=args.learning_rate, decay=args.weight_decay) + if args.optimizer == "Adam": + optimizer = Adam(lr=args.learning_rate, decay=args.weight_decay) + if args.optimizer == "Adamax": + optimizer = Adamax(lr=args.learning_rate, decay=args.weight_decay) + if args.optimizer == "RMSprop": + optimizer = RMSprop(lr=args.learning_rate, decay=args.weight_decay) + + # Compile the model + net.compile( + loss="categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"] + ) + return 0 + + +class SendMetrics(keras.callbacks.Callback): + """ + Keras callback to send metrics to NNI framework + """ + + def on_epoch_end(self, epoch, logs=None): + """ + Run on end of each epoch + """ + if logs is None: + logs = dict() + logger.debug(logs) + nni.report_intermediate_result(logs["acc"]) + + +# Training +def train_eval(): + """ train and eval the model + """ + + global trainloader + global testloader + 
global net + + (x_train, y_train) = trainloader + (x_test, y_test) = testloader + + # train procedure + net.fit( + x=x_train, + y=y_train, + batch_size=args.batch_size, + validation_data=(x_test, y_test), + epochs=args.epochs, + shuffle=True, + callbacks=[ + SendMetrics(), + EarlyStopping(min_delta=0.001, patience=10), + TensorBoard(log_dir=TENSORBOARD_DIR), + ], + ) + + # trial report final acc to tuner + _, acc = net.evaluate(x_test, y_test) + logger.debug("Final result is: %d", acc) + nni.report_final_result(acc) + + +if __name__ == "__main__": + try: + # trial get next parameter from network morphism tuner + RCV_CONFIG = nni.get_next_parameter() + logger.debug(RCV_CONFIG) + parse_rev_args(RCV_CONFIG) + train_eval() + except Exception as exception: + logger.exception(exception) + raise diff --git a/examples/trials/network_morphism/FashionMNIST/FashionMNIST_pytorch.py b/examples/trials/network_morphism/FashionMNIST/FashionMNIST_pytorch.py new file mode 100644 index 0000000000..37ee2670e7 --- /dev/null +++ b/examples/trials/network_morphism/FashionMNIST/FashionMNIST_pytorch.py @@ -0,0 +1,256 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import argparse +import logging +import sys + +import nni +from nni.networkmorphism_tuner.graph import json_to_graph + +import torch +import torch.nn as nn +import torch.optim as optim +import torchvision + +sys.path.append("../") +from network_morphism import utils + + +# set the logger format +log_format = "%(asctime)s %(message)s" +logging.basicConfig( + filename="networkmorphism.log", + filemode="a", + level=logging.INFO, + format=log_format, + datefmt="%m/%d %I:%M:%S %p", +) +# pylint: disable=W0603 +# set the logger format +logger = logging.getLogger("FashionMNIST-network-morphism") + + +def get_args(): + """ get args from command line + """ + parser = argparse.ArgumentParser("FashionMNIST") + parser.add_argument("--batch_size", type=int, default=128, help="batch size") + parser.add_argument("--optimizer", type=str, default="SGD", help="optimizer") + parser.add_argument("--epochs", type=int, default=200, help="epoch limit") + parser.add_argument( + "--learning_rate", type=float, default=0.001, help="learning rate" + ) + parser.add_argument("--cutout", action="store_true", default=False, help="use cutout") + parser.add_argument("--cutout_length", type=int, default=8, help="cutout length") + parser.add_argument( + "--model_path", type=str, default="./", help="Path to save the destination model" + ) + return parser.parse_args() + + +trainloader = None +testloader = None +net = None +criterion = None +optimizer = None +device = "cuda" if torch.cuda.is_available() else "cpu" +best_acc = 0.0 +args = get_args() + + +def build_graph_from_json(ir_model_json): + """build model from json representation + """ + graph = json_to_graph(ir_model_json) + logging.debug(graph.operation_history) + model = 
graph.produce_torch_model() + return model + + +def parse_rev_args(receive_msg): + """ parse reveive msgs to global variable + """ + global trainloader + global testloader + global net + global criterion + global optimizer + + # Loading Data + logger.debug("Preparing data..") + + raw_train_data = torchvision.datasets.FashionMNIST( + root="./data", train=True, download=True + ) + + dataset_mean, dataset_std = ( + [raw_train_data.train_data.float().mean() / 255], + [raw_train_data.train_data.float().std() / 255], + ) + + transform_train, transform_test = utils.data_transforms_mnist( + args, dataset_mean, dataset_std + ) + + trainset = torchvision.datasets.FashionMNIST( + root="./data", train=True, download=True, transform=transform_train + ) + trainloader = torch.utils.data.DataLoader( + trainset, batch_size=args.batch_size, shuffle=True, num_workers=2 + ) + + testset = torchvision.datasets.FashionMNIST( + root="./data", train=False, download=True, transform=transform_test + ) + testloader = torch.utils.data.DataLoader( + testset, batch_size=args.batch_size, shuffle=False, num_workers=2 + ) + + # Model + logger.debug("Building model..") + net = build_graph_from_json(receive_msg) + + net = net.to(device) + criterion = nn.CrossEntropyLoss() + + if args.optimizer == "SGD": + optimizer = optim.SGD( + net.parameters(), lr=args.learning_rate, momentum=0.9, weight_decay=5e-4 + ) + if args.optimizer == "Adadelta": + optimizer = optim.Adadelta(net.parameters(), lr=args.learning_rate) + if args.optimizer == "Adagrad": + optimizer = optim.Adagrad(net.parameters(), lr=args.learning_rate) + if args.optimizer == "Adam": + optimizer = optim.Adam(net.parameters(), lr=args.learning_rate) + if args.optimizer == "Adamax": + optimizer = optim.Adamax(net.parameters(), lr=args.learning_rate) + if args.optimizer == "RMSprop": + optimizer = optim.RMSprop(net.parameters(), lr=args.learning_rate) + + return 0 + + +# Training +def train(epoch): + """ train model on each epoch in trainset + """ 
+ + global trainloader + global testloader + global net + global criterion + global optimizer + + logger.debug("Epoch: %d", epoch) + net.train() + train_loss = 0 + correct = 0 + total = 0 + + for batch_idx, (inputs, targets) in enumerate(trainloader): + inputs, targets = inputs.to(device), targets.to(device) + optimizer.zero_grad() + outputs = net(inputs) + loss = criterion(outputs, targets) + loss.backward() + optimizer.step() + + train_loss += loss.item() + _, predicted = outputs.max(1) + total += targets.size(0) + correct += predicted.eq(targets).sum().item() + + acc = 100.0 * correct / total + + logger.debug( + "Loss: %.3f | Acc: %.3f%% (%d/%d)", + train_loss / (batch_idx + 1), + 100.0 * correct / total, + correct, + total, + ) + + return acc + + +def test(epoch): + """ eval model on each epoch in testset + """ + global best_acc + global trainloader + global testloader + global net + global criterion + global optimizer + + logger.debug("Eval on epoch: %d", epoch) + net.eval() + test_loss = 0 + correct = 0 + total = 0 + with torch.no_grad(): + for batch_idx, (inputs, targets) in enumerate(testloader): + inputs, targets = inputs.to(device), targets.to(device) + outputs = net(inputs) + loss = criterion(outputs, targets) + + test_loss += loss.item() + _, predicted = outputs.max(1) + total += targets.size(0) + correct += predicted.eq(targets).sum().item() + + acc = 100.0 * correct / total + + logger.debug( + "Loss: %.3f | Acc: %.3f%% (%d/%d)", + test_loss / (batch_idx + 1), + 100.0 * correct / total, + correct, + total, + ) + + acc = 100.0 * correct / total + if acc > best_acc: + best_acc = acc + return acc, best_acc + + +if __name__ == "__main__": + try: + # trial get next parameter from network morphism tuner + RCV_CONFIG = nni.get_next_parameter() + logger.debug(RCV_CONFIG) + + parse_rev_args(RCV_CONFIG) + train_acc = 0.0 + best_acc = 0.0 + early_stop = utils.EarlyStopping(mode="max") + for ep in range(args.epochs): + train_acc = train(ep) + test_acc, best_acc = 
test(ep) + nni.report_intermediate_result(test_acc) + logger.debug(test_acc) + if early_stop.step(test_acc): + break + + # trial report best_acc to tuner + nni.report_final_result(best_acc) + except Exception as exception: + logger.exception(exception) + raise diff --git a/examples/trials/network_morphism/FashionMNIST/__init__.py b/examples/trials/network_morphism/FashionMNIST/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/examples/trials/network_morphism/FashionMNIST/config.yml b/examples/trials/network_morphism/FashionMNIST/config.yml new file mode 100644 index 0000000000..1f226c2c57 --- /dev/null +++ b/examples/trials/network_morphism/FashionMNIST/config.yml @@ -0,0 +1,29 @@ +authorName: default +experimentName: example_FashionMNIST-network-morphism +trialConcurrency: 4 +maxExecDuration: 48h +maxTrialNum: 200 +#choice: local, remote, pai +trainingServicePlatform: local +#searchSpacePath: search_space.json +#choice: true, false +useAnnotation: false +tuner: + #choice: TPE, Random, Anneal, Evolution, BatchTuner, NetworkMorphism + #SMAC (SMAC should be installed through nnictl) + builtinTunerName: NetworkMorphism + classArgs: + #choice: maximize, minimize + optimize_mode: maximize + #for now, this tuner only supports cv domain + task: cv + #input image width + input_width: 28 + #input image channel + input_channel: 1 + #number of classes + n_output_node: 10 +trial: + command: python3 FashionMNIST_keras.py + codeDir: . 
+ gpuNum: 1 diff --git a/examples/trials/network_morphism/FashionMNIST/config_pai.yml b/examples/trials/network_morphism/FashionMNIST/config_pai.yml new file mode 100644 index 0000000000..95f8abc493 --- /dev/null +++ b/examples/trials/network_morphism/FashionMNIST/config_pai.yml @@ -0,0 +1,43 @@ +authorName: default +experimentName: example_FashionMNIST-network-morphism +trialConcurrency: 1 +maxExecDuration: 24h +maxTrialNum: 10 +#choice: local, remote, pai +trainingServicePlatform: pai +#choice: true, false +useAnnotation: false +tuner: + #choice: TPE, Random, Anneal, Evolution, BatchTuner, NetworkMorphism + #SMAC (SMAC should be installed through nnictl) + builtinTunerName: NetworkMorphism + classArgs: + #choice: maximize, minimize + optimize_mode: maximize + # for now, this tuner only supports cv domain + task: cv + #input image width + input_width: 28 + #input image channel + input_channel: 1 + #number of classes + n_output_node: 10 +trial: + command: python3 FashionMNIST_keras.py + codeDir: . 
+ gpuNum: 1 + cpuNum: 1 + memoryMB: 8196 + #The docker image to run nni job on pai + image: msranni/nni:latest + #The hdfs directory to store data on pai, format 'hdfs://host:port/directory' + dataDir: hdfs://10.10.10.10:9000/username/nni + #The hdfs directory to store output data generated by nni, format 'hdfs://host:port/directory' + outputDir: hdfs://10.10.10.10:9000/username/nni +paiConfig: + #The username to login pai + userName: username + #The password to login pai + passWord: password + #The host of restful server of pai + host: 10.10.10.10 \ No newline at end of file diff --git a/examples/trials/network_morphism/README.md b/examples/trials/network_morphism/README.md new file mode 100644 index 0000000000..7186813d12 --- /dev/null +++ b/examples/trials/network_morphism/README.md @@ -0,0 +1,108 @@ +# Network Morphism for Automatic Model Architecture Search in NNI +The Network Morphism is a build-in Tuner using network morphism techniques to search and evaluate the new network architecture. This example shows us how to use it to find good model architectures for deep learning. + +## How to run this example? + +### 1. Training framework support + +The network morphism now is framework-based, and we have not implemented the framework-free methods. The training frameworks which we have supported yet are Pytorch and Keras. If you get familiar with the intermediate JSON format, you can build your own model in your own training framework. In the future, we will change to intermediate format from JSON to ONNX in order to get a [standard intermediate representation spec](https://github.com/onnx/onnx/blob/master/docs/IR.md). + + +### 2. Install the requirements + +```bash +# install the requirements packages +cd examples/trials/network_morphism/ +pip install -r requirements.txt +``` + +### 3. Update configuration + +Modify `examples/trials/network_morphism/cifar10/config.yaml` to fit your own task, note that searchSpacePath is not required in our configuration. 
Here is the default configuration: + +```yaml +authorName: default +experimentName: example_cifar10-network-morphism +trialConcurrency: 1 +maxExecDuration: 48h +maxTrialNum: 200 +#choice: local, remote, pai +trainingServicePlatform: local +#choice: true, false +useAnnotation: false +tuner: + #choice: TPE, Random, Anneal, Evolution, BatchTuner, NetworkMorphism + #SMAC (SMAC should be installed through nnictl) + builtinTunerName: NetworkMorphism + classArgs: + #choice: maximize, minimize + optimize_mode: maximize + #for now, this tuner only supports cv domain + task: cv + #modify to fit your input image width + input_width: 32 + #modify to fit your input image channel + input_channel: 3 + #modify to fit your number of classes + n_output_node: 10 +trial: + # your own command here + command: python3 cifar10_keras.py + codeDir: . + gpuNum: 0 +``` + +In the "trial" part, if you want to use GPU to perform the architecture search, change `gpuNum` from `0` to `1`. You need to increase the `maxTrialNum` and `maxExecDuration`, according to how long you want to wait for the search result. + +`trialConcurrency` is the number of trials running concurrently, which is the number of GPUs you want to use, if you are setting `gpuNum` to 1. + +### 4. Call "json\_to\_graph()" function in your own code + +Modify your code and call "json\_to\_graph()" function to build a pytorch model or keras model from received json string. Here is the simple example. + +```python +import nni +from nni.networkmorphism_tuner.graph import json_to_graph + +def build_graph_from_json(ir_model_json): + """build a pytorch model from json representation + """ + graph = json_to_graph(ir_model_json) + model = graph.produce_torch_model() + return model + +# trial get next parameter from network morphism tuner +RCV_CONFIG = nni.get_next_parameter() +# call the function to build pytorch model or keras model +net = build_graph_from_json(RCV_CONFIG) + +# training procedure +# .... 
+ +# report the final accuracy to nni +nni.report_final_result(best_acc) +``` + +### 5. Submit this job + +```bash +# You can use the nni command line tool "nnictl" to create a job which is submitted to nni +# finally you successfully commit a Network Morphism Job to nni +nnictl create --config config.yml +``` + +## Trial Examples + +The trial has some examples which can guide you, located in `examples/trials/network_morphism/`. You can refer to them and modify for your own task. Hope this will help you to build your code. + +### FashionMNIST + +`Fashion-MNIST` is a dataset of [Zalando](https://jobs.zalando.com/tech/)'s article images—consisting of a training set of 60,000 examples and a test set of 10,000 examples. Each example is a 28x28 grayscale image, associated with a label from 10 classes. It is a modern image classification dataset widely used to replace MNIST as a baseline dataset, because the dataset MNIST is too easy and overused. + +There are two examples, [FashionMNIST-keras.py](./FashionMNIST/FashionMNIST_keras.py) and [FashionMNIST-pytorch.py](./FashionMNIST/FashionMNIST_pytorch.py). Attention, you should change the `input_width` to 28 and `input_channel` to 1 in `config.yml` for this dataset. + +### Cifar10 + +The `CIFAR-10` dataset [Canadian Institute For Advanced Research](https://www.cifar.ca/) is a collection of images that are commonly used to train machine learning and computer vision algorithms. It is one of the most widely used datasets for machine learning research. The CIFAR-10 dataset contains 60,000 32x32 color images in 10 different classes. + +There are two examples, [cifar10-keras.py](./cifar10/cifar10_keras.py) and [cifar10-pytorch.py](./cifar10/cifar10_pytorch.py). The value `input_width` is 32 and the value `input_channel` is 3 in `config.yml` for this dataset. 
\ No newline at end of file diff --git a/examples/trials/network_morphism/cifar10/__init__.py b/examples/trials/network_morphism/cifar10/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/examples/trials/network_morphism/cifar10/cifar10_keras.py b/examples/trials/network_morphism/cifar10/cifar10_keras.py new file mode 100644 index 0000000000..04e10cf8af --- /dev/null +++ b/examples/trials/network_morphism/cifar10/cifar10_keras.py @@ -0,0 +1,200 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +import argparse +import logging +import os + +import tensorflow as tf +import keras +from keras.callbacks import EarlyStopping, TensorBoard +from keras.datasets import cifar10 +from keras.optimizers import SGD, Adadelta, Adagrad, Adam, Adamax, RMSprop +from keras.utils import multi_gpu_model, to_categorical +import keras.backend.tensorflow_backend as KTF + +import nni +from nni.networkmorphism_tuner.graph import json_to_graph + +# set the logger format +log_format = "%(asctime)s %(message)s" +logging.basicConfig( + filename="networkmorphism.log", + filemode="a", + level=logging.INFO, + format=log_format, + datefmt="%m/%d %I:%M:%S %p", +) +# set the logger format +logger = logging.getLogger("cifar10-network-morphism-keras") + + +# restrict gpu usage background +config = tf.ConfigProto() +# pylint: disable=E1101,W0603 +config.gpu_options.allow_growth = True +sess = tf.Session(config=config) + +KTF.set_session(sess) + + +def get_args(): + """ get args from command line + """ + parser = argparse.ArgumentParser("cifar10") + parser.add_argument("--batch_size", type=int, default=128, help="batch size") + parser.add_argument("--optimizer", type=str, default="SGD", help="optimizer") + parser.add_argument("--epochs", type=int, default=200, help="epoch limit") + parser.add_argument( + "--learning_rate", type=float, default=0.001, help="learning rate" + ) + parser.add_argument( + "--weight_decay", + type=float, + default=1e-5, + help="weight decay of the learning rate", + ) + return parser.parse_args() + + +trainloader = None +testloader = None +net = None +args = get_args() +TENSORBOARD_DIR = os.environ["NNI_OUTPUT_DIR"] + + +def build_graph_from_json(ir_model_json): + """build model from json representation + """ + graph = json_to_graph(ir_model_json) + logging.debug(graph.operation_history) + model = graph.produce_keras_model() + return model + + +def parse_rev_args(receive_msg): + """ parse reveive msgs to global variable + """ + global trainloader + global testloader + 
global net + + # Loading Data + logger.debug("Preparing data..") + + (x_train, y_train), (x_test, y_test) = cifar10.load_data() + y_train = to_categorical(y_train, 10) + y_test = to_categorical(y_test, 10) + x_train = x_train.astype("float32") + x_test = x_test.astype("float32") + x_train /= 255.0 + x_test /= 255.0 + trainloader = (x_train, y_train) + testloader = (x_test, y_test) + + # Model + logger.debug("Building model..") + net = build_graph_from_json(receive_msg) + + # parallel model + try: + available_devices = os.environ["CUDA_VISIBLE_DEVICES"] + gpus = len(available_devices.split(",")) + if gpus > 1: + net = multi_gpu_model(net, gpus) + except KeyError: + logger.debug("parallel model not support in this config settings") + + if args.optimizer == "SGD": + optimizer = SGD(lr=args.learning_rate, momentum=0.9, decay=args.weight_decay) + if args.optimizer == "Adadelta": + optimizer = Adadelta(lr=args.learning_rate, decay=args.weight_decay) + if args.optimizer == "Adagrad": + optimizer = Adagrad(lr=args.learning_rate, decay=args.weight_decay) + if args.optimizer == "Adam": + optimizer = Adam(lr=args.learning_rate, decay=args.weight_decay) + if args.optimizer == "Adamax": + optimizer = Adamax(lr=args.learning_rate, decay=args.weight_decay) + if args.optimizer == "RMSprop": + optimizer = RMSprop(lr=args.learning_rate, decay=args.weight_decay) + + # Compile the model + net.compile( + loss="categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"] + ) + return 0 + + +class SendMetrics(keras.callbacks.Callback): + """ + Keras callback to send metrics to NNI framework + """ + + def on_epoch_end(self, epoch, logs=None): + """ + Run on end of each epoch + """ + if logs is None: + logs = dict() + logger.debug(logs) + nni.report_intermediate_result(logs["acc"]) + + +# Training +def train_eval(): + """ train and eval the model + """ + + global trainloader + global testloader + global net + + (x_train, y_train) = trainloader + (x_test, y_test) = testloader + + # 
train procedure + net.fit( + x=x_train, + y=y_train, + batch_size=args.batch_size, + validation_data=(x_test, y_test), + epochs=args.epochs, + shuffle=True, + callbacks=[ + SendMetrics(), + EarlyStopping(min_delta=0.001, patience=10), + TensorBoard(log_dir=TENSORBOARD_DIR), + ], + ) + + # trial report final acc to tuner + _, acc = net.evaluate(x_test, y_test) + logger.debug("Final result is: %f", acc) + nni.report_final_result(acc) + + +if __name__ == "__main__": + try: + # trial get next parameter from network morphism tuner + RCV_CONFIG = nni.get_next_parameter() + logger.debug(RCV_CONFIG) + parse_rev_args(RCV_CONFIG) + train_eval() + except Exception as exception: + logger.exception(exception) + raise diff --git a/examples/trials/network_morphism/cifar10/cifar10_pytorch.py b/examples/trials/network_morphism/cifar10/cifar10_pytorch.py new file mode 100644 index 0000000000..a8631fa6f6 --- /dev/null +++ b/examples/trials/network_morphism/cifar10/cifar10_pytorch.py @@ -0,0 +1,247 @@ +# Copyright (c) Microsoft Corporation +# All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the "Software"), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and +# to permit persons to whom the Software is furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import argparse +import logging +import sys + +import nni +from nni.networkmorphism_tuner.graph import json_to_graph + +import torch +import torch.nn as nn +import torch.optim as optim +import torchvision + +sys.path.append("../") +from network_morphism import utils + +# set the logger format +log_format = "%(asctime)s %(message)s" +logging.basicConfig( + filename="networkmorphism.log", + filemode="a", + level=logging.INFO, + format=log_format, + datefmt="%m/%d %I:%M:%S %p", +) +# pylint: disable=W0603 +# set the logger format +logger = logging.getLogger("cifar10-network-morphism-pytorch") + + +def get_args(): + """ get args from command line + """ + parser = argparse.ArgumentParser("cifar10") + parser.add_argument("--batch_size", type=int, default=128, help="batch size") + parser.add_argument("--optimizer", type=str, default="SGD", help="optimizer") + parser.add_argument("--epochs", type=int, default=200, help="epoch limit") + parser.add_argument( + "--learning_rate", type=float, default=0.001, help="learning rate" + ) + parser.add_argument("--cutout", action="store_true", default=False, help="use cutout") + parser.add_argument("--cutout_length", type=int, default=8, help="cutout length") + parser.add_argument( + "--model_path", type=str, default="./", help="Path to save the destination model" + ) + return parser.parse_args() + + +trainloader = None +testloader = None +net = None +criterion = None +optimizer = None +device = "cuda" if torch.cuda.is_available() else "cpu" +best_acc = 0.0 +args = get_args() + + +def build_graph_from_json(ir_model_json): + """build model from json representation + """ + graph = json_to_graph(ir_model_json) + logging.debug(graph.operation_history) + model = 
graph.produce_torch_model() + return model + + +def parse_rev_args(receive_msg): + """ parse reveive msgs to global variable + """ + global trainloader + global testloader + global net + global criterion + global optimizer + + # Loading Data + logger.debug("Preparing data..") + + transform_train, transform_test = utils.data_transforms_cifar10(args) + + trainset = torchvision.datasets.CIFAR10( + root="./data", train=True, download=True, transform=transform_train + ) + trainloader = torch.utils.data.DataLoader( + trainset, batch_size=args.batch_size, shuffle=True, num_workers=2 + ) + + testset = torchvision.datasets.CIFAR10( + root="./data", train=False, download=True, transform=transform_test + ) + testloader = torch.utils.data.DataLoader( + testset, batch_size=args.batch_size, shuffle=False, num_workers=2 + ) + + # Model + logger.debug("Building model..") + net = build_graph_from_json(receive_msg) + + net = net.to(device) + criterion = nn.CrossEntropyLoss() + if device == "cuda" and torch.cuda.device_count() > 1: + net = torch.nn.DataParallel(net) + + if args.optimizer == "SGD": + optimizer = optim.SGD( + net.parameters(), lr=args.learning_rate, momentum=0.9, weight_decay=5e-4 + ) + if args.optimizer == "Adadelta": + optimizer = optim.Adadelta(net.parameters(), lr=args.learning_rate) + if args.optimizer == "Adagrad": + optimizer = optim.Adagrad(net.parameters(), lr=args.learning_rate) + if args.optimizer == "Adam": + optimizer = optim.Adam(net.parameters(), lr=args.learning_rate) + if args.optimizer == "Adamax": + optimizer = optim.Adamax(net.parameters(), lr=args.learning_rate) + if args.optimizer == "RMSprop": + optimizer = optim.RMSprop(net.parameters(), lr=args.learning_rate) + + + return 0 + + +# Training +def train(epoch): + """ train model on each epoch in trainset + """ + + global trainloader + global testloader + global net + global criterion + global optimizer + + logger.debug("Epoch: %d", epoch) + net.train() + train_loss = 0 + correct = 0 + total = 0 + 
+ for batch_idx, (inputs, targets) in enumerate(trainloader): + inputs, targets = inputs.to(device), targets.to(device) + optimizer.zero_grad() + outputs = net(inputs) + loss = criterion(outputs, targets) + loss.backward() + optimizer.step() + + train_loss += loss.item() + _, predicted = outputs.max(1) + total += targets.size(0) + correct += predicted.eq(targets).sum().item() + + acc = 100.0 * correct / total + + logger.debug( + "Loss: %.3f | Acc: %.3f%% (%d/%d)", + train_loss / (batch_idx + 1), + 100.0 * correct / total, + correct, + total, + ) + + return acc + + +def test(epoch): + """ eval model on each epoch in testset + """ + global best_acc + global trainloader + global testloader + global net + global criterion + global optimizer + + logger.debug("Eval on epoch: %d", epoch) + net.eval() + test_loss = 0 + correct = 0 + total = 0 + with torch.no_grad(): + for batch_idx, (inputs, targets) in enumerate(testloader): + inputs, targets = inputs.to(device), targets.to(device) + outputs = net(inputs) + loss = criterion(outputs, targets) + + test_loss += loss.item() + _, predicted = outputs.max(1) + total += targets.size(0) + correct += predicted.eq(targets).sum().item() + + acc = 100.0 * correct / total + + logger.debug( + "Loss: %.3f | Acc: %.3f%% (%d/%d)", + test_loss / (batch_idx + 1), + 100.0 * correct / total, + correct, + total, + ) + + acc = 100.0 * correct / total + if acc > best_acc: + best_acc = acc + return acc, best_acc + + +if __name__ == "__main__": + try: + # trial get next parameter from network morphism tuner + RCV_CONFIG = nni.get_next_parameter() + logger.debug(RCV_CONFIG) + + parse_rev_args(RCV_CONFIG) + train_acc = 0.0 + best_acc = 0.0 + early_stop = utils.EarlyStopping(mode="max") + for ep in range(args.epochs): + train_acc = train(ep) + test_acc, best_acc = test(ep) + nni.report_intermediate_result(test_acc) + logger.debug(test_acc) + if early_stop.step(test_acc): + break + + # trial report best_acc to tuner + nni.report_final_result(best_acc) 
+ except Exception as exception: + logger.exception(exception) + raise diff --git a/examples/trials/network_morphism/cifar10/config.yml b/examples/trials/network_morphism/cifar10/config.yml new file mode 100644 index 0000000000..e2deb3cc86 --- /dev/null +++ b/examples/trials/network_morphism/cifar10/config.yml @@ -0,0 +1,29 @@ +authorName: default +experimentName: example_cifar10-network-morphism +trialConcurrency: 4 +maxExecDuration: 48h +maxTrialNum: 200 +#choice: local, remote, pai +trainingServicePlatform: local +#searchSpacePath: search_space.json +#choice: true, false +useAnnotation: false +tuner: + #choice: TPE, Random, Anneal, Evolution, BatchTuner, NetworkMorphism + #SMAC (SMAC should be installed through nnictl) + builtinTunerName: NetworkMorphism + classArgs: + #choice: maximize, minimize + optimize_mode: maximize + #for now, this tuner only supports cv domain + task: cv + #input image width + input_width: 32 + #input image channel + input_channel: 3 + #number of classes + n_output_node: 10 +trial: + command: python3 cifar10_keras.py + codeDir: . 
+ gpuNum: 1 diff --git a/examples/trials/network_morphism/cifar10/config_pai.yml b/examples/trials/network_morphism/cifar10/config_pai.yml new file mode 100644 index 0000000000..42481bc558 --- /dev/null +++ b/examples/trials/network_morphism/cifar10/config_pai.yml @@ -0,0 +1,43 @@ +authorName: default +experimentName: example_cifar10-network-morphism +trialConcurrency: 1 +maxExecDuration: 24h +maxTrialNum: 10 +#choice: local, remote, pai +trainingServicePlatform: pai +#choice: true, false +useAnnotation: false +tuner: + #choice: TPE, Random, Anneal, Evolution, BatchTuner, NetworkMorphism + #SMAC (SMAC should be installed through nnictl) + builtinTunerName: NetworkMorphism + classArgs: + #choice: maximize, minimize + optimize_mode: maximize + # for now, this tuner only supports cv domain + task: cv + #input image width + input_width: 32 + #input image channel + input_channel: 3 + #number of classes + n_output_node: 10 +trial: + command: python3 cifar10_keras.py + codeDir: . + gpuNum: 1 + cpuNum: 1 + memoryMB: 8196 + #The docker image to run nni job on pai + image: msranni/nni:latest + #The hdfs directory to store data on pai, format 'hdfs://host:port/directory' + dataDir: hdfs://10.10.10.10:9000/username/nni + #The hdfs directory to store output data generated by nni, format 'hdfs://host:port/directory' + outputDir: hdfs://10.10.10.10:9000/username/nni +paiConfig: + #The username to login pai + userName: username + #The password to login pai + passWord: password + #The host of restful server of pai + host: 10.10.10.10 \ No newline at end of file diff --git a/examples/trials/network_morphism/requirements.txt b/examples/trials/network_morphism/requirements.txt new file mode 100644 index 0000000000..c7c29b39fd --- /dev/null +++ b/examples/trials/network_morphism/requirements.txt @@ -0,0 +1,6 @@ +numpy==1.14.2 +tensorflow==1.12.0 +torchvision==0.2.1 +Keras==2.2.2 +nni==0.3.0 +torch==0.4.1 diff --git a/examples/trials/network_morphism/utils.py 
b/examples/trials/network_morphism/utils.py new file mode 100644 index 0000000000..5c98793615 --- /dev/null +++ b/examples/trials/network_morphism/utils.py @@ -0,0 +1,184 @@ +"""Some helper functions for PyTorch, including: + - get_mean_and_std: calculate the mean and std value of dataset. + - msr_init: net parameter initialization. + - progress_bar: progress bar mimic xlua.progress. +""" +import numpy as np +import torch +import torch.nn as nn +import torch.nn.init as init +import torchvision.transforms as transforms + + +class EarlyStopping: + """ EarlyStopping class to keep NN from overfitting + """ + + # pylint: disable=E0202 + def __init__(self, mode="min", min_delta=0, patience=10, percentage=False): + self.mode = mode + self.min_delta = min_delta + self.patience = patience + self.best = None + self.num_bad_epochs = 0 + self.is_better = None + self._init_is_better(mode, min_delta, percentage) + + if patience == 0: + self.is_better = lambda a, b: True + self.step = lambda a: False + + def step(self, metrics): + """ EarlyStopping step on each epoch + Arguments: + metrics {float} -- metric value + """ + + if self.best is None: + self.best = metrics + return False + + if np.isnan(metrics): + return True + + if self.is_better(metrics, self.best): + self.num_bad_epochs = 0 + self.best = metrics + else: + self.num_bad_epochs += 1 + + if self.num_bad_epochs >= self.patience: + return True + + return False + + def _init_is_better(self, mode, min_delta, percentage): + if mode not in {"min", "max"}: + raise ValueError("mode " + mode + " is unknown!") + if not percentage: + if mode == "min": + self.is_better = lambda a, best: a < best - min_delta + if mode == "max": + self.is_better = lambda a, best: a > best + min_delta + else: + if mode == "min": + self.is_better = lambda a, best: a < best - (best * min_delta / 100) + if mode == "max": + self.is_better = lambda a, best: a > best + (best * min_delta / 100) + + +class Cutout: + """Randomly mask out one or more patches 
from an image. + Args: + n_holes (int): Number of patches to cut out of each image. + length (int): The length (in pixels) of each square patch. + """ + + def __init__(self, length): + self.length = length + + def __call__(self, img): + """ + Args: + img (Tensor): Tensor image of size (C, H, W). + Returns: + Tensor: Image with n_holes of dimension length x length cut out of it. + """ + h_img, w_img = img.size(1), img.size(2) + mask = np.ones((h_img, w_img), np.float32) + y_img = np.random.randint(h_img) + x_img = np.random.randint(w_img) + + y1_img = np.clip(y_img - self.length // 2, 0, h_img) + y2_img = np.clip(y_img + self.length // 2, 0, h_img) + x1_img = np.clip(x_img - self.length // 2, 0, w_img) + x2_img = np.clip(x_img + self.length // 2, 0, w_img) + + mask[y1_img:y2_img, x1_img:x2_img] = 0.0 + mask = torch.from_numpy(mask) + mask = mask.expand_as(img) + img *= mask + return img + + +def data_transforms_cifar10(args): + """ data_transforms for cifar10 dataset + """ + + cifar_mean = [0.49139968, 0.48215827, 0.44653124] + cifar_std = [0.24703233, 0.24348505, 0.26158768] + + train_transform = transforms.Compose( + [ + transforms.RandomCrop(32, padding=4), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize(cifar_mean, cifar_std), + ] + ) + if args.cutout: + train_transform.transforms.append(Cutout(args.cutout_length)) + + valid_transform = transforms.Compose( + [transforms.ToTensor(), transforms.Normalize(cifar_mean, cifar_std)] + ) + return train_transform, valid_transform + + +def data_transforms_mnist(args, mnist_mean=None, mnist_std=None): + """ data_transforms for mnist dataset + """ + if mnist_mean is None: + mnist_mean = [0.5] + + if mnist_std is None: + mnist_std = [0.5] + + train_transform = transforms.Compose( + [ + transforms.RandomCrop(28, padding=4), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize(mnist_mean, mnist_std), + ] + ) + if args.cutout: + 
train_transform.transforms.append(Cutout(args.cutout_length)) + + valid_transform = transforms.Compose( + [transforms.ToTensor(), transforms.Normalize(mnist_mean, mnist_std)] + ) + return train_transform, valid_transform + + +def get_mean_and_std(dataset): + """Compute the mean and std value of dataset.""" + dataloader = torch.utils.data.DataLoader( + dataset, batch_size=1, shuffle=True, num_workers=2 + ) + mean = torch.zeros(3) + std = torch.zeros(3) + print("==> Computing mean and std..") + for inputs, _ in dataloader: + for i in range(3): + mean[i] += inputs[:, i, :, :].mean() + std[i] += inputs[:, i, :, :].std() + mean.div_(len(dataset)) + std.div_(len(dataset)) + return mean, std + + +def init_params(net): + """Init layer parameters.""" + for module in net.modules(): + if isinstance(module, nn.Conv2d): + init.kaiming_normal(module.weight, mode="fan_out") + if module.bias: + init.constant(module.bias, 0) + elif isinstance(module, nn.BatchNorm2d): + init.constant(module.weight, 1) + init.constant(module.bias, 0) + elif isinstance(module, nn.Linear): + init.normal(module.weight, std=1e-3) + if module.bias: + init.constant(module.bias, 0) diff --git a/pylintrc b/pylintrc index 928973fee0..cbb5607c94 100644 --- a/pylintrc +++ b/pylintrc @@ -16,3 +16,6 @@ const-naming-style=any disable=duplicate-code, super-init-not-called + +# List of members which are set dynamically and missed by pylint inference +generated-members=numpy.*,torch.* diff --git a/src/nni_manager/rest_server/restValidationSchemas.ts b/src/nni_manager/rest_server/restValidationSchemas.ts index 07891b1d56..be2646a861 100644 --- a/src/nni_manager/rest_server/restValidationSchemas.ts +++ b/src/nni_manager/rest_server/restValidationSchemas.ts @@ -118,7 +118,7 @@ export namespace ValidationSchemas { checkpointDir: joi.string() }), tuner: joi.object({ - builtinTunerName: joi.string().valid('TPE', 'Random', 'Anneal', 'Evolution', 'SMAC', 'BatchTuner', 'GridSearch'), + builtinTunerName: 
joi.string().valid('TPE', 'Random', 'Anneal', 'Evolution', 'SMAC', 'BatchTuner', 'GridSearch', 'NetworkMorphism'), codeDir: joi.string(), classFileName: joi.string(), className: joi.string(), diff --git a/src/sdk/pynni/nni/constants.py b/src/sdk/pynni/nni/constants.py index 8d73108884..7a32832d82 100644 --- a/src/sdk/pynni/nni/constants.py +++ b/src/sdk/pynni/nni/constants.py @@ -25,9 +25,9 @@ 'Evolution': 'nni.evolution_tuner.evolution_tuner', 'SMAC': 'nni.smac_tuner.smac_tuner', 'BatchTuner': 'nni.batch_tuner.batch_tuner', + 'Medianstop': 'nni.medianstop_assessor.medianstop_assessor', 'GridSearch': 'nni.gridsearch_tuner.gridsearch_tuner', - - 'Medianstop': 'nni.medianstop_assessor.medianstop_assessor' + 'NetworkMorphism': 'nni.networkmorphism_tuner.networkmorphism_tuner' } ClassName = { @@ -38,6 +38,7 @@ 'SMAC': 'SMACTuner', 'BatchTuner': 'BatchTuner', 'GridSearch': 'GridSearchTuner', + 'NetworkMorphism':'NetworkMorphismTuner', 'Medianstop': 'MedianstopAssessor' } diff --git a/src/sdk/pynni/nni/networkmorphism_tuner/README.md b/src/sdk/pynni/nni/networkmorphism_tuner/README.md new file mode 100644 index 0000000000..36e9b8976f --- /dev/null +++ b/src/sdk/pynni/nni/networkmorphism_tuner/README.md @@ -0,0 +1,197 @@ +# Network Morphism Tuner on NNI + +## 1. Introduction + +[Autokeras](https://arxiv.org/abs/1806.10282) is a popular AutoML tool using Network Morphism. The basic idea of Autokeras is to use Bayesian Regression to estimate the metric of the Neural Network Architecture. Each time, it generates several child networks from father networks. Then it uses naïve Bayesian regression to estimate the metric value from the history of trained networks and their metric values. Next, it chooses the child which has the best estimated performance and adds it to the training queue. Inspired by its work and referring to its [code](https://github.com/jhfjhfj1/autokeras), we implement our Network Morphism method in our NNI platform.
+ +If you want to know about network morphism trial usage, please check [Readme.md](../../../../../examples/trials/network_morphism/README.md) of the trial to get more details. + +## 2. Usage + +To use Network Morphism, you should modify the following spec in your `config.yml` file: + +```yaml +tuner: + #choice: NetworkMorphism + builtinTunerName: NetworkMorphism + classArgs: + #choice: maximize, minimize + optimize_mode: maximize + #for now, this tuner only supports cv domain + task: cv + #modify to fit your input image width + input_width: 32 + #modify to fit your input image channel + input_channel: 3 + #modify to fit your number of classes + n_output_node: 10 +``` + + + +In the training procedure, it generates a JSON file which represents a Network Graph. Users can call the "json\_to\_graph()" function to build a pytorch model or keras model from this JSON file. + +```python +import nni +from nni.networkmorphism_tuner.graph import json_to_graph + +def build_graph_from_json(ir_model_json): + """build a pytorch model from json representation + """ + graph = json_to_graph(ir_model_json) + model = graph.produce_torch_model() + return model + +# trial get next parameter from network morphism tuner +RCV_CONFIG = nni.get_next_parameter() +# call the function to build pytorch model or keras model +net = build_graph_from_json(RCV_CONFIG) + +# training procedure +# .... + +# report the final accuracy to nni +nni.report_final_result(best_acc) +``` + +## 3. File Structure + +The tuner has a lot of different files, functions and classes. Here we will only give most of those files a brief introduction: + +- `networkmorphism_tuner.py` is a tuner which uses network morphism techniques. + +- `bayesian.py` is the Bayesian method to estimate the metric of unseen models based on the models we have already searched. +- `graph.py` is the meta graph data structure. The class Graph represents the neural architecture graph of a model.
+ - Graph extracts the neural architecture graph from a model. + - Each node in the graph is an intermediate tensor between layers. + - Each layer is an edge in the graph. + - Notably, multiple edges may refer to the same layer. +- `graph_transformer.py` includes some graph transformers to make the graph wider or deeper, or to add a skip-connection into the graph. + +- `layers.py` includes all the layers we use in our model. +- `layer_transformer.py` includes some layer transformers to make a layer wider or deeper, or to add a skip-connection into the layer. +- `nn.py` includes the class to generate the network class initially. +- `metric.py` includes some metric classes including Accuracy and MSE. +- `utils.py` includes some helper classes and functions, such as `Constant` and `OptimizeMode`. + +## 4. The Network Representation JSON Example + +Here is an example of the intermediate representation JSON file we defined, which is passed from the tuner to the trial in the architecture search procedure. The example is as follows. + +```json +{ + "input_shape": [32, 32, 3], + "weighted": false, + "operation_history": [], + "layer_id_to_input_node_ids": {"0": [0],"1": [1],"2": [2],"3": [3],"4": [4],"5": [5],"6": [6],"7": [7],"8": [8],"9": [9],"10": [10],"11": [11],"12": [12],"13": [13],"14": [14],"15": [15],"16": [16] + }, + "layer_id_to_output_node_ids": {"0": [1],"1": [2],"2": [3],"3": [4],"4": [5],"5": [6],"6": [7],"7": [8],"8": [9],"9": [10],"10": [11],"11": [12],"12": [13],"13": [14],"14": [15],"15": [16],"16": [17] + }, + "adj_list": { + "0": [[1, 0]], + "1": [[2, 1]], + "2": [[3, 2]], + "3": [[4, 3]], + "4": [[5, 4]], + "5": [[6, 5]], + "6": [[7, 6]], + "7": [[8, 7]], + "8": [[9, 8]], + "9": [[10, 9]], + "10": [[11, 10]], + "11": [[12, 11]], + "12": [[13, 12]], + "13": [[14, 13]], + "14": [[15, 14]], + "15": [[16, 15]], + "16": [[17, 16]], + "17": [] + }, + "reverse_adj_list": { + "0": [], + "1": [[0, 0]], + "2": [[1, 1]], + "3": [[2, 2]], + "4": [[3, 3]], + "5": [[4, 4]], + "6": [[5, 5]], + "7": [[6, 6]], + "8": [[7,
7]], + "9": [[8, 8]], + "10": [[9, 9]], + "11": [[10, 10]], + "12": [[11, 11]], + "13": [[12, 12]], + "14": [[13, 13]], + "15": [[14, 14]], + "16": [[15, 15]], + "17": [[16, 16]] + }, + "node_list": [ + [0, [32, 32, 3]], + [1, [32, 32, 3]], + [2, [32, 32, 64]], + [3, [32, 32, 64]], + [4, [16, 16, 64]], + [5, [16, 16, 64]], + [6, [16, 16, 64]], + [7, [16, 16, 64]], + [8, [8, 8, 64]], + [9, [8, 8, 64]], + [10, [8, 8, 64]], + [11, [8, 8, 64]], + [12, [4, 4, 64]], + [13, [64]], + [14, [64]], + [15, [64]], + [16, [64]], + [17, [10]] + ], + "layer_list": [ + [0, ["StubReLU", 0, 1]], + [1, ["StubConv2d", 1, 2, 3, 64, 3]], + [2, ["StubBatchNormalization2d", 2, 3, 64]], + [3, ["StubPooling2d", 3, 4, 2, 2, 0]], + [4, ["StubReLU", 4, 5]], + [5, ["StubConv2d", 5, 6, 64, 64, 3]], + [6, ["StubBatchNormalization2d", 6, 7, 64]], + [7, ["StubPooling2d", 7, 8, 2, 2, 0]], + [8, ["StubReLU", 8, 9]], + [9, ["StubConv2d", 9, 10, 64, 64, 3]], + [10, ["StubBatchNormalization2d", 10, 11, 64]], + [11, ["StubPooling2d", 11, 12, 2, 2, 0]], + [12, ["StubGlobalPooling2d", 12, 13]], + [13, ["StubDropout2d", 13, 14, 0.25]], + [14, ["StubDense", 14, 15, 64, 64]], + [15, ["StubReLU", 15, 16]], + [16, ["StubDense", 16, 17, 64, 10]] + ] + } +``` + +The definition of each model is a JSON object (also you can consider the model as a DAG graph), where: + +- `input_shape` is a list of integers, which does not include the batch axis. +- `weighted` means whether the weights and biases in the neural network should be included in the graph. +- `operation_history` is a list saving all the network morphism operations applied so far. +- `layer_id_to_input_node_ids` is a dictionary instance mapping from layer identifiers to their input nodes identifiers. +- `layer_id_to_output_node_ids` is a dictionary instance mapping from layer identifiers to their output nodes identifiers. +- `adj_list` is a two dimensional list. The adjacency list of the graph. The first dimension is identified by tensor identifiers.
In each edge list, the elements are two-element tuples of (tensor identifier, layer identifier). +- `reverse_adj_list` is a reverse adjacency list in the same format as adj_list. +- `node_list` is a list of integers. The indices of the list are the identifiers. +- `layer_list` is a list of stub layers. The indices of the list are the identifiers. + - For `StubConv (StubConv1d, StubConv2d, StubConv3d)`, the numbers that follow are its node input id (or id list), node output id, input_channel, filters, kernel_size, stride and padding. + + - For `StubDense`, the numbers that follow are its node input id (or id list), node output id, input_units and units. + + - For `StubBatchNormalization (StubBatchNormalization1d, StubBatchNormalization2d, StubBatchNormalization3d)`, the numbers that follow are its node input id (or id list), node output id and the number of features. + + - For `StubDropout(StubDropout1d, StubDropout2d, StubDropout3d)`, the numbers that follow are its node input id (or id list), node output id and dropout rate. + + - For `StubPooling (StubPooling1d, StubPooling2d, StubPooling3d)`, the numbers that follow are its node input id (or id list), node output id, kernel_size, stride and padding. + + - For other layers, the numbers that follow are its node input id (or id list) and node output id. + +## 5. TODO + +Next step, we will change the API from a fixed network generator to more network operator generators. Besides, we will use ONNX instead of JSON later as the intermediate representation spec in the future. diff --git a/src/sdk/pynni/nni/networkmorphism_tuner/__init__.py b/src/sdk/pynni/nni/networkmorphism_tuner/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/sdk/pynni/nni/networkmorphism_tuner/bayesian.py b/src/sdk/pynni/nni/networkmorphism_tuner/bayesian.py new file mode 100644 index 0000000000..fe4a3914da --- /dev/null +++ b/src/sdk/pynni/nni/networkmorphism_tuner/bayesian.py @@ -0,0 +1,488 @@ +# Copyright (c) Microsoft Corporation. All rights reserved.
+# +# MIT License +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and +# associated documentation files (the "Software"), to deal in the Software without restriction, +# including without limitation the rights to use, copy, modify, merge, publish, distribute, +# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or +# substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT +# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT +# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+# ================================================================================================== + +import math +import random +from copy import deepcopy +from functools import total_ordering +from queue import PriorityQueue + +import numpy as np +from scipy.linalg import LinAlgError, cho_solve, cholesky, solve_triangular +from scipy.optimize import linear_sum_assignment +from sklearn.metrics.pairwise import rbf_kernel + +from nni.networkmorphism_tuner.graph_transformer import transform +from nni.networkmorphism_tuner.utils import Constant, OptimizeMode +from nni.networkmorphism_tuner.layers import is_layer + + +def layer_distance(a, b): + """The distance between two layers.""" + # pylint: disable=unidiomatic-typecheck + if type(a) != type(b): + return 1.0 + if is_layer(a, "Conv"): + att_diff = [ + (a.filters, b.filters), + (a.kernel_size, b.kernel_size), + (a.stride, b.stride), + ] + return attribute_difference(att_diff) + if is_layer(a, "Pooling"): + att_diff = [ + (a.padding, b.padding), + (a.kernel_size, b.kernel_size), + (a.stride, b.stride), + ] + return attribute_difference(att_diff) + return 0.0 + + +def attribute_difference(att_diff): + ''' The attribute distance. 
+ ''' + + ret = 0 + for a_value, b_value in att_diff: + if max(a_value, b_value) == 0: + ret += 0 + else: + ret += abs(a_value - b_value) * 1.0 / max(a_value, b_value) + return ret * 1.0 / len(att_diff) + + +def layers_distance(list_a, list_b): + """The distance between the layers of two neural networks.""" + len_a = len(list_a) + len_b = len(list_b) + f = np.zeros((len_a + 1, len_b + 1)) + f[-1][-1] = 0 + for i in range(-1, len_a): + f[i][-1] = i + 1 + for j in range(-1, len_b): + f[-1][j] = j + 1 + for i in range(len_a): + for j in range(len_b): + f[i][j] = min( + f[i][j - 1] + 1, + f[i - 1][j] + 1, + f[i - 1][j - 1] + layer_distance(list_a[i], list_b[j]), + ) + return f[len_a - 1][len_b - 1] + + +def skip_connection_distance(a, b): + """The distance between two skip-connections.""" + if a[2] != b[2]: + return 1.0 + len_a = abs(a[1] - a[0]) + len_b = abs(b[1] - b[0]) + return (abs(a[0] - b[0]) + abs(len_a - len_b)) / (max(a[0], b[0]) + max(len_a, len_b)) + + +def skip_connections_distance(list_a, list_b): + """The distance between the skip-connections of two neural networks.""" + distance_matrix = np.zeros((len(list_a), len(list_b))) + for i, a in enumerate(list_a): + for j, b in enumerate(list_b): + distance_matrix[i][j] = skip_connection_distance(a, b) + return distance_matrix[linear_sum_assignment(distance_matrix)].sum() + abs( + len(list_a) - len(list_b) + ) + + +def edit_distance(x, y): + """The distance between two neural networks. + Args: + x: An instance of NetworkDescriptor. + y: An instance of NetworkDescriptor + Returns: + The edit-distance between x and y. + """ + + ret = layers_distance(x.layers, y.layers) + ret += Constant.KERNEL_LAMBDA * skip_connections_distance( + x.skip_connections, y.skip_connections + ) + return ret + + +class IncrementalGaussianProcess: + """Gaussian process regressor. + Attributes: + alpha: A hyperparameter. 
+ """ + + def __init__(self): + self.alpha = 1e-10 + self._distance_matrix = None + self._x = None + self._y = None + self._first_fitted = False + self._l_matrix = None + self._alpha_vector = None + + @property + def kernel_matrix(self): + ''' Kernel matric. + ''' + return self._distance_matrix + + def fit(self, train_x, train_y): + """ Fit the regressor with more data. + Args: + train_x: A list of NetworkDescriptor. + train_y: A list of metric values. + """ + if self.first_fitted: + self.incremental_fit(train_x, train_y) + else: + self.first_fit(train_x, train_y) + + def incremental_fit(self, train_x, train_y): + """ Incrementally fit the regressor. """ + if not self._first_fitted: + raise ValueError("The first_fit function needs to be called first.") + + train_x, train_y = np.array(train_x), np.array(train_y) + + # Incrementally compute K + up_right_k = edit_distance_matrix(self._x, train_x) + down_left_k = np.transpose(up_right_k) + down_right_k = edit_distance_matrix(train_x) + up_k = np.concatenate((self._distance_matrix, up_right_k), axis=1) + down_k = np.concatenate((down_left_k, down_right_k), axis=1) + temp_distance_matrix = np.concatenate((up_k, down_k), axis=0) + k_matrix = bourgain_embedding_matrix(temp_distance_matrix) + diagonal = np.diag_indices_from(k_matrix) + diagonal = (diagonal[0][-len(train_x) :], diagonal[1][-len(train_x) :]) + k_matrix[diagonal] += self.alpha + + try: + self._l_matrix = cholesky(k_matrix, lower=True) # Line 2 + except LinAlgError: + return self + + self._x = np.concatenate((self._x, train_x), axis=0) + self._y = np.concatenate((self._y, train_y), axis=0) + self._distance_matrix = temp_distance_matrix + + self._alpha_vector = cho_solve((self._l_matrix, True), self._y) # Line 3 + + return self + + @property + def first_fitted(self): + ''' if it is firsr fitted + ''' + return self._first_fitted + + def first_fit(self, train_x, train_y): + """ Fit the regressor for the first time. 
""" + train_x, train_y = np.array(train_x), np.array(train_y) + + self._x = np.copy(train_x) + self._y = np.copy(train_y) + + self._distance_matrix = edit_distance_matrix(self._x) + k_matrix = bourgain_embedding_matrix(self._distance_matrix) + k_matrix[np.diag_indices_from(k_matrix)] += self.alpha + + self._l_matrix = cholesky(k_matrix, lower=True) # Line 2 + + self._alpha_vector = cho_solve((self._l_matrix, True), self._y) # Line 3 + + self._first_fitted = True + return self + + def predict(self, train_x): + """Predict the result. + Args: + train_x: A list of NetworkDescriptor. + Returns: + y_mean: The predicted mean. + y_std: The predicted standard deviation. + """ + k_trans = np.exp(-np.power(edit_distance_matrix(train_x, self._x), 2)) + y_mean = k_trans.dot(self._alpha_vector) # Line 4 (y_mean = f_star) + + # compute inverse K_inv of K based on its Cholesky + # decomposition L and its inverse L_inv + l_inv = solve_triangular(self._l_matrix.T, np.eye(self._l_matrix.shape[0])) + k_inv = l_inv.dot(l_inv.T) + # Compute variance of predictive distribution + y_var = np.ones(len(train_x), dtype=np.float) + y_var -= np.einsum("ij,ij->i", np.dot(k_trans, k_inv), k_trans) + + # Check if any of the variances is negative because of + # numerical issues. If yes: set the variance to 0. + y_var_negative = y_var < 0 + if np.any(y_var_negative): + y_var[y_var_negative] = 0.0 + return y_mean, np.sqrt(y_var) + + +def edit_distance_matrix(train_x, train_y=None): + """Calculate the edit distance. + Args: + train_x: A list of neural architectures. + train_y: A list of neural architectures. + Returns: + An edit-distance matrix. 
+ """ + if train_y is None: + ret = np.zeros((train_x.shape[0], train_x.shape[0])) + for x_index, x in enumerate(train_x): + for y_index, y in enumerate(train_x): + if x_index == y_index: + ret[x_index][y_index] = 0 + elif x_index < y_index: + ret[x_index][y_index] = edit_distance(x, y) + else: + ret[x_index][y_index] = ret[y_index][x_index] + return ret + ret = np.zeros((train_x.shape[0], train_y.shape[0])) + for x_index, x in enumerate(train_x): + for y_index, y in enumerate(train_y): + ret[x_index][y_index] = edit_distance(x, y) + return ret + + +def vector_distance(a, b): + """The Euclidean distance between two vectors.""" + a = np.array(a) + b = np.array(b) + return np.linalg.norm(a - b) + + +def bourgain_embedding_matrix(distance_matrix): + """Use Bourgain algorithm to embed the neural architectures based on their edit-distance. + Args: + distance_matrix: A matrix of edit-distances. + Returns: + A matrix of distances after embedding. + """ + distance_matrix = np.array(distance_matrix) + n = len(distance_matrix) + if n == 1: + return distance_matrix + np.random.seed(123) + distort_elements = [] + r = range(n) + k = int(math.ceil(math.log(n) / math.log(2) - 1)) + t = int(math.ceil(math.log(n))) + counter = 0 + for i in range(0, k + 1): + for t in range(t): + s = np.random.choice(r, 2 ** i) + for j in r: + d = min([distance_matrix[j][s] for s in s]) + counter += len(s) + if i == 0 and t == 0: + distort_elements.append([d]) + else: + distort_elements[j].append(d) + return rbf_kernel(distort_elements, distort_elements) + + +class BayesianOptimizer: + """ A Bayesian optimizer for neural architectures. + Attributes: + searcher: The Searcher which is calling the Bayesian optimizer. + t_min: The minimum temperature for simulated annealing. + metric: An instance of the Metric subclasses. + gpr: A GaussianProcessRegressor for bayesian optimization. + beta: The beta in acquisition function. (refer to our paper) + search_tree: The network morphism search tree. 
+ """ + + def __init__(self, searcher, t_min, optimizemode, beta=None): + self.searcher = searcher + self.t_min = t_min + self.optimizemode = optimizemode + self.gpr = IncrementalGaussianProcess() + self.beta = beta if beta is not None else Constant.BETA + self.search_tree = SearchTree() + + def fit(self, x_queue, y_queue): + """ Fit the optimizer with new architectures and performances. + Args: + x_queue: A list of NetworkDescriptor. + y_queue: A list of metric values. + """ + self.gpr.fit(x_queue, y_queue) + + def generate(self, descriptors): + """Generate new architecture. + Args: + descriptors: All the searched neural architectures. + Returns: + graph: An instance of Graph. A morphed neural network with weights. + father_id: The father node ID in the search tree. + """ + model_ids = self.search_tree.adj_list.keys() + + target_graph = None + father_id = None + descriptors = deepcopy(descriptors) + elem_class = Elem + if self.optimizemode is OptimizeMode.Maximize: + elem_class = ReverseElem + + # Initialize the priority queue. 
+ pq = PriorityQueue() + temp_list = [] + for model_id in model_ids: + metric_value = self.searcher.get_metric_value_by_id(model_id) + temp_list.append((metric_value, model_id)) + temp_list = sorted(temp_list) + for metric_value, model_id in temp_list: + graph = self.searcher.load_model_by_id(model_id) + graph.clear_operation_history() + graph.clear_weights() + pq.put(elem_class(metric_value, model_id, graph)) + + t = 1.0 + t_min = self.t_min + alpha = 0.9 + opt_acq = self._get_init_opt_acq_value() + while not pq.empty() and t > t_min: + elem = pq.get() + if self.optimizemode is OptimizeMode.Maximize: + temp_exp = min((elem.metric_value - opt_acq) / t, 1.0) + else: + temp_exp = min((opt_acq - elem.metric_value) / t, 1.0) + ap = math.exp(temp_exp) + if ap >= random.uniform(0, 1): + for temp_graph in transform(elem.graph): + if contain(descriptors, temp_graph.extract_descriptor()): + continue + + temp_acq_value = self.acq(temp_graph) + pq.put(elem_class(temp_acq_value, elem.father_id, temp_graph)) + descriptors.append(temp_graph.extract_descriptor()) + if self._accept_new_acq_value(opt_acq, temp_acq_value): + opt_acq = temp_acq_value + father_id = elem.father_id + target_graph = deepcopy(temp_graph) + t *= alpha + + # Did not found a not duplicated architecture + if father_id is None: + return None, None + nm_graph = self.searcher.load_model_by_id(father_id) + for args in target_graph.operation_history: + getattr(nm_graph, args[0])(*list(args[1:])) + return nm_graph, father_id + + def acq(self, graph): + ''' estimate the value of generated graph + ''' + mean, std = self.gpr.predict(np.array([graph.extract_descriptor()])) + if self.optimizemode is OptimizeMode.Maximize: + return mean + self.beta * std + return mean - self.beta * std + + def _get_init_opt_acq_value(self): + if self.optimizemode is OptimizeMode.Maximize: + return -np.inf + return np.inf + + def _accept_new_acq_value(self, opt_acq, temp_acq_value): + if temp_acq_value > opt_acq and self.optimizemode is 
OptimizeMode.Maximize: + return True + if temp_acq_value < opt_acq and not self.optimizemode is OptimizeMode.Maximize: + return True + return False + + def add_child(self, father_id, model_id): + ''' add child to the search tree + Arguments: + father_id {int} -- father id + model_id {int} -- model id + ''' + + self.search_tree.add_child(father_id, model_id) + + +@total_ordering +class Elem: + """Elements to be sorted according to metric value.""" + + def __init__(self, metric_value, father_id, graph): + self.father_id = father_id + self.graph = graph + self.metric_value = metric_value + + def __eq__(self, other): + return self.metric_value == other.metric_value + + def __lt__(self, other): + return self.metric_value < other.metric_value + + +class ReverseElem(Elem): + """Elements to be reversely sorted according to metric value.""" + + def __lt__(self, other): + return self.metric_value > other.metric_value + + +def contain(descriptors, target_descriptor): + """Check if the target descriptor is in the descriptors.""" + for descriptor in descriptors: + if edit_distance(descriptor, target_descriptor) < 1e-5: + return True + return False + + +class SearchTree: + """The network morphism search tree.""" + + def __init__(self): + self.root = None + self.adj_list = {} + + def add_child(self, u, v): + ''' add child to search tree itself. 
+ Arguments: + u {int} -- father id + v {int} -- child id + ''' + + if u == -1: + self.root = v + self.adj_list[v] = [] + return + if v not in self.adj_list[u]: + self.adj_list[u].append(v) + if v not in self.adj_list: + self.adj_list[v] = [] + + def get_dict(self, u=None): + """ A recursive function to return the content of the tree in a dict.""" + if u is None: + return self.get_dict(self.root) + children = [] + for v in self.adj_list[u]: + children.append(self.get_dict(v)) + ret = {"name": u, "children": children} + return ret diff --git a/src/sdk/pynni/nni/networkmorphism_tuner/graph.py b/src/sdk/pynni/nni/networkmorphism_tuner/graph.py new file mode 100644 index 0000000000..82b37f7243 --- /dev/null +++ b/src/sdk/pynni/nni/networkmorphism_tuner/graph.py @@ -0,0 +1,988 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and +# associated documentation files (the "Software"), to deal in the Software without restriction, +# including without limitation the rights to use, copy, modify, merge, publish, distribute, +# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or +# substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT +# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT +# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+# ================================================================================================== + +import json +from collections import Iterable +from copy import deepcopy, copy +from queue import Queue + +import numpy as np +import torch + +from nni.networkmorphism_tuner.layer_transformer import ( + add_noise, + wider_bn, + wider_next_conv, + wider_next_dense, + wider_pre_conv, + wider_pre_dense, + init_dense_weight, + init_conv_weight, + init_bn_weight, +) +from nni.networkmorphism_tuner.layers import ( + StubAdd, + StubConcatenate, + StubReLU, + get_batch_norm_class, + get_conv_class, + is_layer, + layer_width, + set_keras_weight_to_stub, + set_stub_weight_to_keras, + set_stub_weight_to_torch, + set_torch_weight_to_stub, + to_real_keras_layer, + layer_description_extractor, + layer_description_builder, +) +from nni.networkmorphism_tuner.utils import Constant + + +class NetworkDescriptor: + """A class describing the neural architecture for neural network kernel. + It only record the width of convolutional and dense layers, and the skip-connection types and positions. + """ + + CONCAT_CONNECT = "concat" + ADD_CONNECT = "add" + + def __init__(self): + self.skip_connections = [] + self.layers = [] + + @property + def n_layers(self): + return len(self.layers) + + def add_skip_connection(self, u, v, connection_type): + """ Add a skip-connection to the descriptor. + Args: + u: Number of convolutional layers before the starting point. + v: Number of convolutional layers before the ending point. + connection_type: Must be either CONCAT_CONNECT or ADD_CONNECT. + """ + if connection_type not in [self.CONCAT_CONNECT, self.ADD_CONNECT]: + raise ValueError( + "connection_type should be NetworkDescriptor.CONCAT_CONNECT " + "or NetworkDescriptor.ADD_CONNECT." 
+ ) + self.skip_connections.append((u, v, connection_type)) + + def to_json(self): + ''' NetworkDescriptor to json representation + ''' + + skip_list = [] + for u, v, connection_type in self.skip_connections: + skip_list.append({"from": u, "to": v, "type": connection_type}) + return {"node_list": self.layers, "skip_list": skip_list} + + def add_layer(self, layer): + ''' add one layer + ''' + + self.layers.append(layer) + + +class Node: + """A class for intermediate output tensor (node) in the Graph. + Attributes: + shape: A tuple describing the shape of the tensor. + """ + + def __init__(self, shape): + self.shape = shape + + +class Graph: + """A class representing the neural architecture graph of a model. + Graph extracts the neural architecture graph from a model. + Each node in the graph is a intermediate tensor between layers. + Each layer is an edge in the graph. + Notably, multiple edges may refer to the same layer. + (e.g. Add layer is adding two tensor into one tensor. So it is related to two edges.) + Attributes: + weighted: A boolean of whether the weights and biases in the neural network + should be included in the graph. + input_shape: A tuple of integers, which does not include the batch axis. + node_list: A list of integers. The indices of the list are the identifiers. + layer_list: A list of stub layers. The indices of the list are the identifiers. + node_to_id: A dict instance mapping from node integers to their identifiers. + layer_to_id: A dict instance mapping from stub layers to their identifiers. + layer_id_to_input_node_ids: A dict instance mapping from layer identifiers + to their input nodes identifiers. + layer_id_to_output_node_ids: A dict instance mapping from layer identifiers + to their output nodes identifiers. + adj_list: A two dimensional list. The adjacency list of the graph. The first dimension is + identified by tensor identifiers. In each edge list, the elements are two-element tuples + of (tensor identifier, layer identifier). 
+ reverse_adj_list: A reverse adjacent list in the same format as adj_list. + operation_history: A list saving all the network morphism operations. + vis: A dictionary of temporary storage for whether an local operation has been done + during the network morphism. + """ + + def __init__(self, input_shape, weighted=True): + """Initializer for Graph. + """ + self.input_shape = input_shape + self.weighted = weighted + self.node_list = [] + self.layer_list = [] + # node id start with 0 + self.node_to_id = {} + self.layer_to_id = {} + self.layer_id_to_input_node_ids = {} + self.layer_id_to_output_node_ids = {} + self.adj_list = {} + self.reverse_adj_list = {} + self.operation_history = [] + self.n_dim = len(input_shape) - 1 + self.conv = get_conv_class(self.n_dim) + self.batch_norm = get_batch_norm_class(self.n_dim) + + self.vis = None + self._add_node(Node(input_shape)) + + def add_layer(self, layer, input_node_id): + """Add a layer to the Graph. + Args: + layer: An instance of the subclasses of StubLayer in layers.py. + input_node_id: An integer. The ID of the input node of the layer. + Returns: + output_node_id: An integer. The ID of the output node of the layer. 
+ """ + if isinstance(input_node_id, Iterable): + layer.input = list(map(lambda x: self.node_list[x], input_node_id)) + output_node_id = self._add_node(Node(layer.output_shape)) + for node_id in input_node_id: + self._add_edge(layer, node_id, output_node_id) + + else: + layer.input = self.node_list[input_node_id] + output_node_id = self._add_node(Node(layer.output_shape)) + self._add_edge(layer, input_node_id, output_node_id) + + layer.output = self.node_list[output_node_id] + return output_node_id + + def clear_operation_history(self): + self.operation_history = [] + + @property + def n_nodes(self): + """Return the number of nodes in the model.""" + return len(self.node_list) + + @property + def n_layers(self): + """Return the number of layers in the model.""" + return len(self.layer_list) + + def _add_node(self, node): + """Add a new node to node_list and give the node an ID. + Args: + node: An instance of Node. + Returns: + node_id: An integer. + """ + node_id = len(self.node_list) + self.node_to_id[node] = node_id + self.node_list.append(node) + self.adj_list[node_id] = [] + self.reverse_adj_list[node_id] = [] + return node_id + + def _add_edge(self, layer, input_id, output_id): + """Add a new layer to the graph. 
The nodes should be created in advance.""" + + if layer in self.layer_to_id: + layer_id = self.layer_to_id[layer] + if input_id not in self.layer_id_to_input_node_ids[layer_id]: + self.layer_id_to_input_node_ids[layer_id].append(input_id) + if output_id not in self.layer_id_to_output_node_ids[layer_id]: + self.layer_id_to_output_node_ids[layer_id].append(output_id) + else: + layer_id = len(self.layer_list) + self.layer_list.append(layer) + self.layer_to_id[layer] = layer_id + self.layer_id_to_input_node_ids[layer_id] = [input_id] + self.layer_id_to_output_node_ids[layer_id] = [output_id] + + self.adj_list[input_id].append((output_id, layer_id)) + self.reverse_adj_list[output_id].append((input_id, layer_id)) + + def _redirect_edge(self, u_id, v_id, new_v_id): + """Redirect the layer to a new node. + Change the edge originally from `u_id` to `v_id` into an edge from `u_id` to `new_v_id` + while keeping all other property of the edge the same. + """ + layer_id = None + for index, edge_tuple in enumerate(self.adj_list[u_id]): + if edge_tuple[0] == v_id: + layer_id = edge_tuple[1] + self.adj_list[u_id][index] = (new_v_id, layer_id) + self.layer_list[layer_id].output = self.node_list[new_v_id] + break + + for index, edge_tuple in enumerate(self.reverse_adj_list[v_id]): + if edge_tuple[0] == u_id: + layer_id = edge_tuple[1] + self.reverse_adj_list[v_id].remove(edge_tuple) + break + self.reverse_adj_list[new_v_id].append((u_id, layer_id)) + for index, value in enumerate(self.layer_id_to_output_node_ids[layer_id]): + if value == v_id: + self.layer_id_to_output_node_ids[layer_id][index] = new_v_id + break + + def _replace_layer(self, layer_id, new_layer): + """Replace the layer with a new layer.""" + old_layer = self.layer_list[layer_id] + new_layer.input = old_layer.input + new_layer.output = old_layer.output + new_layer.output.shape = new_layer.output_shape + self.layer_list[layer_id] = new_layer + self.layer_to_id[new_layer] = layer_id + self.layer_to_id.pop(old_layer) + 
+ @property + def topological_order(self): + """Return the topological order of the node IDs from the input node to the output node.""" + q = Queue() + in_degree = {} + for i in range(self.n_nodes): + in_degree[i] = 0 + for u in range(self.n_nodes): + for v, _ in self.adj_list[u]: + in_degree[v] += 1 + for i in range(self.n_nodes): + if in_degree[i] == 0: + q.put(i) + + order_list = [] + while not q.empty(): + u = q.get() + order_list.append(u) + for v, _ in self.adj_list[u]: + in_degree[v] -= 1 + if in_degree[v] == 0: + q.put(v) + return order_list + + def _get_pooling_layers(self, start_node_id, end_node_id): + """Given two node IDs, return all the pooling layers between them.""" + layer_list = [] + node_list = [start_node_id] + assert self._depth_first_search(end_node_id, layer_list, node_list) + ret = [] + for layer_id in layer_list: + layer = self.layer_list[layer_id] + if is_layer(layer, "Pooling"): + ret.append(layer) + elif is_layer(layer, "Conv") and layer.stride != 1: + ret.append(layer) + return ret + + def _depth_first_search(self, target_id, layer_id_list, node_list): + """Search for all the layers and nodes down the path. + A recursive function to search all the layers and nodes between the node in the node_list + and the node with target_id.""" + assert len(node_list) <= self.n_nodes + u = node_list[-1] + if u == target_id: + return True + + for v, layer_id in self.adj_list[u]: + layer_id_list.append(layer_id) + node_list.append(v) + if self._depth_first_search(target_id, layer_id_list, node_list): + return True + layer_id_list.pop() + node_list.pop() + + return False + + def _search(self, u, start_dim, total_dim, n_add): + """Search the graph for all the layers to be widened caused by an operation. + It is an recursive function with duplication check to avoid deadlock. + It searches from a starting node u until the corresponding layers has been widened. + Args: + u: The starting node ID. + start_dim: The position to insert the additional dimensions. 
+ total_dim: The total number of dimensions the layer has before widening. + n_add: The number of dimensions to add. + """ + if (u, start_dim, total_dim, n_add) in self.vis: + return + self.vis[(u, start_dim, total_dim, n_add)] = True + for v, layer_id in self.adj_list[u]: + layer = self.layer_list[layer_id] + + if is_layer(layer, "Conv"): + new_layer = wider_next_conv( + layer, start_dim, total_dim, n_add, self.weighted + ) + self._replace_layer(layer_id, new_layer) + + elif is_layer(layer, "Dense"): + new_layer = wider_next_dense( + layer, start_dim, total_dim, n_add, self.weighted + ) + self._replace_layer(layer_id, new_layer) + + elif is_layer(layer, "BatchNormalization"): + new_layer = wider_bn(layer, start_dim, total_dim, n_add, self.weighted) + self._replace_layer(layer_id, new_layer) + self._search(v, start_dim, total_dim, n_add) + + elif is_layer(layer, "Concatenate"): + if self.layer_id_to_input_node_ids[layer_id][1] == u: + # u is on the right of the concat + # next_start_dim += next_total_dim - total_dim + left_dim = self._upper_layer_width( + self.layer_id_to_input_node_ids[layer_id][0] + ) + next_start_dim = start_dim + left_dim + next_total_dim = total_dim + left_dim + else: + next_start_dim = start_dim + next_total_dim = total_dim + self._upper_layer_width( + self.layer_id_to_input_node_ids[layer_id][1] + ) + self._search(v, next_start_dim, next_total_dim, n_add) + + else: + self._search(v, start_dim, total_dim, n_add) + + for v, layer_id in self.reverse_adj_list[u]: + layer = self.layer_list[layer_id] + if is_layer(layer, "Conv"): + new_layer = wider_pre_conv(layer, n_add, self.weighted) + self._replace_layer(layer_id, new_layer) + elif is_layer(layer, "Dense"): + new_layer = wider_pre_dense(layer, n_add, self.weighted) + self._replace_layer(layer_id, new_layer) + elif is_layer(layer, "Concatenate"): + continue + else: + self._search(v, start_dim, total_dim, n_add) + + def _upper_layer_width(self, u): + for v, layer_id in self.reverse_adj_list[u]: 
+ layer = self.layer_list[layer_id] + if is_layer(layer, "Conv") or is_layer(layer, "Dense"): + return layer_width(layer) + elif is_layer(layer, "Concatenate"): + a = self.layer_id_to_input_node_ids[layer_id][0] + b = self.layer_id_to_input_node_ids[layer_id][1] + return self._upper_layer_width(a) + self._upper_layer_width(b) + else: + return self._upper_layer_width(v) + return self.node_list[0].shape[-1] + + def to_deeper_model(self, target_id, new_layer): + """Insert a relu-conv-bn block after the target block. + Args: + target_id: A convolutional layer ID. The new block should be inserted after the block. + new_layer: An instance of StubLayer subclasses. + """ + self.operation_history.append(("to_deeper_model", target_id, new_layer)) + input_id = self.layer_id_to_input_node_ids[target_id][0] + output_id = self.layer_id_to_output_node_ids[target_id][0] + if self.weighted: + if is_layer(new_layer, "Dense"): + init_dense_weight(new_layer) + elif is_layer(new_layer, "Conv"): + init_conv_weight(new_layer) + elif is_layer(new_layer, "BatchNormalization"): + init_bn_weight(new_layer) + + self._insert_new_layers([new_layer], input_id, output_id) + + def to_wider_model(self, pre_layer_id, n_add): + """Widen the last dimension of the output of the pre_layer. + Args: + pre_layer_id: The ID of a convolutional layer or dense layer. + n_add: The number of dimensions to add. + """ + self.operation_history.append(("to_wider_model", pre_layer_id, n_add)) + pre_layer = self.layer_list[pre_layer_id] + output_id = self.layer_id_to_output_node_ids[pre_layer_id][0] + dim = layer_width(pre_layer) + self.vis = {} + self._search(output_id, dim, dim, n_add) + # Update the tensor shapes. 
+ for u in self.topological_order: + for v, layer_id in self.adj_list[u]: + self.node_list[v].shape = self.layer_list[layer_id].output_shape + + def _insert_new_layers(self, new_layers, start_node_id, end_node_id): + """Insert the new_layers after the node with start_node_id.""" + new_node_id = self._add_node(deepcopy(self.node_list[end_node_id])) + temp_output_id = new_node_id + for layer in new_layers[:-1]: + temp_output_id = self.add_layer(layer, temp_output_id) + + self._add_edge(new_layers[-1], temp_output_id, end_node_id) + new_layers[-1].input = self.node_list[temp_output_id] + new_layers[-1].output = self.node_list[end_node_id] + self._redirect_edge(start_node_id, end_node_id, new_node_id) + + def _block_end_node(self, layer_id, block_size): + ret = self.layer_id_to_output_node_ids[layer_id][0] + for _ in range(block_size - 2): + ret = self.adj_list[ret][0][0] + return ret + + def _dense_block_end_node(self, layer_id): + return self.layer_id_to_input_node_ids[layer_id][0] + + def _conv_block_end_node(self, layer_id): + """Get the input node ID of the last layer in the block by layer ID. + Return the input node ID of the last layer in the convolutional block. + Args: + layer_id: the convolutional layer ID. + """ + return self._block_end_node(layer_id, Constant.CONV_BLOCK_DISTANCE) + + def to_add_skip_model(self, start_id, end_id): + """Add a weighted add skip-connection from after start node to end node. + Args: + start_id: The convolutional layer ID, after which to start the skip-connection. + end_id: The convolutional layer ID, after which to end the skip-connection. 
+ """ + self.operation_history.append(("to_add_skip_model", start_id, end_id)) + filters_end = self.layer_list[end_id].output.shape[-1] + filters_start = self.layer_list[start_id].output.shape[-1] + start_node_id = self.layer_id_to_output_node_ids[start_id][0] + + pre_end_node_id = self.layer_id_to_input_node_ids[end_id][0] + end_node_id = self.layer_id_to_output_node_ids[end_id][0] + + skip_output_id = self._insert_pooling_layer_chain(start_node_id, end_node_id) + + # Add the conv layer + new_conv_layer = get_conv_class(self.n_dim)(filters_start, filters_end, 1) + skip_output_id = self.add_layer(new_conv_layer, skip_output_id) + + # Add the add layer. + add_input_node_id = self._add_node(deepcopy(self.node_list[end_node_id])) + add_layer = StubAdd() + + self._redirect_edge(pre_end_node_id, end_node_id, add_input_node_id) + self._add_edge(add_layer, add_input_node_id, end_node_id) + self._add_edge(add_layer, skip_output_id, end_node_id) + add_layer.input = [ + self.node_list[add_input_node_id], + self.node_list[skip_output_id], + ] + add_layer.output = self.node_list[end_node_id] + self.node_list[end_node_id].shape = add_layer.output_shape + + # Set weights to the additional conv layer. + if self.weighted: + filter_shape = (1,) * self.n_dim + weights = np.zeros((filters_end, filters_start) + filter_shape) + bias = np.zeros(filters_end) + new_conv_layer.set_weights( + (add_noise(weights, np.array([0, 1])), add_noise(bias, np.array([0, 1]))) + ) + + def to_concat_skip_model(self, start_id, end_id): + """Add a weighted add concatenate connection from after start node to end node. + Args: + start_id: The convolutional layer ID, after which to start the skip-connection. + end_id: The convolutional layer ID, after which to end the skip-connection. 
+ """ + self.operation_history.append(("to_concat_skip_model", start_id, end_id)) + filters_end = self.layer_list[end_id].output.shape[-1] + filters_start = self.layer_list[start_id].output.shape[-1] + start_node_id = self.layer_id_to_output_node_ids[start_id][0] + + pre_end_node_id = self.layer_id_to_input_node_ids[end_id][0] + end_node_id = self.layer_id_to_output_node_ids[end_id][0] + + skip_output_id = self._insert_pooling_layer_chain(start_node_id, end_node_id) + + concat_input_node_id = self._add_node(deepcopy(self.node_list[end_node_id])) + self._redirect_edge(pre_end_node_id, end_node_id, concat_input_node_id) + + concat_layer = StubConcatenate() + concat_layer.input = [ + self.node_list[concat_input_node_id], + self.node_list[skip_output_id], + ] + concat_output_node_id = self._add_node(Node(concat_layer.output_shape)) + self._add_edge(concat_layer, concat_input_node_id, concat_output_node_id) + self._add_edge(concat_layer, skip_output_id, concat_output_node_id) + concat_layer.output = self.node_list[concat_output_node_id] + self.node_list[concat_output_node_id].shape = concat_layer.output_shape + + # Add the concatenate layer. + new_conv_layer = get_conv_class(self.n_dim)( + filters_start + filters_end, filters_end, 1 + ) + self._add_edge(new_conv_layer, concat_output_node_id, end_node_id) + new_conv_layer.input = self.node_list[concat_output_node_id] + new_conv_layer.output = self.node_list[end_node_id] + self.node_list[end_node_id].shape = new_conv_layer.output_shape + + if self.weighted: + filter_shape = (1,) * self.n_dim + weights = np.zeros((filters_end, filters_end) + filter_shape) + for i in range(filters_end): + filter_weight = np.zeros((filters_end,) + filter_shape) + center_index = (i,) + (0,) * self.n_dim + filter_weight[center_index] = 1 + weights[i, ...] 
= filter_weight + weights = np.concatenate( + (weights, np.zeros((filters_end, filters_start) + filter_shape)), axis=1 + ) + bias = np.zeros(filters_end) + new_conv_layer.set_weights( + (add_noise(weights, np.array([0, 1])), add_noise(bias, np.array([0, 1]))) + ) + + def _insert_pooling_layer_chain(self, start_node_id, end_node_id): + skip_output_id = start_node_id + for layer in self._get_pooling_layers(start_node_id, end_node_id): + new_layer = deepcopy(layer) + if is_layer(new_layer, "Conv"): + filters = self.node_list[start_node_id].shape[-1] + new_layer = get_conv_class(self.n_dim)(filters, filters, 1, layer.stride) + if self.weighted: + init_conv_weight(new_layer) + else: + new_layer = deepcopy(layer) + skip_output_id = self.add_layer(new_layer, skip_output_id) + skip_output_id = self.add_layer(StubReLU(), skip_output_id) + return skip_output_id + + def extract_descriptor(self): + """Extract the the description of the Graph as an instance of NetworkDescriptor.""" + main_chain = self.get_main_chain() + index_in_main_chain = {} + for index, u in enumerate(main_chain): + index_in_main_chain[u] = index + + ret = NetworkDescriptor() + for u in main_chain: + for v, layer_id in self.adj_list[u]: + if v not in index_in_main_chain: + continue + layer = self.layer_list[layer_id] + copied_layer = copy(layer) + copied_layer.weights = None + ret.add_layer(deepcopy(copied_layer)) + + for u in index_in_main_chain: + for v, layer_id in self.adj_list[u]: + if v not in index_in_main_chain: + temp_u = u + temp_v = v + temp_layer_id = layer_id + skip_type = None + while not (temp_v in index_in_main_chain and temp_u in index_in_main_chain): + if is_layer(self.layer_list[temp_layer_id], "Concatenate"): + skip_type = NetworkDescriptor.CONCAT_CONNECT + if is_layer(self.layer_list[temp_layer_id], "Add"): + skip_type = NetworkDescriptor.ADD_CONNECT + temp_u = temp_v + temp_v, temp_layer_id = self.adj_list[temp_v][0] + ret.add_skip_connection( + index_in_main_chain[u], 
index_in_main_chain[temp_u], skip_type + ) + + elif index_in_main_chain[v] - index_in_main_chain[u] != 1: + skip_type = None + if is_layer(self.layer_list[layer_id], "Concatenate"): + skip_type = NetworkDescriptor.CONCAT_CONNECT + if is_layer(self.layer_list[layer_id], "Add"): + skip_type = NetworkDescriptor.ADD_CONNECT + ret.add_skip_connection( + index_in_main_chain[u], index_in_main_chain[v], skip_type + ) + + return ret + + def clear_weights(self): + ''' clear weights of the graph + ''' + self.weighted = False + for layer in self.layer_list: + layer.weights = None + + def produce_torch_model(self): + """Build a new Torch model based on the current graph.""" + return TorchModel(self) + + def produce_keras_model(self): + """Build a new keras model based on the current graph.""" + return KerasModel(self).model + + def produce_onnx_model(self): + """Build a new ONNX model based on the current graph.""" + return ONNXModel(self) + + def parsing_onnx_model(self, onnx_model): + '''to do in the future to use the onnx model + ''' + return ONNXModel(onnx_model) + + def produce_json_model(self): + """Build a new Json model based on the current graph.""" + return JSONModel(self).data + + @classmethod + def parsing_json_model(self, json_model): + '''build a graph from json + ''' + return json_to_graph(json_model) + + def _layer_ids_in_order(self, layer_ids): + node_id_to_order_index = {} + for index, node_id in enumerate(self.topological_order): + node_id_to_order_index[node_id] = index + return sorted( + layer_ids, + key=lambda layer_id: node_id_to_order_index[ + self.layer_id_to_output_node_ids[layer_id][0] + ], + ) + + def _layer_ids_by_type(self, type_str): + return list( + filter( + lambda layer_id: is_layer(self.layer_list[layer_id], type_str), + range(self.n_layers), + ) + ) + + def get_main_chain_layers(self): + """Return a list of layer IDs in the main chain.""" + main_chain = self.get_main_chain() + ret = [] + for u in main_chain: + for v, layer_id in 
self.adj_list[u]: + if v in main_chain and u in main_chain: + ret.append(layer_id) + return ret + + def _conv_layer_ids_in_order(self): + return list( + filter( + lambda layer_id: is_layer(self.layer_list[layer_id], "Conv"), + self.get_main_chain_layers(), + ) + ) + + def _dense_layer_ids_in_order(self): + return self._layer_ids_in_order(self._layer_ids_by_type("Dense")) + + def deep_layer_ids(self): + ret = [] + for layer_id in self.get_main_chain_layers(): + layer = self.layer_list[layer_id] + if is_layer(layer, "GlobalAveragePooling"): + break + if is_layer(layer, "Add") or is_layer(layer, "Concatenate"): + continue + ret.append(layer_id) + return ret + + def wide_layer_ids(self): + return ( + self._conv_layer_ids_in_order()[:-1] + self._dense_layer_ids_in_order()[:-1] + ) + + def skip_connection_layer_ids(self): + return self.deep_layer_ids()[:-1] + + def size(self): + return sum(list(map(lambda x: x.size(), self.layer_list))) + + def get_main_chain(self): + """Returns the main chain node ID list.""" + pre_node = {} + distance = {} + for i in range(self.n_nodes): + distance[i] = 0 + pre_node[i] = i + for i in range(self.n_nodes - 1): + for u in range(self.n_nodes): + for v, _ in self.adj_list[u]: + if distance[u] + 1 > distance[v]: + distance[v] = distance[u] + 1 + pre_node[v] = u + temp_id = 0 + for i in range(self.n_nodes): + if distance[i] > distance[temp_id]: + temp_id = i + ret = [] + for i in range(self.n_nodes + 5): + ret.append(temp_id) + if pre_node[temp_id] == temp_id: + break + temp_id = pre_node[temp_id] + assert temp_id == pre_node[temp_id] + ret.reverse() + return ret + + +class TorchModel(torch.nn.Module): + """A neural network class using pytorch constructed from an instance of Graph.""" + + def __init__(self, graph): + super(TorchModel, self).__init__() + self.graph = graph + self.layers = [] + for layer in graph.layer_list: + self.layers.append(layer.to_real_layer()) + if graph.weighted: + for index, layer in enumerate(self.layers): + 
set_stub_weight_to_torch(self.graph.layer_list[index], layer) + for index, layer in enumerate(self.layers): + self.add_module(str(index), layer) + + def forward(self, input_tensor): + topo_node_list = self.graph.topological_order + output_id = topo_node_list[-1] + input_id = topo_node_list[0] + + node_list = deepcopy(self.graph.node_list) + node_list[input_id] = input_tensor + + for v in topo_node_list: + for u, layer_id in self.graph.reverse_adj_list[v]: + layer = self.graph.layer_list[layer_id] + torch_layer = self.layers[layer_id] + + if isinstance(layer, (StubAdd, StubConcatenate)): + edge_input_tensor = list( + map( + lambda x: node_list[x], + self.graph.layer_id_to_input_node_ids[layer_id], + ) + ) + else: + edge_input_tensor = node_list[u] + + temp_tensor = torch_layer(edge_input_tensor) + node_list[v] = temp_tensor + return node_list[output_id] + + def set_weight_to_graph(self): + self.graph.weighted = True + for index, layer in enumerate(self.layers): + set_torch_weight_to_stub(layer, self.graph.layer_list[index]) + + +class KerasModel: + def __init__(self, graph): + import keras + + self.graph = graph + self.layers = [] + for layer in graph.layer_list: + self.layers.append(to_real_keras_layer(layer)) + + # Construct the keras graph. 
+ # Input + topo_node_list = self.graph.topological_order + output_id = topo_node_list[-1] + input_id = topo_node_list[0] + input_tensor = keras.layers.Input(shape=graph.node_list[input_id].shape) + + node_list = deepcopy(self.graph.node_list) + node_list[input_id] = input_tensor + + # Output + for v in topo_node_list: + for u, layer_id in self.graph.reverse_adj_list[v]: + layer = self.graph.layer_list[layer_id] + keras_layer = self.layers[layer_id] + + if isinstance(layer, (StubAdd, StubConcatenate)): + edge_input_tensor = list( + map( + lambda x: node_list[x], + self.graph.layer_id_to_input_node_ids[layer_id], + ) + ) + else: + edge_input_tensor = node_list[u] + + temp_tensor = keras_layer(edge_input_tensor) + node_list[v] = temp_tensor + + output_tensor = node_list[output_id] + output_tensor = keras.layers.Activation("softmax", name="activation_add")( + output_tensor + ) + self.model = keras.models.Model(inputs=input_tensor, outputs=output_tensor) + + if graph.weighted: + for index, layer in enumerate(self.layers): + set_stub_weight_to_keras(self.graph.layer_list[index], layer) + + def set_weight_to_graph(self): + self.graph.weighted = True + for index, layer in enumerate(self.layers): + set_keras_weight_to_stub(layer, self.graph.layer_list[index]) + + +class ONNXModel: + # to do in the future using onnx ir + def __init__(self, graph): + pass + + +class JSONModel: + def __init__(self, graph): + data = dict() + node_list = list() + layer_list = list() + operation_history = list() + + data["input_shape"] = graph.input_shape + vis = graph.vis + data["vis"] = list(vis.keys()) if vis is not None else None + data["weighted"] = graph.weighted + + for item in graph.operation_history: + if item[0] == "to_deeper_model": + operation_history.append( + [ + item[0], + item[1], + layer_description_extractor(item[2], graph.node_to_id), + ] + ) + else: + operation_history.append(item) + data["operation_history"] = operation_history + data["layer_id_to_input_node_ids"] = 
graph.layer_id_to_input_node_ids + data["layer_id_to_output_node_ids"] = graph.layer_id_to_output_node_ids + data["adj_list"] = graph.adj_list + data["reverse_adj_list"] = graph.reverse_adj_list + + for node in graph.node_list: + node_id = graph.node_to_id[node] + node_information = node.shape + node_list.append((node_id, node_information)) + + for layer_id, item in enumerate(graph.layer_list): + layer = graph.layer_list[layer_id] + layer_information = layer_description_extractor(layer, graph.node_to_id) + layer_list.append((layer_id, layer_information)) + + data["node_list"] = node_list + data["layer_list"] = layer_list + + self.data = data + + +def graph_to_onnx(graph, onnx_model_path): + import onnx + # to do in the future using onnx ir + onnx_out = graph.produce_onnx_model() + onnx.save(onnx_out, onnx_model_path) + return onnx_out + + +def onnx_to_graph(onnx_model, input_shape): + import onnx + # to do in the future using onnx ir + graph = Graph(input_shape, False) + graph.parsing_onnx_model(onnx_model) + return graph + + +def graph_to_json(graph, json_model_path): + json_out = graph.produce_json_model() + with open(json_model_path, "w") as fout: + json.dump(json_out, fout) + json_out = json.dumps(json_out) + return json_out + + +def json_to_graph(json_model: str): + json_model = json.loads(json_model) + # restore graph data from json data + input_shape = tuple(json_model["input_shape"]) + node_list = list() + node_to_id = dict() + id_to_node = dict() + layer_list = list() + layer_to_id = dict() + operation_history = list() + graph = Graph(input_shape, False) + + graph.input_shape = input_shape + vis = json_model["vis"] + graph.vis = {tuple(item): True for item in vis} if vis is not None else None + graph.weighted = json_model["weighted"] + layer_id_to_input_node_ids = json_model["layer_id_to_input_node_ids"] + graph.layer_id_to_input_node_ids = { + int(k): v for k, v in layer_id_to_input_node_ids.items() + } + layer_id_to_output_node_ids = 
json_model["layer_id_to_output_node_ids"] + graph.layer_id_to_output_node_ids = { + int(k): v for k, v in layer_id_to_output_node_ids.items() + } + adj_list = {} + for k, v in json_model["adj_list"].items(): + adj_list[int(k)] = [tuple(i) for i in v] + graph.adj_list = adj_list + reverse_adj_list = {} + for k, v in json_model["reverse_adj_list"].items(): + reverse_adj_list[int(k)] = [tuple(i) for i in v] + graph.reverse_adj_list = reverse_adj_list + + for item in json_model["node_list"]: + new_node = Node(tuple(item[1])) + node_id = item[0] + node_list.append(new_node) + node_to_id[new_node] = node_id + id_to_node[node_id] = new_node + + for item in json_model["operation_history"]: + if item[0] == "to_deeper_model": + operation_history.append( + (item[0], item[1], layer_description_builder(item[2], id_to_node)) + ) + else: + operation_history.append(item) + graph.operation_history = operation_history + + for item in json_model["layer_list"]: + new_layer = layer_description_builder(item[1], id_to_node) + layer_id = int(item[0]) + layer_list.append(new_layer) + layer_to_id[new_layer] = layer_id + + graph.node_list = node_list + graph.node_to_id = node_to_id + graph.layer_list = layer_list + graph.layer_to_id = layer_to_id + + return graph diff --git a/src/sdk/pynni/nni/networkmorphism_tuner/graph_transformer.py b/src/sdk/pynni/nni/networkmorphism_tuner/graph_transformer.py new file mode 100644 index 0000000000..a318188f3e --- /dev/null +++ b/src/sdk/pynni/nni/networkmorphism_tuner/graph_transformer.py @@ -0,0 +1,176 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# +# MIT License +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and +# associated documentation files (the "Software"), to deal in the Software without restriction, +# including without limitation the rights to use, copy, modify, merge, publish, distribute, +# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or +# substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT +# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT +# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+# ================================================================================================== + +from copy import deepcopy + +from random import randrange, sample + +from nni.networkmorphism_tuner.graph import NetworkDescriptor +from nni.networkmorphism_tuner.layers import ( + StubDense, + StubReLU, + get_batch_norm_class, + get_conv_class, + get_dropout_class, + get_pooling_class, + is_layer, +) +from nni.networkmorphism_tuner.utils import Constant + + +def to_wider_graph(graph): + ''' wider graph + ''' + weighted_layer_ids = graph.wide_layer_ids() + weighted_layer_ids = list( + filter(lambda x: graph.layer_list[x].output.shape[-1], weighted_layer_ids) + ) + wider_layers = sample(weighted_layer_ids, 1) + + for layer_id in wider_layers: + layer = graph.layer_list[layer_id] + if is_layer(layer, "Conv"): + n_add = layer.filters + else: + n_add = layer.units + + graph.to_wider_model(layer_id, n_add) + return graph + + +def to_skip_connection_graph(graph): + ''' skip connection graph + ''' + # The last conv layer cannot be widen since wider operator cannot be done over the two sides of flatten. 
def create_new_layer(layer, n_dim):
    '''Build a randomly chosen new layer compatible with ``layer``'s output.

    Arguments:
        layer: existing stub layer after which the new layer is inserted.
        n_dim {int}: dimensionality of the network (1, 2 or 3).

    Returns:
        A stub layer instance suitable for ``graph.to_deeper_model``.
    '''
    input_shape = layer.output.shape

    # Candidate classes, narrowed below so we never stack two layers of the
    # same kind (e.g. a ReLU immediately after a ReLU).
    dense_deeper_classes = [StubDense, get_dropout_class(n_dim), StubReLU]
    conv_deeper_classes = [get_conv_class(n_dim), get_batch_norm_class(n_dim), StubReLU]
    if is_layer(layer, "ReLU"):
        conv_deeper_classes = [get_conv_class(n_dim), get_batch_norm_class(n_dim)]
        dense_deeper_classes = [StubDense, get_dropout_class(n_dim)]
    elif is_layer(layer, "Dropout"):
        dense_deeper_classes = [StubDense, StubReLU]
    elif is_layer(layer, "BatchNormalization"):
        conv_deeper_classes = [get_conv_class(n_dim), StubReLU]

    # A rank-1 output shape means we are in the dense (classifier) part of
    # the network; anything else is the convolutional part.
    if len(input_shape) == 1:
        layer_class = sample(dense_deeper_classes, 1)[0]
    else:
        layer_class = sample(conv_deeper_classes, 1)[0]

    if layer_class == StubDense:
        new_layer = StubDense(input_shape[0], input_shape[0])
    elif layer_class == get_dropout_class(n_dim):
        new_layer = layer_class(Constant.DENSE_DROPOUT_RATE)
    elif layer_class == get_conv_class(n_dim):
        new_layer = layer_class(
            input_shape[-1], input_shape[-1], sample((1, 3, 5), 1)[0], stride=1
        )
    elif layer_class == get_batch_norm_class(n_dim):
        new_layer = layer_class(input_shape[-1])
    elif layer_class == get_pooling_class(n_dim):
        # NOTE(review): pooling never appears in the candidate lists above,
        # so this branch looks unreachable today; kept for future candidates.
        new_layer = layer_class(sample((1, 3, 5), 1)[0])
    else:
        new_layer = layer_class()

    return new_layer


def to_deeper_graph(graph):
    '''Insert one new layer at a randomly chosen position in ``graph``.

    Returns the mutated graph, or ``None`` when the graph already has at
    least ``Constant.MAX_LAYERS`` deepening candidates.
    '''
    weighted_layer_ids = graph.deep_layer_ids()
    if len(weighted_layer_ids) >= Constant.MAX_LAYERS:
        return None

    for chosen_id in sample(weighted_layer_ids, 1):
        target = graph.layer_list[chosen_id]
        graph.to_deeper_model(chosen_id, create_new_layer(target, graph.n_dim))
    return graph


def legal_graph(graph):
    '''Return True iff ``graph`` contains no duplicated skip connection.'''
    skips = graph.extract_descriptor().skip_connections
    # Duplicates collapse inside a set, so a length mismatch means a repeat.
    return len(skips) == len(set(skips))


def transform(graph):
    '''Generate up to ``Constant.N_NEIGHBOURS`` random mutations of ``graph``.

    Each candidate applies exactly one of the three morphisms (deeper,
    wider, skip connection) to a deep copy of ``graph`` and is kept only
    when the result stays within ``Constant.MAX_MODEL_SIZE``.
    '''
    graphs = []
    # Attempt at most twice as many mutations as needed, since some attempts
    # are rejected (size limit, or to_deeper_graph returning None).
    for _ in range(Constant.N_NEIGHBOURS * 2):
        mutation_kind = randrange(3)
        candidate = None
        if mutation_kind == 0:
            candidate = to_deeper_graph(deepcopy(graph))
        elif mutation_kind == 1:
            candidate = to_wider_graph(deepcopy(graph))
        else:
            candidate = to_skip_connection_graph(deepcopy(graph))

        if candidate is not None and candidate.size() <= Constant.MAX_MODEL_SIZE:
            graphs.append(candidate)

        if len(graphs) >= Constant.N_NEIGHBOURS:
            break

    return graphs
import numpy as np

from nni.networkmorphism_tuner.layers import (
    StubDense,
    StubReLU,
    get_batch_norm_class,
    get_conv_class,
    get_n_dim,
)

# Relative magnitude of the random noise mixed into morphed weights so that
# a child network is not exactly symmetric to its parent.
NOISE_RATIO = 1e-4


def deeper_conv_block(conv_layer, kernel_size, weighted=True):
    '''Build an identity-initialised [ReLU, Conv, BatchNorm] block.

    The convolution starts as an identity filter (a single 1 at the kernel
    centre per channel), so inserting the block preserves the network
    function (Net2DeeperNet).

    Arguments:
        conv_layer: existing conv stub layer the block is inserted after.
        kernel_size {int}: kernel size of the new convolution.
        weighted {bool}: when False, skip identity weight initialisation.
    '''
    n_dim = get_n_dim(conv_layer)
    # BUGFIX: the kernel must match the convolution's dimensionality.
    # "(kernel_size,) * 2" hard-coded a 2-D kernel, producing wrongly shaped
    # weights for 1-D/3-D networks (init_conv_weight below already uses
    # n_dim). Identical behavior for the common 2-D case.
    filter_shape = (kernel_size,) * n_dim
    n_filters = conv_layer.filters
    weight = np.zeros((n_filters, n_filters) + filter_shape)
    center = tuple(map(lambda x: int((x - 1) / 2), filter_shape))
    for i in range(n_filters):
        filter_weight = np.zeros((n_filters,) + filter_shape)
        index = (i,) + center
        filter_weight[index] = 1
        weight[i, ...] = filter_weight
    bias = np.zeros(n_filters)
    new_conv_layer = get_conv_class(n_dim)(
        conv_layer.filters, n_filters, kernel_size=kernel_size
    )
    bn = get_batch_norm_class(n_dim)(n_filters)

    if weighted:
        new_conv_layer.set_weights(
            (add_noise(weight, np.array([0, 1])), add_noise(bias, np.array([0, 1])))
        )
        # BatchNorm identity: gamma=1, beta=0, running_mean=0, running_var=1.
        new_weights = [
            add_noise(np.ones(n_filters, dtype=np.float32), np.array([0, 1])),
            add_noise(np.zeros(n_filters, dtype=np.float32), np.array([0, 1])),
            add_noise(np.zeros(n_filters, dtype=np.float32), np.array([0, 1])),
            add_noise(np.ones(n_filters, dtype=np.float32), np.array([0, 1])),
        ]
        bn.set_weights(new_weights)

    return [StubReLU(), new_conv_layer, bn]


def dense_to_deeper_block(dense_layer, weighted=True):
    '''Build an identity-initialised [ReLU, Dense] block after a dense layer.
    '''
    units = dense_layer.units
    weight = np.eye(units)  # identity mapping keeps the network function
    bias = np.zeros(units)
    new_dense_layer = StubDense(units, units)
    if weighted:
        new_dense_layer.set_weights(
            (add_noise(weight, np.array([0, 1])), add_noise(bias, np.array([0, 1])))
        )
    return [StubReLU(), new_dense_layer]


def wider_pre_dense(layer, n_add, weighted=True):
    '''Widen a dense layer by ``n_add`` output units (Net2WiderNet).

    New units are noisy copies of randomly chosen existing units, so the
    widened layer computes (almost) the same function.
    '''
    if not weighted:
        return StubDense(layer.input_units, layer.units + n_add)

    n_units2 = layer.units

    teacher_w, teacher_b = layer.get_weights()
    rand = np.random.randint(n_units2, size=n_add)
    student_w = teacher_w.copy()
    student_b = teacher_b.copy()

    # Duplicate randomly picked output units with a little noise.
    for i in range(n_add):
        teacher_index = rand[i]
        new_weight = teacher_w[teacher_index, :]
        new_weight = new_weight[np.newaxis, :]
        student_w = np.concatenate((student_w, add_noise(new_weight, student_w)), axis=0)
        student_b = np.append(student_b, add_noise(teacher_b[teacher_index], student_b))

    new_pre_layer = StubDense(layer.input_units, n_units2 + n_add)
    new_pre_layer.set_weights((student_w, student_b))

    return new_pre_layer


def wider_pre_conv(layer, n_add_filters, weighted=True):
    '''Widen a conv layer by ``n_add_filters`` output filters (Net2WiderNet).
    '''
    n_dim = get_n_dim(layer)
    if not weighted:
        return get_conv_class(n_dim)(
            layer.input_channel,
            layer.filters + n_add_filters,
            kernel_size=layer.kernel_size,
        )

    n_pre_filters = layer.filters
    rand = np.random.randint(n_pre_filters, size=n_add_filters)
    teacher_w, teacher_b = layer.get_weights()

    student_w = teacher_w.copy()
    student_b = teacher_b.copy()
    # Duplicate randomly picked filters; noise is added once on the full
    # weight tensors below.
    for i in range(len(rand)):
        teacher_index = rand[i]
        new_weight = teacher_w[teacher_index, ...]
        new_weight = new_weight[np.newaxis, ...]
        student_w = np.concatenate((student_w, new_weight), axis=0)
        student_b = np.append(student_b, teacher_b[teacher_index])
    new_pre_layer = get_conv_class(n_dim)(
        layer.input_channel, n_pre_filters + n_add_filters, layer.kernel_size
    )
    new_pre_layer.set_weights(
        (add_noise(student_w, teacher_w), add_noise(student_b, teacher_b))
    )
    return new_pre_layer


def wider_next_conv(layer, start_dim, total_dim, n_add, weighted=True):
    '''Adapt the conv layer that consumes a widened output.

    Zero (plus noise) columns are inserted for the ``n_add`` new input
    channels at ``start_dim`` so the layer's output is unchanged.
    '''
    n_dim = get_n_dim(layer)
    if not weighted:
        return get_conv_class(n_dim)(layer.input_channel + n_add,
                                     layer.filters,
                                     kernel_size=layer.kernel_size,
                                     stride=layer.stride)
    n_filters = layer.filters
    teacher_w, teacher_b = layer.get_weights()

    new_weight_shape = list(teacher_w.shape)
    new_weight_shape[1] = n_add  # axis 1 is the input-channel axis
    new_weight = np.zeros(tuple(new_weight_shape))

    student_w = np.concatenate((teacher_w[:, :start_dim, ...].copy(),
                                add_noise(new_weight, teacher_w),
                                teacher_w[:, start_dim:total_dim, ...].copy()), axis=1)
    new_layer = get_conv_class(n_dim)(layer.input_channel + n_add,
                                      n_filters,
                                      kernel_size=layer.kernel_size,
                                      stride=layer.stride)
    new_layer.set_weights((student_w, teacher_b))
    return new_layer


def wider_bn(layer, start_dim, total_dim, n_add, weighted=True):
    '''Adapt a batch-norm layer to ``n_add`` extra features at ``start_dim``.
    '''
    n_dim = get_n_dim(layer)
    if not weighted:
        return get_batch_norm_class(n_dim)(layer.num_features + n_add)

    weights = layer.get_weights()

    # Identity statistics for the new features: gamma=1, beta=0, mean=0, var=1.
    new_weights = [
        add_noise(np.ones(n_add, dtype=np.float32), np.array([0, 1])),
        add_noise(np.zeros(n_add, dtype=np.float32), np.array([0, 1])),
        add_noise(np.zeros(n_add, dtype=np.float32), np.array([0, 1])),
        add_noise(np.ones(n_add, dtype=np.float32), np.array([0, 1])),
    ]

    student_w = tuple()
    for weight, new_weight in zip(weights, new_weights):
        temp_w = weight.copy()
        temp_w = np.concatenate(
            (temp_w[:start_dim], new_weight, temp_w[start_dim:total_dim])
        )
        student_w += (temp_w,)
    new_layer = get_batch_norm_class(n_dim)(layer.num_features + n_add)
    new_layer.set_weights(student_w)
    return new_layer


def wider_next_dense(layer, start_dim, total_dim, n_add, weighted=True):
    '''Adapt the dense layer that consumes a widened (flattened) output.
    '''
    if not weighted:
        return StubDense(layer.input_units + n_add, layer.units)
    teacher_w, teacher_b = layer.get_weights()
    student_w = teacher_w.copy()
    # Each upstream channel contributes this many flattened input units.
    n_units_each_channel = int(teacher_w.shape[1] / total_dim)

    new_weight = np.zeros((teacher_w.shape[0], n_add * n_units_each_channel))
    student_w = np.concatenate(
        (
            student_w[:, : start_dim * n_units_each_channel],
            add_noise(new_weight, student_w),
            student_w[
                :, start_dim * n_units_each_channel : total_dim * n_units_each_channel
            ],
        ),
        axis=1,
    )

    new_layer = StubDense(layer.input_units + n_add, layer.units)
    new_layer.set_weights((student_w, teacher_b))
    return new_layer


def add_noise(weights, other_weights):
    '''Return ``weights`` plus uniform noise scaled by ``other_weights``'s range.
    '''
    w_range = np.ptp(other_weights.flatten())
    noise_range = NOISE_RATIO * w_range
    noise = np.random.uniform(-noise_range / 2.0, noise_range / 2.0, weights.shape)
    return np.add(noise, weights)


def init_dense_weight(layer):
    '''Initialise a dense stub layer as a (noisy) identity mapping.
    '''
    units = layer.units
    weight = np.eye(units)
    bias = np.zeros(units)
    layer.set_weights(
        (add_noise(weight, np.array([0, 1])), add_noise(bias, np.array([0, 1])))
    )


def init_conv_weight(layer):
    '''Initialise a conv stub layer as a (noisy) identity filter.
    '''
    n_filters = layer.filters
    filter_shape = (layer.kernel_size,) * get_n_dim(layer)
    weight = np.zeros((n_filters, n_filters) + filter_shape)

    center = tuple(map(lambda x: int((x - 1) / 2), filter_shape))
    for i in range(n_filters):
        filter_weight = np.zeros((n_filters,) + filter_shape)
        index = (i,) + center
        filter_weight[index] = 1
        weight[i, ...] = filter_weight
    bias = np.zeros(n_filters)

    layer.set_weights(
        (add_noise(weight, np.array([0, 1])), add_noise(bias, np.array([0, 1])))
    )


def init_bn_weight(layer):
    '''Initialise a batch-norm stub layer with (noisy) identity statistics.
    '''
    n_filters = layer.num_features
    new_weights = [
        add_noise(np.ones(n_filters, dtype=np.float32), np.array([0, 1])),
        add_noise(np.zeros(n_filters, dtype=np.float32), np.array([0, 1])),
        add_noise(np.zeros(n_filters, dtype=np.float32), np.array([0, 1])),
        add_noise(np.ones(n_filters, dtype=np.float32), np.array([0, 1])),
    ]
    layer.set_weights(new_weights)
from abc import abstractmethod
# BUGFIX: ``Iterable`` lives in ``collections.abc``; importing it from
# ``collections`` is deprecated since Python 3.3 and raises ImportError on
# Python >= 3.10.
from collections.abc import Iterable

import torch
from torch import nn
from torch.nn import functional
from nni.networkmorphism_tuner.utils import Constant


class AvgPool(nn.Module):
    '''Abstract base module for the global average pooling variants below.
    '''
    def __init__(self):
        super().__init__()

    @abstractmethod
    def forward(self, input_tensor):
        pass


class GlobalAvgPool1d(AvgPool):
    '''Global average pooling over the spatial axis of a 3-D tensor.
    '''
    def forward(self, input_tensor):
        # Pool over all spatial positions, then drop the spatial axes so the
        # result has shape (batch, channels).
        return functional.avg_pool1d(input_tensor, input_tensor.size()[2:]).view(
            input_tensor.size()[:2]
        )


class GlobalAvgPool2d(AvgPool):
    '''Global average pooling over the spatial axes of a 4-D tensor.
    '''
    def forward(self, input_tensor):
        return functional.avg_pool2d(input_tensor, input_tensor.size()[2:]).view(
            input_tensor.size()[:2]
        )


class GlobalAvgPool3d(AvgPool):
    '''Global average pooling over the spatial axes of a 5-D tensor.
    '''
    def forward(self, input_tensor):
        return functional.avg_pool3d(input_tensor, input_tensor.size()[2:]).view(
            input_tensor.size()[:2]
        )


class StubLayer:
    '''Framework-independent description of one network layer.

    Stub layers carry graph nodes, shapes and (optionally) weights, and can
    be converted to real torch/keras layers via ``to_real_layer`` and the
    import/export hooks.
    '''
    def __init__(self, input_node=None, output_node=None):
        self.input = input_node    # producing graph node (or list of nodes)
        self.output = output_node  # consuming graph node
        self.weights = None        # framework-independent weight tuple

    def build(self, shape):
        '''Hook called with the input shape; no-op for stateless stubs.
        '''
        pass

    def set_weights(self, weights):
        '''Store framework-independent weights for this layer.
        '''
        self.weights = weights

    def import_weights(self, torch_layer):
        '''Copy weights from a real torch layer (no-op for stateless stubs).
        '''
        pass

    def import_weights_keras(self, keras_layer):
        '''Copy weights from a real keras layer (no-op for stateless stubs).
        '''
        pass

    def export_weights(self, torch_layer):
        '''Copy this stub's weights into a real torch layer (no-op here).
        '''
        pass

    def export_weights_keras(self, keras_layer):
        '''Copy this stub's weights into a real keras layer (no-op here).
        '''
        pass

    def get_weights(self):
        '''Return the stored framework-independent weights.
        '''
        return self.weights

    def size(self):
        '''Number of trainable parameters; stateless stubs report 0.
        '''
        return 0

    @property
    def output_shape(self):
        '''Shape of the produced tensor; identity-like layers keep the input shape.
        '''
        return self.input.shape

    def to_real_layer(self):
        '''Instantiate the corresponding real (torch) layer.
        '''
        pass

    def __str__(self):
        # Strip the leading "Stub" from the class name for display.
        return type(self).__name__[4:]


class StubWeightBiasLayer(StubLayer):
    '''Stub layer whose state is a (weight, bias) pair.
    '''
    def import_weights(self, torch_layer):
        self.set_weights(
            (torch_layer.weight.data.cpu().numpy(), torch_layer.bias.data.cpu().numpy())
        )

    def import_weights_keras(self, keras_layer):
        self.set_weights(keras_layer.get_weights())

    def export_weights(self, torch_layer):
        torch_layer.weight.data = torch.Tensor(self.weights[0])
        torch_layer.bias.data = torch.Tensor(self.weights[1])

    def export_weights_keras(self, keras_layer):
        keras_layer.set_weights(self.weights)


class StubBatchNormalization(StubWeightBiasLayer):
    '''Stub batch normalization layer (gamma, beta, running mean/var).
    '''
    def __init__(self, num_features, input_node=None, output_node=None):
        super().__init__(input_node, output_node)
        self.num_features = num_features

    def import_weights(self, torch_layer):
        self.set_weights(
            (
                torch_layer.weight.data.cpu().numpy(),
                torch_layer.bias.data.cpu().numpy(),
                torch_layer.running_mean.cpu().numpy(),
                torch_layer.running_var.cpu().numpy(),
            )
        )

    def export_weights(self, torch_layer):
        torch_layer.weight.data = torch.Tensor(self.weights[0])
        torch_layer.bias.data = torch.Tensor(self.weights[1])
        torch_layer.running_mean = torch.Tensor(self.weights[2])
        torch_layer.running_var = torch.Tensor(self.weights[3])

    def size(self):
        # gamma + beta + running mean + running variance
        return self.num_features * 4

    @abstractmethod
    def to_real_layer(self):
        pass


class StubBatchNormalization1d(StubBatchNormalization):
    '''1-D stub batch normalization.
    '''
    def to_real_layer(self):
        return torch.nn.BatchNorm1d(self.num_features)


class StubBatchNormalization2d(StubBatchNormalization):
    '''2-D stub batch normalization.
    '''
    def to_real_layer(self):
        return torch.nn.BatchNorm2d(self.num_features)


class StubBatchNormalization3d(StubBatchNormalization):
    '''3-D stub batch normalization.
    '''
    def to_real_layer(self):
        return torch.nn.BatchNorm3d(self.num_features)


class StubDense(StubWeightBiasLayer):
    '''Stub fully connected (Linear) layer.
    '''
    def __init__(self, input_units, units, input_node=None, output_node=None):
        super().__init__(input_node, output_node)
        self.input_units = input_units
        self.units = units

    @property
    def output_shape(self):
        return (self.units,)

    def import_weights_keras(self, keras_layer):
        # Keras stores dense kernels transposed relative to torch.
        self.set_weights((keras_layer.get_weights()[0].T, keras_layer.get_weights()[1]))

    def export_weights_keras(self, keras_layer):
        keras_layer.set_weights((self.weights[0].T, self.weights[1]))

    def size(self):
        # weight matrix + bias vector
        return self.input_units * self.units + self.units

    def to_real_layer(self):
        return torch.nn.Linear(self.input_units, self.units)


class StubConv(StubWeightBiasLayer):
    '''Stub convolution layer with "same"-style padding (kernel_size // 2).
    '''
    def __init__(self, input_channel, filters, kernel_size, stride=1, input_node=None, output_node=None):
        super().__init__(input_node, output_node)
        self.input_channel = input_channel
        self.filters = filters
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = int(self.kernel_size / 2)

    @property
    def output_shape(self):
        '''Spatial dims follow the conv formula; the channel dim becomes filters.'''
        spatial = tuple(
            int((dim + 2 * self.padding - self.kernel_size) / self.stride) + 1
            for dim in self.input.shape[:-1]
        )
        return spatial + (self.filters,)

    def import_weights_keras(self, keras_layer):
        self.set_weights((keras_layer.get_weights()[0].T, keras_layer.get_weights()[1]))

    def export_weights_keras(self, keras_layer):
        keras_layer.set_weights((self.weights[0].T, self.weights[1]))

    def size(self):
        # NOTE(review): this ignores input_channel and assumes a square 2-D
        # kernel; it looks like a deliberate rough size heuristic used for
        # model-size limits — confirm before relying on exact parameter counts.
        return self.filters * self.kernel_size * self.kernel_size + self.filters

    @abstractmethod
    def to_real_layer(self):
        pass

    def __str__(self):
        # e.g. "Conv1d(3, 64, 3, 1)"
        return (
            f"{super().__str__()}"
            f"({self.input_channel}, {self.filters}, {self.kernel_size}, {self.stride})"
        )


class StubConv1d(StubConv):
    '''1-D stub convolution.
    '''
    def to_real_layer(self):
        return torch.nn.Conv1d(
            self.input_channel,
            self.filters,
            self.kernel_size,
            stride=self.stride,
            padding=self.padding,
        )


class StubConv2d(StubConv):
    '''2-D stub convolution.
    '''
    def to_real_layer(self):
        return torch.nn.Conv2d(
            self.input_channel,
            self.filters,
            self.kernel_size,
            stride=self.stride,
            padding=self.padding,
        )


class StubConv3d(StubConv):
    '''3-D stub convolution.
    '''
    def to_real_layer(self):
        return torch.nn.Conv3d(
            self.input_channel,
            self.filters,
            self.kernel_size,
            stride=self.stride,
            padding=self.padding,
        )


class StubAggregateLayer(StubLayer):
    '''Stub layer combining several input nodes into one output.
    '''
    def __init__(self, input_nodes=None, output_node=None):
        if input_nodes is None:
            input_nodes = []
        super().__init__(input_nodes, output_node)


class StubConcatenate(StubAggregateLayer):
    '''Stub channel-wise concatenation of its inputs.
    '''
    @property
    def output_shape(self):
        # Channels add up; all other dimensions follow the first input.
        total_channels = sum(node.shape[-1] for node in self.input)
        return self.input[0].shape[:-1] + (total_channels,)

    def to_real_layer(self):
        return TorchConcatenate()


class StubAdd(StubAggregateLayer):
    '''Stub element-wise addition of two inputs.
    '''
    @property
    def output_shape(self):
        return self.input[0].shape

    def to_real_layer(self):
        return TorchAdd()


class StubFlatten(StubLayer):
    '''Stub layer flattening each sample to a 1-D vector.
    '''
    @property
    def output_shape(self):
        flat = 1
        for dim in self.input.shape:
            flat *= dim
        return (flat,)

    def to_real_layer(self):
        return TorchFlatten()


class StubReLU(StubLayer):
    '''Stub ReLU activation.
    '''
    def to_real_layer(self):
        return torch.nn.ReLU()


class StubSoftmax(StubLayer):
    '''Stub softmax activation.
    '''
    def to_real_layer(self):
        # NOTE(review): the torch realisation is LogSoftmax, presumably paired
        # with an NLL-style loss downstream — confirm against the trainer.
        return torch.nn.LogSoftmax(dim=1)


class StubDropout(StubLayer):
    '''Stub dropout layer with a fixed drop ``rate``.
    '''
    def __init__(self, rate, input_node=None, output_node=None):
        super().__init__(input_node, output_node)
        self.rate = rate

    @abstractmethod
    def to_real_layer(self):
        pass


class StubDropout1d(StubDropout):
    '''1-D stub dropout.
    '''
    def to_real_layer(self):
        return torch.nn.Dropout(self.rate)


class StubDropout2d(StubDropout):
    '''2-D stub dropout.
    '''
    def to_real_layer(self):
        return torch.nn.Dropout2d(self.rate)


class StubDropout3d(StubDropout):
    '''3-D stub dropout.
    '''
    def to_real_layer(self):
        return torch.nn.Dropout3d(self.rate)


class StubInput(StubLayer):
    '''Stub placeholder marking a network input.
    '''
    def __init__(self, input_node=None, output_node=None):
        super().__init__(input_node, output_node)


class StubPooling(StubLayer):
    '''Stub max pooling layer.
    '''

    def __init__(self,
                 kernel_size=None,
                 stride=None,
                 padding=0,
                 input_node=None,
                 output_node=None):
        super().__init__(input_node, output_node)
        self.kernel_size = (
            kernel_size if kernel_size is not None else Constant.POOLING_KERNEL_SIZE
        )
        # Non-overlapping pooling by default (stride == kernel size).
        self.stride = stride if stride is not None else self.kernel_size
        self.padding = padding

    @property
    def output_shape(self):
        # Each spatial dim shrinks by kernel_size but never below 1.
        spatial = tuple(
            max(int(dim / self.kernel_size), 1) for dim in self.input.shape[:-1]
        )
        return spatial + (self.input.shape[-1],)

    @abstractmethod
    def to_real_layer(self):
        pass


class StubPooling1d(StubPooling):
    '''1-D stub max pooling.
    '''

    def to_real_layer(self):
        return torch.nn.MaxPool1d(self.kernel_size, stride=self.stride)


class StubPooling2d(StubPooling):
    '''2-D stub max pooling.
    '''
    def to_real_layer(self):
        return torch.nn.MaxPool2d(self.kernel_size, stride=self.stride)


class StubPooling3d(StubPooling):
    '''3-D stub max pooling.
    '''
    def to_real_layer(self):
        return torch.nn.MaxPool3d(self.kernel_size, stride=self.stride)


class StubGlobalPooling(StubLayer):
    '''Stub global average pooling (spatial dims collapse to the channel dim).
    '''
    def __init__(self, input_node=None, output_node=None):
        super().__init__(input_node, output_node)

    @property
    def output_shape(self):
        return (self.input.shape[-1],)

    @abstractmethod
    def to_real_layer(self):
        pass


class StubGlobalPooling1d(StubGlobalPooling):
    '''1-D stub global pooling.
    '''
    def to_real_layer(self):
        return GlobalAvgPool1d()


class StubGlobalPooling2d(StubGlobalPooling):
    '''2-D stub global pooling.
    '''
    def to_real_layer(self):
        return GlobalAvgPool2d()


class StubGlobalPooling3d(StubGlobalPooling):
    '''3-D stub global pooling.
    '''
    def to_real_layer(self):
        return GlobalAvgPool3d()
class TorchAdd(nn.Module):
    '''Real torch module adding the two tensors of its input list.
    '''
    def forward(self, input_list):
        return input_list[0] + input_list[1]


class TorchFlatten(nn.Module):
    '''Real torch module flattening each sample to a 1-D vector.
    '''
    def forward(self, input_tensor):
        return input_tensor.view(input_tensor.size(0), -1)

def keras_dropout(layer, rate):
    '''Return the keras dropout variant matching the layer's input rank.
    '''

    from keras import layers

    input_dim = len(layer.input.shape)
    if input_dim == 2:
        return layers.SpatialDropout1D(rate)
    if input_dim == 3:
        return layers.SpatialDropout2D(rate)
    if input_dim == 4:
        return layers.SpatialDropout3D(rate)
    return layers.Dropout(rate)


def to_real_keras_layer(layer):
    '''Instantiate the keras layer equivalent to the given stub layer.
    '''
    from keras import layers

    if is_layer(layer, "Dense"):
        return layers.Dense(layer.units, input_shape=(layer.input_units,))
    if is_layer(layer, "Conv"):
        # "same" padding mirrors the stub's kernel_size // 2 padding.
        return layers.Conv2D(
            layer.filters,
            layer.kernel_size,
            input_shape=layer.input.shape,
            padding="same",
        )
    if is_layer(layer, "Pooling"):
        return layers.MaxPool2D(2)
    if is_layer(layer, "BatchNormalization"):
        return layers.BatchNormalization(input_shape=layer.input.shape)
    if is_layer(layer, "Concatenate"):
        return layers.Concatenate()
    if is_layer(layer, "Add"):
        return layers.Add()
    if is_layer(layer, "Dropout"):
        return keras_dropout(layer, layer.rate)
    if is_layer(layer, "ReLU"):
        return layers.Activation("relu")
    if is_layer(layer, "Softmax"):
        return layers.Activation("softmax")
    if is_layer(layer, "Flatten"):
        return layers.Flatten()
    if is_layer(layer, "GlobalAveragePooling"):
        return layers.GlobalAveragePooling2D()
    # NOTE(review): unknown stub kinds fall through and return None —
    # confirm callers only ever pass the kinds handled above.


def is_layer(layer, layer_type):
    '''Check whether ``layer`` belongs to the stub family named ``layer_type``.

    Returns:
        boolean -- True or False (None for an unrecognised ``layer_type``,
        matching the original if/elif chain).
    '''
    type_map = {
        "Input": StubInput,
        "Conv": StubConv,
        "Dense": StubDense,
        "BatchNormalization": StubBatchNormalization,
        "Concatenate": StubConcatenate,
        "Add": StubAdd,
        "Pooling": StubPooling,
        "Dropout": StubDropout,
        "Softmax": StubSoftmax,
        "ReLU": StubReLU,
        "Flatten": StubFlatten,
        "GlobalAveragePooling": StubGlobalPooling,
    }
    if layer_type in type_map:
        return isinstance(layer, type_map[layer_type])
    return None


def layer_description_extractor(layer, node_to_id):
    '''Serialise ``layer`` into a flat description, mapping nodes to ids.
    '''
    layer_input = layer.input
    layer_output = layer.output
    if layer_input is not None:
        if isinstance(layer_input, Iterable):
            # Aggregate layers carry a list of input nodes.
            layer_input = [node_to_id[node] for node in layer_input]
        else:
            layer_input = node_to_id[layer_input]

    if layer_output is not None:
        layer_output = node_to_id[layer_output]

    name = type(layer).__name__
    if isinstance(layer, StubConv):
        return (
            name,
            layer_input,
            layer_output,
            layer.input_channel,
            layer.filters,
            layer.kernel_size,
            layer.stride,
            layer.padding,
        )
    if isinstance(layer, StubDense):
        # NOTE(review): kept as a list (not a tuple) to match the original
        # serialisation format exactly.
        return [name, layer_input, layer_output, layer.input_units, layer.units]
    if isinstance(layer, StubBatchNormalization):
        return (name, layer_input, layer_output, layer.num_features)
    if isinstance(layer, StubDropout):
        return (name, layer_input, layer_output, layer.rate)
    if isinstance(layer, StubPooling):
        return (
            name,
            layer_input,
            layer_output,
            layer.kernel_size,
            layer.stride,
            layer.padding,
        )
    return (name, layer_input, layer_output)
def layer_width(layer):
    '''Return the width (units or filters) of a Dense/Conv stub layer.
    '''
    if is_layer(layer, "Dense"):
        return layer.units
    if is_layer(layer, "Conv"):
        return layer.filters
    raise TypeError("The layer should be either Dense or Conv layer.")


def set_torch_weight_to_stub(torch_layer, stub_layer):
    '''Copy a real torch layer's weights into the stub layer.'''
    stub_layer.import_weights(torch_layer)


def set_keras_weight_to_stub(keras_layer, stub_layer):
    '''Copy a real keras layer's weights into the stub layer.'''
    stub_layer.import_weights_keras(keras_layer)


def set_stub_weight_to_torch(stub_layer, torch_layer):
    '''Copy the stub layer's weights into a real torch layer.'''
    stub_layer.export_weights(torch_layer)


def set_stub_weight_to_keras(stub_layer, keras_layer):
    '''Copy the stub layer's weights into a real keras layer.'''
    stub_layer.export_weights_keras(keras_layer)


def get_conv_class(n_dim):
    '''Return the conv stub class for an ``n_dim``-dimensional network.'''
    return (StubConv1d, StubConv2d, StubConv3d)[n_dim - 1]


def get_dropout_class(n_dim):
    '''Return the dropout stub class for an ``n_dim``-dimensional network.'''
    return (StubDropout1d, StubDropout2d, StubDropout3d)[n_dim - 1]


def get_global_avg_pooling_class(n_dim):
    '''Return the global-pooling stub class for an ``n_dim``-dimensional network.'''
    return (StubGlobalPooling1d, StubGlobalPooling2d, StubGlobalPooling3d)[n_dim - 1]


def get_pooling_class(n_dim):
    '''Return the max-pooling stub class for an ``n_dim``-dimensional network.'''
    return (StubPooling1d, StubPooling2d, StubPooling3d)[n_dim - 1]


def get_batch_norm_class(n_dim):
    '''Return the batch-norm stub class for an ``n_dim``-dimensional network.'''
    return (
        StubBatchNormalization1d,
        StubBatchNormalization2d,
        StubBatchNormalization3d,
    )[n_dim - 1]


def get_n_dim(layer):
    '''Return the dimensionality (1/2/3) of a stub layer, or -1 if unknown.'''
    dim_table = (
        (1, (StubConv1d, StubDropout1d, StubGlobalPooling1d,
             StubPooling1d, StubBatchNormalization1d)),
        (2, (StubConv2d, StubDropout2d, StubGlobalPooling2d,
             StubPooling2d, StubBatchNormalization2d)),
        (3, (StubConv3d, StubDropout3d, StubGlobalPooling3d,
             StubPooling3d, StubBatchNormalization3d)),
    )
    for n_dim, classes in dim_table:
        if isinstance(layer, classes):
            return n_dim
    # Sentinel for dimension-agnostic layers (Dense, ReLU, ...).
    return -1
b/src/sdk/pynni/nni/networkmorphism_tuner/networkmorphism_tuner.py new file mode 100644 index 0000000000..33e273db18 --- /dev/null +++ b/src/sdk/pynni/nni/networkmorphism_tuner/networkmorphism_tuner.py @@ -0,0 +1,273 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and +# associated documentation files (the "Software"), to deal in the Software without restriction, +# including without limitation the rights to use, copy, modify, merge, publish, distribute, +# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or +# substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT +# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT +# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
import logging
import os

from nni.tuner import Tuner
from nni.networkmorphism_tuner.bayesian import BayesianOptimizer
from nni.networkmorphism_tuner.nn import CnnGenerator, MlpGenerator
from nni.networkmorphism_tuner.utils import Constant, OptimizeMode

from nni.networkmorphism_tuner.graph import graph_to_json, json_to_graph

logger = logging.getLogger("NetworkMorphism_AutoML")


class NetworkMorphismTuner(Tuner):
    """NetworkMorphismTuner is a tuner which using network morphism techniques."""

    def __init__(
        self,
        task="cv",
        input_width=32,
        input_channel=3,
        n_output_node=10,
        algorithm_name="Bayesian",
        optimize_mode="maximize",
        path="model_path",
        verbose=True,
        beta=Constant.BETA,
        t_min=Constant.T_MIN,
        max_model_size=Constant.MAX_MODEL_SIZE,
        default_model_len=Constant.MODEL_LEN,
        default_model_width=Constant.MODEL_WIDTH,
    ):
        """Initializer of the NetworkMorphismTuner.
        Keyword Arguments:
            task {str} -- task mode, such as "cv", "common" (default: {"cv"})
            input_width {int} -- input sample width (default: {32})
            input_channel {int} -- input sample channel count (default: {3})
            n_output_node {int} -- output node number (default: {10})
            algorithm_name {str} -- algorithm used in network morphism (default: {"Bayesian"})
            optimize_mode {str} -- "minimize" or "maximize" (default: {"maximize"})
            path {str} -- directory used to store model json files (default: {"model_path"})
            verbose {bool} -- whether to print the log (default: {True})
            beta {float} -- beta in the acquisition function (default: {Constant.BETA})
            t_min {float} -- minimum temperature for simulated annealing (default: {Constant.T_MIN})
            max_model_size {int} -- max model size of a graph (default: {Constant.MAX_MODEL_SIZE})
            default_model_len {int} -- default model length (default: {Constant.MODEL_LEN})
            default_model_width {int} -- default model width (default: {Constant.MODEL_WIDTH})
        Raises:
            NotImplementedError -- when ``task`` is neither "cv" nor "common".
        """
        if not os.path.exists(path):
            os.makedirs(path)
        self.path = os.path.join(os.getcwd(), path)
        if task == "cv":
            self.generators = [CnnGenerator]
        elif task == "common":
            self.generators = [MlpGenerator]
        else:
            # BUGFIX: the placeholder was never filled in, so the message
            # used to contain a literal "{}" instead of the task name.
            raise NotImplementedError(
                '{} task not supported in List ["cv","common"]'.format(task)
            )

        self.n_classes = n_output_node
        self.input_shape = (input_width, input_width, input_channel)

        self.t_min = t_min
        self.beta = beta
        self.algorithm_name = algorithm_name
        self.optimize_mode = OptimizeMode(optimize_mode)
        self.json = None
        self.total_data = {}
        self.verbose = verbose
        self.model_count = 0

        self.bo = BayesianOptimizer(self, self.t_min, self.optimize_mode, self.beta)
        self.training_queue = []
        self.descriptors = []
        self.history = []

        self.max_model_size = max_model_size
        self.default_model_len = default_model_len
        self.default_model_width = default_model_width

        self.search_space = dict()

    def update_search_space(self, search_space):
        """Update search space definition in tuner by search_space in neural architecture."""
        self.search_space = search_space

    def generate_parameters(self, parameter_id):
        """Return one trial's neural architecture, serialized as a JSON string.
        Arguments:
            parameter_id {int} -- id assigned by NNI to this trial.
        """
        if not self.history:
            self.init_search()

        new_father_id = None
        generated_graph = None
        if not self.training_queue:
            new_father_id, generated_graph = self.generate()
            new_model_id = self.model_count
            self.model_count += 1
            self.training_queue.append((generated_graph, new_father_id, new_model_id))
            self.descriptors.append(generated_graph.extract_descriptor())

        graph, father_id, model_id = self.training_queue.pop(0)

        # Serialize the graph and remember it so receive_trial_result can map
        # the final metric back to (json, father, model).
        json_model_path = os.path.join(self.path, str(model_id) + ".json")
        json_out = graph_to_json(graph, json_model_path)
        self.total_data[parameter_id] = (json_out, father_id, model_id)

        return json_out
+ """ + + reward = self.extract_scalar_reward(value) + + if parameter_id not in self.total_data: + raise RuntimeError("Received parameter_id not in total_data.") + + (_, father_id, model_id) = self.total_data[parameter_id] + + graph = self.bo.searcher.load_model_by_id(model_id) + + # to use the value and graph + self.add_model(reward, model_id) + self.update(father_id, graph, reward, model_id) + + def init_search(self): + """Call the generators to generate the initial architectures for the search.""" + if self.verbose: + logger.info("Initializing search.") + for generator in self.generators: + graph = generator(self.n_classes, self.input_shape).generate( + self.default_model_len, self.default_model_width + ) + model_id = self.model_count + self.model_count += 1 + self.training_queue.append((graph, -1, model_id)) + self.descriptors.append(graph.extract_descriptor()) + + if self.verbose: + logger.info("Initialization finished.") + + def generate(self): + """Generate the next neural architecture. + Returns: + other_info: Anything to be saved in the training queue together with the architecture. + generated_graph: An instance of Graph. + """ + generated_graph, new_father_id = self.bo.generate(self.descriptors) + if new_father_id is None: + new_father_id = 0 + generated_graph = self.generators[0]( + self.n_classes, self.input_shape + ).generate(self.default_model_len, self.default_model_width) + + return new_father_id, generated_graph + + def update(self, other_info, graph, metric_value, model_id): + """ Update the controller with evaluation result of a neural architecture. + Args: + other_info: Anything. In our case it is the father ID in the search tree. + graph: An instance of Graph. The trained neural architecture. + metric_value: The final evaluated metric value. + model_id: An integer. 
+ """ + father_id = other_info + self.bo.fit([graph.extract_descriptor()], [metric_value]) + self.bo.add_child(father_id, model_id) + + def add_model(self, metric_value, model_id): + """ Add model to the history, x_queue and y_queue + + Arguments: + metric_value: int --metric_value + graph: dict -- graph + model_id: int -- model_id + + Returns: + model dict + """ + + if self.verbose: + logger.info("Saving model.") + + # Update best_model text file + ret = {"model_id": model_id, "metric_value": metric_value} + self.history.append(ret) + if model_id == self.get_best_model_id(): + file = open(os.path.join(self.path, "best_model.txt"), "w") + file.write("best model: " + str(model_id)) + file.close() + + # descriptor = graph.extract_descriptor() + # self.x_queue.append(descriptor) + # self.y_queue.append(metric_value) + return ret + + def get_best_model_id(self): + """ Get the best model_id from history using the metric value + Returns: + int -- the best model_id + """ + + if self.optimize_mode is OptimizeMode.Maximize: + return max(self.history, key=lambda x: x["metric_value"])["model_id"] + return min(self.history, key=lambda x: x["metric_value"])["model_id"] + + def load_model_by_id(self, model_id): + """Get the model by model_id + Arguments: + model_id {int} -- model index + Returns: + Graph -- the model graph representation + """ + + with open(os.path.join(self.path, str(model_id) + ".json")) as fin: + json_str = fin.read().replace("\n", "") + + load_model = json_to_graph(json_str) + return load_model + + def load_best_model(self): + """ Get the best model by model id + Returns: + Graph -- the best model graph representation + """ + return self.load_model_by_id(self.get_best_model_id()) + + def get_metric_value_by_id(self, model_id): + """ Get the model metric valud by its model_id + Arguments: + model_id {int} -- model index + Returns: + float -- the model metric + """ + for item in self.history: + if item["model_id"] == model_id: + return item["metric_value"] + 
return None diff --git a/src/sdk/pynni/nni/networkmorphism_tuner/nn.py b/src/sdk/pynni/nni/networkmorphism_tuner/nn.py new file mode 100644 index 0000000000..363c06be5a --- /dev/null +++ b/src/sdk/pynni/nni/networkmorphism_tuner/nn.py @@ -0,0 +1,166 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and +# associated documentation files (the "Software"), to deal in the Software without restriction, +# including without limitation the rights to use, copy, modify, merge, publish, distribute, +# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or +# substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT +# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT +# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# ================================================================================================== + +from abc import abstractmethod + +from nni.networkmorphism_tuner.graph import Graph +from nni.networkmorphism_tuner.layers import (StubDense, StubDropout1d, + StubReLU, get_batch_norm_class, + get_conv_class, + get_dropout_class, + get_global_avg_pooling_class, + get_pooling_class) +from nni.networkmorphism_tuner.utils import Constant + + +class NetworkGenerator: + """The base class for generating a network. + It can be used to generate a CNN or Multi-Layer Perceptron. 
+ Attributes: + n_output_node: Number of output nodes in the network. + input_shape: A tuple to represent the input shape. + """ + + def __init__(self, n_output_node, input_shape): + self.n_output_node = n_output_node + self.input_shape = input_shape + + @abstractmethod + def generate(self, model_len, model_width): + pass + + +class CnnGenerator(NetworkGenerator): + """A class to generate CNN. + Attributes: + n_dim: `len(self.input_shape) - 1` + conv: A class that represents `(n_dim-1)` dimensional convolution. + dropout: A class that represents `(n_dim-1)` dimensional dropout. + global_avg_pooling: A class that represents `(n_dim-1)` dimensional Global Average Pooling. + pooling: A class that represents `(n_dim-1)` dimensional pooling. + batch_norm: A class that represents `(n_dim-1)` dimensional batch normalization. + """ + + def __init__(self, n_output_node, input_shape): + super(CnnGenerator, self).__init__(n_output_node, input_shape) + self.n_dim = len(self.input_shape) - 1 + if len(self.input_shape) > 4: + raise ValueError("The input dimension is too high.") + if len(self.input_shape) < 2: + raise ValueError("The input dimension is too low.") + self.conv = get_conv_class(self.n_dim) + self.dropout = get_dropout_class(self.n_dim) + self.global_avg_pooling = get_global_avg_pooling_class(self.n_dim) + self.pooling = get_pooling_class(self.n_dim) + self.batch_norm = get_batch_norm_class(self.n_dim) + + def generate(self, model_len=None, model_width=None): + """Generates a CNN. + Args: + model_len: An integer. Number of convolutional layers. + model_width: An integer. Number of filters for the convolutional layers. + Returns: + An instance of the class Graph. Represents the neural architecture graph of the generated model. 
+ """ + + if model_len is None: + model_len = Constant.MODEL_LEN + if model_width is None: + model_width = Constant.MODEL_WIDTH + pooling_len = int(model_len / 4) + graph = Graph(self.input_shape, False) + temp_input_channel = self.input_shape[-1] + output_node_id = 0 + stride = 1 + for i in range(model_len): + output_node_id = graph.add_layer(StubReLU(), output_node_id) + output_node_id = graph.add_layer( + self.batch_norm(graph.node_list[output_node_id].shape[-1]), output_node_id + ) + output_node_id = graph.add_layer( + self.conv(temp_input_channel, model_width, kernel_size=3, stride=stride), + output_node_id, + ) + temp_input_channel = model_width + if pooling_len == 0 or ((i + 1) % pooling_len == 0 and i != model_len - 1): + output_node_id = graph.add_layer(self.pooling(), output_node_id) + + output_node_id = graph.add_layer(self.global_avg_pooling(), output_node_id) + output_node_id = graph.add_layer( + self.dropout(Constant.CONV_DROPOUT_RATE), output_node_id + ) + output_node_id = graph.add_layer( + StubDense(graph.node_list[output_node_id].shape[0], model_width), + output_node_id, + ) + output_node_id = graph.add_layer(StubReLU(), output_node_id) + graph.add_layer(StubDense(model_width, self.n_output_node), output_node_id) + return graph + + +class MlpGenerator(NetworkGenerator): + """A class to generate Multi-Layer Perceptron. + """ + + def __init__(self, n_output_node, input_shape): + """Initialize the instance. + Args: + n_output_node: An integer. Number of output nodes in the network. + input_shape: A tuple. Input shape of the network. If it is 1D, ensure the value is appended by a comma + in the tuple. + """ + super(MlpGenerator, self).__init__(n_output_node, input_shape) + if len(self.input_shape) > 1: + raise ValueError("The input dimension is too high.") + + def generate(self, model_len=None, model_width=None): + """Generates a Multi-Layer Perceptron. + Args: + model_len: An integer. Number of hidden layers. 
+ model_width: An integer or a list of integers of length `model_len`. If it is a list, it represents the + number of nodes in each hidden layer. If it is an integer, all hidden layers have nodes equal to this + value. + Returns: + An instance of the class Graph. Represents the neural architecture graph of the generated model. + """ + if model_len is None: + model_len = Constant.MODEL_LEN + if model_width is None: + model_width = Constant.MODEL_WIDTH + if isinstance(model_width, list) and not len(model_width) == model_len: + raise ValueError("The length of 'model_width' does not match 'model_len'") + elif isinstance(model_width, int): + model_width = [model_width] * model_len + + graph = Graph(self.input_shape, False) + output_node_id = 0 + n_nodes_prev_layer = self.input_shape[0] + for width in model_width: + output_node_id = graph.add_layer( + StubDense(n_nodes_prev_layer, width), output_node_id + ) + output_node_id = graph.add_layer( + StubDropout1d(Constant.MLP_DROPOUT_RATE), output_node_id + ) + output_node_id = graph.add_layer(StubReLU(), output_node_id) + n_nodes_prev_layer = width + + graph.add_layer(StubDense(n_nodes_prev_layer, self.n_output_node), output_node_id) + return graph diff --git a/src/sdk/pynni/nni/networkmorphism_tuner/utils.py b/src/sdk/pynni/nni/networkmorphism_tuner/utils.py new file mode 100644 index 0000000000..6e4970e5b0 --- /dev/null +++ b/src/sdk/pynni/nni/networkmorphism_tuner/utils.py @@ -0,0 +1,50 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. 
+#
+# MIT License
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+# associated documentation files (the "Software"), to deal in the Software without restriction,
+# including without limitation the rights to use, copy, modify, merge, publish, distribute,
+# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all copies or
+# substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
+# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+# ==================================================================================================
+
+from enum import Enum, unique
+
+@unique
+class OptimizeMode(Enum):
+    """
+    Optimize Mode class
+    """
+
+    Minimize = "minimize"
+    Maximize = "maximize"
+
+class Constant:
+    '''Constant for the Tuner.
+ ''' + MAX_LAYERS = 100 + N_NEIGHBOURS = 8 + MAX_MODEL_SIZE = 1 << 24 + KERNEL_LAMBDA = 1.0 + BETA = 2.576 + MLP_MODEL_LEN = 3 + MLP_MODEL_WIDTH = 5 + MODEL_LEN = 3 + MODEL_WIDTH = 64 + POOLING_KERNEL_SIZE = 2 + DENSE_DROPOUT_RATE = 0.5 + CONV_DROPOUT_RATE = 0.25 + MLP_DROPOUT_RATE = 0.25 + CONV_BLOCK_DISTANCE = 2 + BATCH_SIZE = 128 + T_MIN = 0.0001 diff --git a/src/sdk/pynni/tests/test_assessor.py b/src/sdk/pynni/tests/test_assessor.py index fd4e4b7e4f..6898e7d3b4 100644 --- a/src/sdk/pynni/tests/test_assessor.py +++ b/src/sdk/pynni/tests/test_assessor.py @@ -90,4 +90,4 @@ def test_assessor(self): if __name__ == '__main__': - main() + main() \ No newline at end of file diff --git a/src/sdk/pynni/tests/test_multi_phase_tuner.py b/src/sdk/pynni/tests/test_multi_phase_tuner.py index cf4737fd04..5bf8eda3c5 100644 --- a/src/sdk/pynni/tests/test_multi_phase_tuner.py +++ b/src/sdk/pynni/tests/test_multi_phase_tuner.py @@ -1,3 +1,24 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and +# associated documentation files (the "Software"), to deal in the Software without restriction, +# including without limitation the rights to use, copy, modify, merge, publish, distribute, +# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or +# substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT +# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT +# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# ================================================================================================== + + import logging import random from io import BytesIO diff --git a/src/sdk/pynni/tests/test_networkmorphism_tuner.py b/src/sdk/pynni/tests/test_networkmorphism_tuner.py new file mode 100644 index 0000000000..09bbe820a9 --- /dev/null +++ b/src/sdk/pynni/tests/test_networkmorphism_tuner.py @@ -0,0 +1,201 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# +# MIT License +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and +# associated documentation files (the "Software"), to deal in the Software without restriction, +# including without limitation the rights to use, copy, modify, merge, publish, distribute, +# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all copies or +# substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT +# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT +# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+# ================================================================================================== + + +import json +from unittest import TestCase, main +from copy import deepcopy +import torch + +from nni.networkmorphism_tuner.graph import graph_to_json, json_to_graph +from nni.networkmorphism_tuner.graph_transformer import ( + to_deeper_graph, + to_skip_connection_graph, + to_wider_graph, +) +from nni.networkmorphism_tuner.layers import layer_description_extractor +from nni.networkmorphism_tuner.networkmorphism_tuner import NetworkMorphismTuner +from nni.networkmorphism_tuner.nn import CnnGenerator + + +class NetworkMorphismTestCase(TestCase): + """ unittest for NetworkMorphismTuner + """ + + def test_graph_json_transform(self): + """ unittest for graph_json_transform function + """ + + graph_init = CnnGenerator(10, (32, 32, 3)).generate() + graph_init = to_wider_graph(deepcopy(graph_init)) + graph_init = to_deeper_graph(deepcopy(graph_init)) + graph_init = to_skip_connection_graph(deepcopy(graph_init)) + json_out = graph_to_json(graph_init, "temp.json") + + graph_recover = json_to_graph(json_out) + + # compare all data in graph + self.assertEqual(graph_init.input_shape, graph_recover.input_shape) + self.assertEqual(graph_init.weighted, graph_recover.weighted) + self.assertEqual( + graph_init.layer_id_to_input_node_ids, + graph_recover.layer_id_to_input_node_ids, + ) + self.assertEqual(graph_init.adj_list, graph_recover.adj_list) + self.assertEqual(graph_init.reverse_adj_list, graph_recover.reverse_adj_list) + self.assertEqual( + len(graph_init.operation_history), len(graph_recover.operation_history) + ) + self.assertEqual(graph_init.n_dim, graph_recover.n_dim) + self.assertEqual(graph_init.conv, graph_recover.conv) + self.assertEqual(graph_init.batch_norm, graph_recover.batch_norm) + self.assertEqual(graph_init.vis, graph_recover.vis) + + node_list_init = [node.shape for node in graph_init.node_list] + node_list_recover = [node.shape for node in 
graph_recover.node_list] + self.assertEqual(node_list_init, node_list_recover) + self.assertEqual(len(graph_init.node_to_id), len(graph_recover.node_to_id)) + layer_list_init = [ + layer_description_extractor(item, graph_init.node_to_id) + for item in graph_init.layer_list + ] + layer_list_recover = [ + layer_description_extractor(item, graph_recover.node_to_id) + for item in graph_recover.layer_list + ] + self.assertEqual(layer_list_init, layer_list_recover) + + node_to_id_init = [graph_init.node_to_id[node] for node in graph_init.node_list] + node_to_id_recover = [ + graph_recover.node_to_id[node] for node in graph_recover.node_list + ] + self.assertEqual(node_to_id_init, node_to_id_recover) + + layer_to_id_init = [ + graph_init.layer_to_id[layer] for layer in graph_init.layer_list + ] + layer_to_id_recover = [ + graph_recover.layer_to_id[layer] for layer in graph_recover.layer_list + ] + self.assertEqual(layer_to_id_init, layer_to_id_recover) + + def test_to_wider_graph(self): + """ unittest for to_wider_graph function + """ + + graph_init = CnnGenerator(10, (32, 32, 3)).generate() + json_out = graph_to_json(graph_init, "temp.json") + graph_recover = json_to_graph(json_out) + wider_graph = to_wider_graph(deepcopy(graph_recover)) + model = wider_graph.produce_torch_model() + out = model(torch.ones(1, 3, 32, 32)) + self.assertEqual(out.shape, torch.Size([1, 10])) + + def test_to_deeper_graph(self): + """ unittest for to_deeper_graph function + """ + + graph_init = CnnGenerator(10, (32, 32, 3)).generate() + json_out = graph_to_json(graph_init, "temp.json") + graph_recover = json_to_graph(json_out) + deeper_graph = to_wider_graph(deepcopy(graph_recover)) + model = deeper_graph.produce_torch_model() + out = model(torch.ones(1, 3, 32, 32)) + self.assertEqual(out.shape, torch.Size([1, 10])) + + def test_to_skip_connection_graph(self): + """ unittest for to_skip_connection_graph function + """ + + graph_init = CnnGenerator(10, (32, 32, 3)).generate() + json_out = 
graph_to_json(graph_init, "temp.json") + graph_recover = json_to_graph(json_out) + skip_connection_graph = to_wider_graph(deepcopy(graph_recover)) + model = skip_connection_graph.produce_torch_model() + out = model(torch.ones(1, 3, 32, 32)) + self.assertEqual(out.shape, torch.Size([1, 10])) + + def test_generate_parameters(self): + """ unittest for generate_parameters function + """ + + tuner = NetworkMorphismTuner() + model_json = tuner.generate_parameters(0) + model_json = json.loads(model_json) + self.assertEqual(model_json["input_shape"], [32, 32, 3]) + self.assertEqual(tuner.total_data[0][1:], (-1, 0)) + + def test_receive_trial_result(self): + """ unittest for receive_trial_result function + """ + + tuner = NetworkMorphismTuner() + model_json = tuner.generate_parameters(0) + tuner.receive_trial_result(0, {}, 0.7) + (json_out, father_id, model_id) = tuner.total_data[0] + + self.assertEqual(father_id, -1) + self.assertEqual(model_json, json_out) + + ret = {"model_id": 0, "metric_value": 0.7} + self.assertEqual(tuner.bo.search_tree.adj_list[model_id], []) + self.assertEqual(tuner.history[-1], ret) + + def test_update_search_space(self): + """ unittest for update_search_space function + """ + + tuner = NetworkMorphismTuner() + self.assertEqual(tuner.search_space, dict()) + tuner.update_search_space("Test") + self.assertEqual(tuner.search_space, "Test") + + def test_init_search(self): + """ unittest for init_search function + """ + + tuner = NetworkMorphismTuner() + self.assertEqual(tuner.history, []) + tuner.init_search() + self.assertEqual(tuner.model_count, 1) + self.assertEqual(len(tuner.training_queue), 1) + self.assertEqual(len(tuner.descriptors), 1) + + def test_add_model(self): + """ unittest for add_model function + """ + + tuner = NetworkMorphismTuner() + tuner.add_model(0.8, 0) + ret = {"model_id": 0, "metric_value": 0.8} + self.assertEqual(tuner.history[-1], ret) + + def test_get_best_model_id(self): + """ unittest for get_best_model_id function + """ 
+ + tuner = NetworkMorphismTuner() + tuner.add_model(0.8, 0) + tuner.add_model(0.9, 1) + self.assertEqual(tuner.get_best_model_id(), 1) + + +if __name__ == "__main__": + main() diff --git a/src/sdk/pynni/tests/test_trial.py b/src/sdk/pynni/tests/test_trial.py index f7f854123b..b228d6add3 100644 --- a/src/sdk/pynni/tests/test_trial.py +++ b/src/sdk/pynni/tests/test_trial.py @@ -81,4 +81,4 @@ def _test_report_final_result(self, in_, out): if __name__ == '__main__': - main() + main() \ No newline at end of file diff --git a/src/sdk/pynni/tests/test_tuner.py b/src/sdk/pynni/tests/test_tuner.py index 9be308e071..8dc9aea810 100644 --- a/src/sdk/pynni/tests/test_tuner.py +++ b/src/sdk/pynni/tests/test_tuner.py @@ -23,7 +23,6 @@ from nni.protocol import CommandType, send, receive from nni.tuner import Tuner from nni.msg_dispatcher import MsgDispatcher - from io import BytesIO import json from unittest import TestCase, main diff --git a/tools/nni_cmd/config_schema.py b/tools/nni_cmd/config_schema.py index 49630b84e8..e9f2e4c259 100644 --- a/tools/nni_cmd/config_schema.py +++ b/tools/nni_cmd/config_schema.py @@ -58,6 +58,16 @@ },{ 'builtinTunerName': Or('BatchTuner', 'GridSearch'), Optional('gpuNum'): And(int, lambda x: 0 <= x <= 99999), +},{ + 'builtinTunerName': 'NetworkMorphism', + 'classArgs': { + Optional('optimize_mode'): Or('maximize', 'minimize'), + Optional('task'): And(str, lambda x: x in ['cv','nlp','common']), + Optional('input_width'): int, + Optional('input_channel'): int, + Optional('n_output_node'): int, + }, + Optional('gpuNum'): And(int, lambda x: 0 <= x <= 99999), },{ 'codeDir': os.path.exists, 'classFileName': str, diff --git a/tools/nni_cmd/launcher_utils.py b/tools/nni_cmd/launcher_utils.py index ee6aeaaa41..5a6d287339 100644 --- a/tools/nni_cmd/launcher_utils.py +++ b/tools/nni_cmd/launcher_utils.py @@ -191,6 +191,8 @@ def validate_annotation_content(experiment_config, spec_key, builtin_name): exit(1) else: # validate searchSpaceFile + if 
experiment_config[spec_key].get(builtin_name) == 'NetworkMorphism': + return if experiment_config[spec_key].get(builtin_name): if experiment_config.get('searchSpacePath') is None: print_error('Please set searchSpacePath!')