From 362d9140c01d89b95e9e762196a6d04b54ea7152 Mon Sep 17 00:00:00 2001 From: qingqing01 Date: Tue, 18 Aug 2020 04:19:04 +0000 Subject: [PATCH] Update API to 2.0 test=develop --- python/paddle/incubate/hapi/model.py | 52 ++++++++++++++-------------- python/paddle/metric/metrics.py | 27 ++++++++------- 2 files changed, 40 insertions(+), 39 deletions(-) diff --git a/python/paddle/incubate/hapi/model.py b/python/paddle/incubate/hapi/model.py index 6e89ad179494d..32d069c525616 100644 --- a/python/paddle/incubate/hapi/model.py +++ b/python/paddle/incubate/hapi/model.py @@ -47,7 +47,7 @@ ] -class Input(fluid.dygraph.Layer): +class Input(paddle.nn.Layer): """ Define inputs the model. @@ -665,14 +665,14 @@ class Model(object): """ An Model object is network with training and inference features. Dynamic graph and static graph are supported at the same time, - switched by `fluid.enable_dygraph()`. The usage is as follows. + switched by `paddle.disable_static()`. The usage is as follows. But note, the switching between dynamic and static should be before instantiating a Model. The input description, i.e, hapi.Input, must be required for static graph. Args: - network (fluid.dygraph.Layer): The network is an instance of - fluid.dygraph.Layer. + network (paddle.nn.Layer): The network is an instance of + paddle.nn.Layer. inputs (Input|list|dict|None): `inputs`, entry points of network, could be a Input layer, or lits of Input layers, or dict (name: Input), or None. For static graph, @@ -690,7 +690,7 @@ class Model(object): import paddle.fluid as fluid import paddle.incubate.hapi as hapi - class MyNet(fluid.dygraph.Layer): + class MyNet(paddle.nn.Layer): def __init__(self, classifier_act=None): super(MyNet, self).__init__() self._fc1 = fluid.dygraph.Linear(784, 200, act=classifier_act) @@ -701,7 +701,7 @@ def forward(self, x): device = hapi.set_device('gpu') # if use static graph, do not set - fluid.enable_dygraph(device) + paddle.disable_static(device) # inputs and labels are not required for dynamic graph. 
input = hapi.Input('x', [None, 784], 'float32') @@ -775,7 +775,7 @@ def train_batch(self, inputs, labels=None): import paddle.fluid as fluid import paddle.incubate.hapi as hapi - class MyNet(fluid.dygraph.Layer): + class MyNet(paddle.nn.Layer): def __init__(self, classifier_act=None): super(MyNet, self).__init__() self._fc = fluid.dygraph.Linear(784, 10, act=classifier_act) @@ -785,7 +785,7 @@ def forward(self, x): return y device = hapi.set_device('gpu') - fluid.enable_dygraph(device) + paddle.disable_static(device) input = hapi.Input('x', [None, 784], 'float32') label = hapi.Input('label', [None, 1], 'int64') @@ -824,7 +824,7 @@ def eval_batch(self, inputs, labels=None): import paddle.fluid as fluid import paddle.incubate.hapi as hapi - class MyNet(fluid.dygraph.Layer): + class MyNet(paddle.nn.Layer): def __init__(self, classifier_act=None): super(MyNet, self).__init__() self._fc = fluid.dygraph.Linear(784, 10, act=classifier_act) @@ -834,7 +834,7 @@ def forward(self, x): return y device = hapi.set_device('gpu') - fluid.enable_dygraph(device) + paddle.disable_static(device) input = hapi.Input('x', [None, 784], 'float32') label = hapi.Input('label', [None, 1], 'int64') @@ -870,7 +870,7 @@ def test_batch(self, inputs): import paddle.fluid as fluid import paddle.incubate.hapi as hapi - class MyNet(fluid.dygraph.Layer): + class MyNet(paddle.nn.Layer): def __init__(self): super(MyNet, self).__init__() self._fc = fluid.dygraph.Linear(784, 1, act='softmax') @@ -879,7 +879,7 @@ def forward(self, x): return y device = hapi.set_device('gpu') - fluid.enable_dygraph(device) + paddle.disable_static(device) model = hapi.Model(MyNet()) model.prepare() @@ -918,7 +918,7 @@ def save(self, path): import paddle.fluid as fluid import paddle.incubate.hapi as hapi - class MyNet(fluid.dygraph.Layer): + class MyNet(paddle.nn.Layer): def __init__(self): super(MyNet, self).__init__() self._fc = fluid.dygraph.Linear(784, 1, act='softmax') @@ -927,7 +927,7 @@ def forward(self, x): return y device = hapi.set_device('cpu') - fluid.enable_dygraph(device) + paddle.disable_static(device) model = hapi.Model(MyNet()) model.save('checkpoint/test') """ @@ -970,7 +970,7 @@ def load(self, path, skip_mismatch=False, reset_optimizer=False): import paddle.fluid as fluid import paddle.incubate.hapi as hapi - class MyNet(fluid.dygraph.Layer): + class MyNet(paddle.nn.Layer): def __init__(self): super(MyNet, self).__init__() self._fc = fluid.dygraph.Linear(784, 1, act='softmax') @@ -979,7 +979,7 @@ def forward(self, x): return y device = hapi.set_device('cpu') - fluid.enable_dygraph(device) + paddle.disable_static(device) model = hapi.Model(MyNet()) model.load('checkpoint/test') """ @@ -1045,7 +1045,7 @@ def parameters(self, *args, **kwargs): import paddle.fluid as fluid from paddle.incubate.hapi import Model - class MyNet(fluid.dygraph.Layer): + class MyNet(paddle.nn.Layer): def __init__(self): super(MyNet, self).__init__() self._fc = fluid.dygraph.Linear(20, 10, act='softmax') @@ -1053,7 +1053,7 @@ def forward(self, x): y = self._fc(x) return y - fluid.enable_dygraph() + paddle.disable_static() model = Model(MyNet()) params = model.parameters() """ @@ -1068,7 +1068,7 @@ def prepare(self, optimizer=None, loss=None, metrics=None): and should be a Optimizer instance. It can be None in eval and test mode. 
loss (Loss|callable function|None): Loss function can - be a `fluid.dygraph.Layer` instance or any callable function + be a `paddle.nn.Layer` instance or any callable function taken the predicted values and ground truth values as input. It can be None when there is no loss. metrics (Metric|list of Metric|None): If metrics is set, all @@ -1087,7 +1087,7 @@ def prepare(self, optimizer=None, loss=None, metrics=None): startup_prog_seed = fluid.default_startup_program( ).random_seed fluid.disable_dygraph() - fluid.enable_dygraph(self._place) + paddle.disable_static(self._place) # enable_dygraph would create and switch to a new program, # thus also copy seed to the new program fluid.default_main_program().random_seed = main_prog_seed @@ -1100,9 +1100,9 @@ def prepare(self, optimizer=None, loss=None, metrics=None): self._optimizer = optimizer if loss is not None: - if not isinstance(loss, fluid.dygraph.Layer) and not callable(loss): + if not isinstance(loss, paddle.nn.Layer) and not callable(loss): raise TypeError("'loss' must be sub classes of " \ - "`fluid.dygraph.Layer` or any callable function.") + "`paddle.nn.Layer` or any callable function.") self._loss = loss metrics = metrics or [] @@ -1188,7 +1188,7 @@ def fit( dynamic = True device = hapi.set_device('gpu') - fluid.enable_dygraph(device) if dynamic else None + paddle.disable_static(device) if dynamic else None train_dataset = hapi.datasets.MNIST(mode='train') val_dataset = hapi.datasets.MNIST(mode='test') @@ -1221,7 +1221,7 @@ def fit( dynamic = True device = hapi.set_device('gpu') - fluid.enable_dygraph(device) if dynamic else None + paddle.disable_static(device) if dynamic else None train_dataset = hapi.datasets.MNIST(mode='train') train_loader = fluid.io.DataLoader(train_dataset, @@ -1368,7 +1368,7 @@ def evaluate( print(result) # imperative mode - fluid.enable_dygraph() + paddle.disable_static() model = hapi.Model(hapi.vision.LeNet()) model.prepare(metrics=paddle.metric.Accuracy()) result = model.evaluate(val_dataset, batch_size=64) @@ -1475,7 +1475,7 @@ def __len__(self): # imperative mode device = hapi.set_device('cpu') - fluid.enable_dygraph(device) + paddle.disable_static(device) model = hapi.Model(hapi.vision.LeNet()) model.prepare() result = model.predict(test_dataset, batch_size=64) diff --git a/python/paddle/metric/metrics.py b/python/paddle/metric/metrics.py index d6ffa7f403dcc..20d6b32d38baa 100644 --- a/python/paddle/metric/metrics.py +++ b/python/paddle/metric/metrics.py @@ -77,10 +77,10 @@ class Metric(object): .. 
code-block:: python def compute(pred, label): # sort prediction and slice the top-5 scores - pred = fluid.layers.argsort(pred, descending=True)[1][:, :5] + pred = paddle.argsort(pred, descending=True)[1][:, :5] # calculate whether the predictions are correct correct = pred == label - return fluid.layers.cast(correct, dtype='float32') + return paddle.cast(correct, dtype='float32') With the :code:`compute`, we split some calculations to OPs (which may run on GPU devices, will be faster), and only fetch 1 tensor with @@ -184,12 +184,12 @@ def __init__(self, topk=(1, ), name='acc', *args, **kwargs): import paddle paddle.disable_static() - x = paddle.to_variable(np.array([ + x = paddle.to_tensor(np.array([ [0.1, 0.2, 0.3, 0.4], [0.1, 0.4, 0.3, 0.2], [0.1, 0.2, 0.4, 0.3], [0.1, 0.2, 0.3, 0.4]])) - y = paddle.to_variable(np.array([[0], [1], [2], [3]])) + y = paddle.to_tensor(np.array([[0], [1], [2], [3]])) m = paddle.metric.Accuracy() correct = m.compute(x, y) @@ -203,14 +203,13 @@ def __init__(self, topk=(1, ), name='acc', *args, **kwargs): .. code-block:: python import paddle - import paddle.fluid as fluid import paddle.incubate.hapi as hapi paddle.disable_static() train_dataset = hapi.datasets.MNIST(mode='train') model = hapi.Model(hapi.vision.LeNet(classifier_activation=None)) - optim = fluid.optimizer.Adam( + optim = paddle.optimizer.Adam( learning_rate=0.001, parameter_list=model.parameters()) model.prepare( optim, @@ -241,9 +240,9 @@ def compute(self, pred, label, *args): Return: Tensor: Correct mask, a tensor with shape [batch_size, topk]. """ - pred = fluid.layers.argsort(pred, descending=True)[1][:, :self.maxk] + pred = paddle.argsort(pred, descending=True)[1][:, :self.maxk] correct = pred == label - return fluid.layers.cast(correct, dtype='float32') + return paddle.cast(correct, dtype='float32') def update(self, correct, *args): """ @@ -446,7 +445,7 @@ class Recall(Metric): Args: name (str, optional): String name of the metric instance. - Default is `precision`. + Default is `recall`. Example by standalone: @@ -583,10 +582,13 @@ class Auc(Metric): computed using the height of the precision values by the recall. Args: - name (str, optional): String name of the metric instance. Default - is `acc`. curve (str): Specifies the mode of the curve to be computed, 'ROC' or 'PR' for the Precision-Recall-curve. Default is 'ROC'. + num_thresholds (int): The number of thresholds to use when + discretizing the roc curve. Default is 4095. + 'ROC' or 'PR' for the Precision-Recall-curve. Default is 'ROC'. + name (str, optional): String name of the metric instance. Default + is `auc`. "NOTE: only implement the ROC curve type via Python now." @@ -615,7 +617,6 @@ class Auc(Metric): import numpy as np import paddle - import paddle.fluid as fluid import paddle.nn as nn import paddle.incubate.hapi as hapi @@ -640,7 +641,7 @@ def __len__(self): learning_rate=0.001, parameter_list=model.parameters()) def loss(x, y): - return fluid.layers.cross_entropy(x, y) + return nn.functional.nll_loss(paddle.log(x), y) model.prepare( optim,
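
For reference, below is a minimal, self-contained sketch of the 2.0-style usage the updated docstrings describe: dynamic graph enabled via paddle.disable_static(), networks written as paddle.nn.Layer subclasses wrapped in hapi.Model, and metrics taken from paddle.metric. It only illustrates the migrated API surface as it appears in the docstrings above and is not part of the patch itself; SimpleNet is a hypothetical stand-in, and the Adam keyword parameter_list follows the docstring examples in this snapshot of the API (it is not necessarily the final 2.0 spelling).

import numpy as np
import paddle
import paddle.incubate.hapi as hapi

# Dynamic graph is enabled with paddle.disable_static() rather than
# fluid.enable_dygraph().
paddle.disable_static()

# Standalone use of paddle.metric.Accuracy, following the updated
# docstring example: compute() -> update() -> accumulate().
x = paddle.to_tensor(np.array([
    [0.1, 0.2, 0.3, 0.4],
    [0.1, 0.4, 0.3, 0.2],
    [0.1, 0.2, 0.4, 0.3],
    [0.1, 0.2, 0.3, 0.4]]))
y = paddle.to_tensor(np.array([[0], [1], [2], [3]]))
m = paddle.metric.Accuracy()
correct = m.compute(x, y)
# Converted to numpy here to keep the sketch version-agnostic about
# whether update() accepts Tensors directly.
m.update(correct.numpy())
print(m.accumulate())  # 0.75 for this toy batch

# Networks passed to hapi.Model are now paddle.nn.Layer subclasses
# instead of fluid.dygraph.Layer; SimpleNet is a made-up example class.
class SimpleNet(paddle.nn.Layer):
    def __init__(self):
        super(SimpleNet, self).__init__()
        self._fc = paddle.nn.Linear(784, 10)

    def forward(self, x):
        return self._fc(x)

model = hapi.Model(SimpleNet())
optim = paddle.optimizer.Adam(
    learning_rate=0.001, parameter_list=model.parameters())
model.prepare(optim, paddle.nn.CrossEntropyLoss(), paddle.metric.Accuracy())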