rename conv_transposeXd --> convXd_transpose #28198

Merged · 1 commit · Oct 21, 2020
@@ -92,7 +92,7 @@ def functional(self, place):
             "weight", self.weight_shape, dtype=self.dtype)
         b_var = fluid.data(
             "bias", (self.out_channels, ), dtype=self.dtype)
-        y_var = F.conv_transpose1d(
+        y_var = F.conv1d_transpose(
             x_var,
             w_var,
             None if self.no_bias else b_var,
@@ -128,7 +128,7 @@ def functional(self, place):
         else:
             output_size = self.output_size

-        y_var = F.conv_transpose2d(
+        y_var = F.conv2d_transpose(
             x_var,
             w_var,
             None if self.no_bias else b_var,
@@ -119,7 +119,7 @@ def functional(self, place):
             "weight", self.weight_shape, dtype=self.dtype)
         b_var = fluid.data(
             "bias", (self.num_filters, ), dtype=self.dtype)
-        y_var = F.conv_transpose3d(
+        y_var = F.conv3d_transpose(
             x_var,
             w_var,
             None if self.no_bias else b_var,
@@ -111,7 +111,7 @@ def static_graph_case_2(self):
             "weight", self.weight.shape, dtype=self.dtype)
         if not self.no_bias:
             bias = fluid.data("bias", self.bias.shape, dtype=self.dtype)
-        y = F.conv_transpose2d(
+        y = F.conv2d_transpose(
             x,
             weight,
             None if self.no_bias else bias,
@@ -134,7 +134,7 @@ def dygraph_case(self):
         x = dg.to_variable(self.input)
         weight = dg.to_variable(self.weight)
         bias = None if self.no_bias else dg.to_variable(self.bias)
-        y = F.conv_transpose2d(
+        y = F.conv2d_transpose(
             x,
             weight,
             bias,
@@ -215,7 +215,7 @@ def static_graph_case(self):
             "weight", self.weight_shape, dtype=self.dtype)
         if not self.no_bias:
             bias = fluid.data("bias", self.bias_shape, dtype=self.dtype)
-        y = F.conv_transpose2d(
+        y = F.conv2d_transpose(
             x,
             weight,
             None if self.no_bias else bias,
@@ -113,7 +113,7 @@ def static_graph_case_2(self):
             "weight", self.weight.shape, dtype=self.dtype)
         if not self.no_bias:
             bias = fluid.data("bias", self.bias.shape, dtype=self.dtype)
-        y = F.conv_transpose3d(
+        y = F.conv3d_transpose(
             x,
             weight,
             None if self.no_bias else bias,
@@ -138,7 +138,7 @@ def dygraph_case(self):
         x = dg.to_variable(self.input)
         weight = dg.to_variable(self.weight)
         bias = None if self.no_bias else dg.to_variable(self.bias)
-        y = F.conv_transpose3d(
+        y = F.conv3d_transpose(
             x,
             weight,
             bias,
@@ -222,7 +222,7 @@ def static_graph_case(self):
             "weight", self.weight_shape, dtype=self.dtype)
         if not self.no_bias:
             bias = fluid.data("bias", self.bias_shape, dtype=self.dtype)
-        y = F.conv_transpose3d(
+        y = F.conv3d_transpose(
             x,
             weight,
             None if self.no_bias else bias,
6 changes: 3 additions & 3 deletions — python/paddle/nn/functional/__init__.py
@@ -73,12 +73,12 @@
 from .common import upsample #DEFINE_ALIAS
 from .common import bilinear #DEFINE_ALIAS
 from .conv import conv1d #DEFINE_ALIAS
-from .conv import conv_transpose1d #DEFINE_ALIAS
+from .conv import conv1d_transpose #DEFINE_ALIAS
 from .common import linear #DEFINE_ALIAS
 from .conv import conv2d #DEFINE_ALIAS
-from .conv import conv_transpose2d #DEFINE_ALIAS
+from .conv import conv2d_transpose #DEFINE_ALIAS
 from .conv import conv3d #DEFINE_ALIAS
-from .conv import conv_transpose3d #DEFINE_ALIAS
+from .conv import conv3d_transpose #DEFINE_ALIAS
 # from .extension import add_position_encoding #DEFINE_ALIAS
 # from .extension import autoincreased_step_counter #DEFINE_ALIAS
 # from .extension import continuous_value_model #DEFINE_ALIAS
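Since only the spelling of the three functional aliases changes, a minimal sanity check (assuming a paddle build that includes this rename) is to confirm the new names resolve:

```python
import paddle.nn.functional as F

# The old conv_transposeXd spellings are removed; the new ones should resolve.
for name in ("conv1d_transpose", "conv2d_transpose", "conv3d_transpose"):
    assert hasattr(F, name), f"missing renamed alias: {name}"
```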
22 changes: 11 additions & 11 deletions — python/paddle/nn/functional/conv.py
@@ -15,11 +15,11 @@

 __all__ = [
     'conv1d',
-    'conv_transpose1d',
+    'conv1d_transpose',
     'conv2d',
-    'conv_transpose2d',
+    'conv2d_transpose',
     'conv3d',
-    'conv_transpose3d',
+    'conv3d_transpose',
 ]

 import numpy as np
@@ -541,7 +541,7 @@ def conv2d(x,
     return out


-def conv_transpose1d(x,
+def conv1d_transpose(x,
                      weight,
                      bias=None,
                      stride=1,
@@ -682,7 +682,7 @@ def conv_transpose1d(x,
               [[4, 2]]]).astype(np.float32)
           x_var = paddle.to_tensor(x)
           w_var = paddle.to_tensor(w)
-          y_var = F.conv_transpose1d(x_var, w_var)
+          y_var = F.conv1d_transpose(x_var, w_var)
           y_np = y_var.numpy()
           print y_np
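For reference, a runnable sketch of that docstring snippet under the new name. The value of `x` is only partially visible in this hunk, so the array below is a reconstruction around the visible tail, and the Python-2-style `print y_np` is written as a call:

```python
import numpy as np
import paddle
import paddle.nn.functional as F

# Reconstructed inputs (illustrative):
# x has shape (1, 2, 4) = (batch, in_channels, length);
# w has shape (2, 1, 2) = (in_channels, out_channels, kernel_size).
x = np.array([[[4, 0, 9, 7],
               [8, 0, 9, 2]]]).astype(np.float32)
w = np.array([[[7, 0]],
              [[4, 2]]]).astype(np.float32)

x_var = paddle.to_tensor(x)
w_var = paddle.to_tensor(w)
y_var = F.conv1d_transpose(x_var, w_var)  # output length = (4 - 1) * 1 + 2 = 5
print(y_var.numpy())  # [[[60. 16. 99. 75. 4.]]] with these inputs
```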

@@ -802,7 +802,7 @@ def conv_transpose1d(x,
     return out


-def conv_transpose2d(x,
+def conv2d_transpose(x,
                      weight,
                      bias=None,
                      stride=1,
@@ -920,7 +920,7 @@ def conv_transpose2d(x,
         None by default.

     Returns:
-        A Tensor representing the conv_transpose2d, whose
+        A Tensor representing the conv2d_transpose, whose
         data type is the same with input and shape is (num_batches, channels, out_h,
         out_w) or (num_batches, out_h, out_w, channels). The tensor variable storing
         transposed convolution result.
@@ -946,7 +946,7 @@ def conv_transpose2d(x,
           x_var = paddle.randn((2, 3, 8, 8), dtype='float32')
           w_var = paddle.randn((3, 6, 3, 3), dtype='float32')

-          y_var = F.conv_transpose2d(x_var, w_var)
+          y_var = F.conv2d_transpose(x_var, w_var)
           y_np = y_var.numpy()

           print(y_np.shape)
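With the defaults (stride 1, padding 0), each transposed output dimension is (size_in − 1) · stride − 2 · padding + kernel = (8 − 1) + 3 = 10, so the example prints (2, 6, 10, 10). A quick check of that arithmetic:

```python
import paddle
import paddle.nn.functional as F

x_var = paddle.randn((2, 3, 8, 8), dtype='float32')  # NCHW input
w_var = paddle.randn((3, 6, 3, 3), dtype='float32')  # (in_channels, out_channels, kH, kW)
y_var = F.conv2d_transpose(x_var, w_var)
assert tuple(y_var.shape) == (2, 6, 10, 10)          # (8 - 1) + 3 = 10 per spatial dim
```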
@@ -1242,7 +1242,7 @@ def conv3d(x,
     return out


-def conv_transpose3d(x,
+def conv3d_transpose(x,
                      weight,
                      bias=None,
                      stride=1,
@@ -1364,7 +1364,7 @@ def conv_transpose3d(x,
         None by default.

     Returns:
-        A Tensor representing the conv_transpose3d, whose data
+        A Tensor representing the conv3d_transpose, whose data
         type is the same with input and shape is (num_batches, channels, out_d, out_h,
         out_w) or (num_batches, out_d, out_h, out_w, channels). If act is None, the tensor
         variable storing the transposed convolution result, and if act is not None, the tensor
@@ -1391,7 +1391,7 @@ def conv_transpose3d(x,
           x_var = paddle.randn((2, 3, 8, 8, 8), dtype='float32')
           w_var = paddle.randn((3, 6, 3, 3, 3), dtype='float32')

-          y_var = F.conv_transpose3d(x_var, w_var)
+          y_var = F.conv3d_transpose(x_var, w_var)
           y_np = y_var.numpy()

           print(y_np.shape)
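The same per-dimension arithmetic applies here, (8 − 1) + 3 = 10 for depth, height, and width, so this 3-D example prints (2, 6, 10, 10, 10).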
12 changes: 6 additions & 6 deletions — python/paddle/nn/layer/conv.py
@@ -427,7 +427,7 @@ def __init__(self,
             data_format=data_format)

     def forward(self, x, output_size=None):
-        out = F.conv_transpose1d(
+        out = F.conv1d_transpose(
             x,
             self.weight,
             bias=self.bias,
@@ -744,7 +744,7 @@ def forward(self, x, output_size=None):
         else:
             output_padding = 0

-        out = F.conv_transpose2d(
+        out = F.conv2d_transpose(
             x,
             self.weight,
             bias=self.bias,
@@ -948,16 +948,16 @@ class ConvTranspose3d(_ConvNd):

     **Note**:

-          The conv_transpose3d can be seen as the backward of the conv3d. For conv3d,
+          The conv3d_transpose can be seen as the backward of the conv3d. For conv3d,
           when stride > 1, conv3d maps multiple input shapes to the same output shape,
-          so for conv_transpose3d, when stride > 1, one input shape maps to multiple output shapes.
+          so for conv3d_transpose, when stride > 1, one input shape maps to multiple output shapes.
           If output_size is None, :math:`D_{out} = D^\prime_{out}, H_{out} = H^\prime_{out},
           W_{out} = W^\prime_{out}`; else, the :math:`D_{out}` of the output
           size must be between :math:`D^\prime_{out}` and :math:`D^\prime_{out} + strides[0]`,
           the :math:`H_{out}` of the output size must be between :math:`H^\prime_{out}`
           and :math:`H^\prime_{out} + strides[1]`, and the :math:`W_{out}` of the output size must
           be between :math:`W^\prime_{out}` and :math:`W^\prime_{out} + strides[2]`;
-          conv_transpose3d can compute the kernel size automatically.
+          conv3d_transpose can compute the kernel size automatically.

     Parameters:
         in_channels(int): The number of channels in the input image.
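A minimal sketch of the ambiguity this note describes, using the ConvTranspose3d layer as named in this diff (current paddle releases spell it Conv3DTranspose): with stride 2, several output sizes are consistent with one input size, and output_size selects among them.

```python
import paddle
import paddle.nn as nn

x = paddle.randn((1, 4, 5, 5, 5))  # NCDHW input
conv = nn.ConvTranspose3d(in_channels=4, out_channels=2, kernel_size=3, stride=2)

y = conv(x)                                  # default: D'_out = (5 - 1) * 2 + 3 = 11
y_sized = conv(x, output_size=[12, 12, 12])  # any size in [D'_out, D'_out + stride) is valid
print(y.shape, y_sized.shape)                # [1, 2, 11, 11, 11] and [1, 2, 12, 12, 12]
```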
@@ -1078,7 +1078,7 @@ def forward(self, x, output_size=None):
         else:
             output_padding = 0

-        out = F.conv_transpose3d(
+        out = F.conv3d_transpose(
             x,
             self.weight,
             bias=self.bias,