Move fused_attention and fused_feedforward functional API path to incubate (#36704)

Move the Python APIs added in PRs #35905 and #35843 to the incubate directory.
limin2021 authored and zkh2016 committed Oct 26, 2021
1 parent ceb0cab commit 4fd7cd2
Showing 7 changed files with 31 additions and 19 deletions.
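For downstream code, the practical effect of this commit is an import-path change only. A minimal migration sketch (not part of the diff; it assumes a Paddle build that includes these fused ops):

# Old path, removed from paddle.nn.functional by this commit:
#   from paddle.nn.functional import fused_feedforward, fused_multi_head_attention
# New incubate path introduced below:
from paddle.incubate.nn.functional import fused_feedforward, fused_multi_head_attention
import paddle.incubate.nn.functional as incubate_f

print(incubate_f.fused_feedforward is fused_feedforward)  # expected: True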
2 changes: 0 additions & 2 deletions paddle/fluid/operators/fused/CMakeLists.txt
@@ -80,10 +80,8 @@ if (WITH_GPU OR WITH_ROCM)
nv_test(test_fused_dropout_act_bias SRCS fused_dropout_act_bias_test.cu DEPS tensor op_registry dropout_op layer_norm_op device_context generator memory)
nv_test(test_fused_layernorm_residual_dropout_bias SRCS fused_layernorm_residual_dropout_bias_test.cu DEPS tensor op_registry dropout_op layer_norm_op device_context generator memory)


op_library(fused_feedforward_op)
file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(fused_feedforward);\n")

# fused_attention_op
op_library(fused_attention_op)
file(APPEND ${pybind_file} "USE_CUDA_ONLY_OP(fused_attention);\n")
1 change: 0 additions & 1 deletion python/paddle/fluid/tests/unittests/CMakeLists.txt
@@ -91,7 +91,6 @@ foreach(TEST_OP ${MIXED_DIST_TEST_OPS})
endforeach()

if(NOT WITH_GPU)

LIST(REMOVE_ITEM TEST_OPS test_fused_feedforward_op)
LIST(REMOVE_ITEM TEST_OPS test_fused_attention_op)
endif()
python/paddle/fluid/tests/unittests/test_fused_attention_op.py
@@ -18,6 +18,7 @@
import paddle.nn as nn
import paddle.fluid.core as core
import paddle.nn.functional as F
import paddle.incubate.nn.functional as incubate_f
from paddle.nn.layer.norm import LayerNorm
from paddle.nn.layer.common import Linear, Dropout
from paddle.nn.layer.transformer import _convert_attention_mask
@@ -190,7 +191,7 @@ def GetFusedAttentionOut(self):

if attn_mask is not None:
attn_mask = _convert_attention_mask(attn_mask, x.dtype)
final_out = F.fused_multi_head_attention(
final_out = incubate_f.fused_multi_head_attention(
x, qkv_weight_tensor, out_linear_weight, self.pre_layer_norm,
ln1_scale, ln1_bias, ln2_scale, ln2_bias, epsilon, qkv_bias_tensor,
out_linear_bias, attn_mask, self.dropout_prob,
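For context, a dygraph usage sketch of the relocated attention API, following the positional argument order visible in the GetFusedAttentionOut change above (x, qkv_weight, linear_weight, pre_layer_norm, ln1_scale, ln1_bias, ln2_scale, ln2_bias, epsilon, qkv_bias, linear_bias, attn_mask). The qkv_weight layout [3, num_heads, head_dim, embed_dim] and the GPU-only requirement are assumptions based on the op's docstring, not part of this diff:

import paddle
import paddle.incubate.nn.functional as incubate_f

batch_size, seq_len, num_heads, head_dim = 2, 4, 4, 32
embed_dim = num_heads * head_dim  # 128

x = paddle.rand((batch_size, seq_len, embed_dim), dtype="float32")
qkv_weight = paddle.rand((3, num_heads, head_dim, embed_dim), dtype="float32")
qkv_bias = paddle.rand((3, num_heads, head_dim), dtype="float32")
linear_weight = paddle.rand((embed_dim, embed_dim), dtype="float32")
linear_bias = paddle.rand((embed_dim,), dtype="float32")
attn_mask = paddle.ones((batch_size, num_heads, seq_len, seq_len), dtype="float32")

# pre_layer_norm=False, layer-norm scales/biases left as None, epsilon=1e-5;
# the dropout rates keep their defaults.
out = incubate_f.fused_multi_head_attention(
    x, qkv_weight, linear_weight, False,
    None, None, None, None, 1e-5,
    qkv_bias, linear_bias, attn_mask)
print(out.shape)  # expected: [2, 4, 128]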
12 changes: 6 additions & 6 deletions python/paddle/fluid/tests/unittests/test_fused_feedforward_op.py
@@ -18,6 +18,7 @@
import paddle.fluid.core as core
from paddle.nn.layer import transformer
import paddle.nn.functional as F
import paddle.incubate.nn.functional as incubate_f
from paddle.nn.layer.norm import LayerNorm
from paddle.nn.layer.common import Linear, Dropout
import unittest
@@ -121,7 +122,7 @@ def FusedFFN(self):
ln2_scale = paddle.to_tensor(self.norm2.weight, stop_gradient=False)
ln2_bias = paddle.to_tensor(self.norm2.bias, stop_gradient=False)
x = paddle.to_tensor(self.src, stop_gradient=False)
out = F.fused_feedforward(
out = incubate_f.fused_feedforward(
x,
linear1_weight,
linear2_weight,
@@ -215,7 +216,7 @@ def test_static(self):
ln2_scale = paddle.static.data(name='ln2_scale', shape=[d_model])
ln2_bias = paddle.static.data(name='ln2_scale', shape=[d_model])

fused_out = F.fused_feedforward(
fused_out = incubate_f.fused_feedforward(
x,
linear1_weight,
linear2_weight,
@@ -295,8 +296,7 @@ def test_dtype():
name='linear1_weight', shape=[1, 10, 10], dtype="float32")
linear2_weight = paddle.static.data(
name='linear2_weight', shape=[1, 10, 10], dtype="float32")
paddle.nn.functional.fused_feedforward(x, linear1_weight,
linear2_weight)
incubate_f.fused_feedforward(x, linear1_weight, linear2_weight)

self.assertRaises(TypeError, test_dtype)

@@ -307,7 +307,7 @@ def test_dropout_rate_type():
name='linear1_weight1', shape=[10, 10], dtype="float32")
linear2_weight = paddle.static.data(
name='linear2_weight1', shape=[10, 10], dtype="float32")
paddle.nn.functional.fused_feedforward(
incubate_f.fused_feedforward(
x, linear1_weight, linear2_weight, dropout1_rate="a")

self.assertRaises(TypeError, test_dropout_rate_type)
@@ -319,7 +319,7 @@ def test_dropout_rate_value():
name='linear1_weight2', shape=[10, 10], dtype="float32")
linear2_weight = paddle.static.data(
name='linear2_weight2', shape=[10, 10], dtype="float32")
paddle.nn.functional.fused_feedforward(
incubate_f.fused_feedforward(
x, linear1_weight, linear2_weight, dropout2_rate=-1)

self.assertRaises(ValueError, test_dropout_rate_value)
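The argument-validation tests above keep their structure and only swap the entry point to the incubate path. A condensed sketch of that pattern (names and shapes here are illustrative, not copied from the full test file):

import unittest
import paddle
import paddle.incubate.nn.functional as incubate_f

class TestFusedFFNErrorSketch(unittest.TestCase):
    def test_dropout_rate_type(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data(name='x_sketch', shape=[1, 10, 10], dtype="float32")
            w1 = paddle.static.data(name='w1_sketch', shape=[10, 10], dtype="float32")
            w2 = paddle.static.data(name='w2_sketch', shape=[10, 10], dtype="float32")
            # A non-float dropout rate should be rejected before any kernel runs.
            with self.assertRaises(TypeError):
                incubate_f.fused_feedforward(x, w1, w2, dropout1_rate="a")
        paddle.disable_static()

if __name__ == "__main__":
    unittest.main()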
18 changes: 18 additions & 0 deletions python/paddle/incubate/nn/functional/__init__.py
@@ -0,0 +1,18 @@
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .fused_transformer import fused_multi_head_attention
from .fused_transformer import fused_feedforward

__all__ = ['fused_multi_head_attention', 'fused_feedforward']
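A dygraph sketch of the feed-forward entry point through this new package, mirroring the docstring example in the diff below. The (8, 8) weight shapes assume d_model == dim_feedforward == 8, and a GPU build is assumed since the op is registered as CUDA-only above:

import numpy as np
import paddle
import paddle.incubate.nn.functional as incubate_f

# [batch_size, seq_len, d_model] input with square linear weights.
x = paddle.to_tensor(np.random.random((1, 8, 8)).astype("float32"))
linear1_weight = paddle.to_tensor(np.random.random((8, 8)).astype("float32"))
linear2_weight = paddle.to_tensor(np.random.random((8, 8)).astype("float32"))

out = incubate_f.fused_feedforward(x, linear1_weight, linear2_weight)
print(out.shape)  # expected: [1, 8, 8]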
python/paddle/incubate/nn/functional/fused_transformer.py
@@ -12,9 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from ...fluid.layer_helper import LayerHelper
from ...fluid.framework import in_dygraph_mode
from ...fluid.data_feeder import check_variable_and_dtype, check_dtype
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.framework import in_dygraph_mode
from paddle.fluid.data_feeder import check_variable_and_dtype, check_dtype
from paddle import _C_ops

__all__ = []
@@ -90,7 +90,7 @@ def fused_feedforward(x,
x = paddle.to_tensor(x_data)
linear1_weight = paddle.to_tensor(linear1_weight_data)
linear2_weight = paddle.to_tensor(linear2_weight_data)
out = paddle.nn.functional.fused_feedforward(x, linear1_weight, linear2_weight)
out = paddle.incubate.nn.functional.fused_feedforward(x, linear1_weight, linear2_weight)
print(out.numpy().shape)
# (1, 8, 8)
"""
@@ -244,7 +244,7 @@ def fused_multi_head_attention(x,
# required: gpu
import paddle
import paddle.nn.functional as F
import paddle.incubate.nn.functional as F
# input: [batch_size, seq_len, embed_dim]
x = paddle.rand(shape=(2, 4, 128), dtype="float32")
4 changes: 0 additions & 4 deletions python/paddle/nn/functional/__init__.py
@@ -60,7 +60,6 @@
from .conv import conv1d # noqa: F401
from .conv import conv1d_transpose # noqa: F401
from .common import linear # noqa: F401
from .fused_transformer import fused_multi_head_attention # noqa: F401
from .conv import conv2d # noqa: F401
from .conv import conv2d_transpose # noqa: F401
from .conv import conv3d # noqa: F401
@@ -110,7 +109,6 @@
from .vision import pixel_shuffle # noqa: F401
from .input import one_hot # noqa: F401
from .input import embedding # noqa: F401
from .fused_transformer import fused_feedforward # noqa: F401
from ...fluid.layers import gather_tree # noqa: F401
from ...fluid.layers import temporal_shift # noqa: F401

@@ -211,7 +209,5 @@
'layer_norm',
'instance_norm',
'class_center_sample',
'fused_feedforward',
'fused_multi_head_attention',
'sparse_attention',
]
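A quick sanity check of the net effect of the removals above (a sketch; the results shown are the expected behaviour after this commit, not output captured from it):

import paddle.nn.functional as F
import paddle.incubate.nn.functional as incubate_f

print(hasattr(F, "fused_feedforward"))                     # expected: False
print(hasattr(F, "fused_multi_head_attention"))            # expected: False
print(hasattr(incubate_f, "fused_feedforward"))            # expected: True
print("fused_multi_head_attention" in incubate_f.__all__)  # expected: True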

1 comment on commit 4fd7cd2

@paddle-bot-old
Congratulation! Your pull request passed all required CI. You could ask reviewer(s) to approve and merge. 🎉
