Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

[Frontend][PaddlePaddle] Remove unused parameters and fix doc string #9283

Merged
merged 38 commits into from
Oct 15, 2021
Merged
Changes from 1 commit
Commits
Show all changes
38 commits
Select commit: hold Shift + click to select a range
5b39c79
Merge pull request #2 from apache/main
jiangjiajun Aug 3, 2021
74cc942
Merge pull request #8 from apache/main
jiangjiajun Aug 13, 2021
e0420bd
Merge branch 'apache:main' into main
jiangjiajun Sep 23, 2021
f181b0a
Merge branch 'apache:main' into main
jiangjiajun Sep 24, 2021
800b187
Merge branch 'apache:main' into main
jiangjiajun Sep 26, 2021
87c6d3d
add part of operators
jiangjiajun Sep 26, 2021
39b96fc
remove part of operators
jiangjiajun Sep 26, 2021
50e3c41
add lookup
jiangjiajun Sep 26, 2021
555406e
add test
jiangjiajun Sep 26, 2021
75956db
Update paddlepaddle.py
jiangjiajun Sep 27, 2021
a3aa170
modify error message for SAME padding
jiangjiajun Sep 27, 2021
326383a
Remove some function and old version operator
jiangjiajun Sep 28, 2021
6e275c2
Remove some function and old version operator
jiangjiajun Sep 28, 2021
f8e93cb
Remove some function and old version operator
jiangjiajun Sep 28, 2021
cd4ef59
Remove some function and old version operator
jiangjiajun Sep 28, 2021
56f4ccb
Merge pull request #55 from jiangjiajun/pr001_1
jiangjiajun Sep 28, 2021
67e9816
add dot test
jiangjiajun Sep 28, 2021
98fb38a
modify doc
jiangjiajun Sep 28, 2021
201be45
remove unreviewed code
jiangjiajun Sep 29, 2021
8d865b7
Update paddlepaddle.py
jiangjiajun Sep 29, 2021
96afd3d
Update test_forward.py
jiangjiajun Sep 29, 2021
ef7a003
Update paddlepaddle.py
jiangjiajun Sep 29, 2021
43ae5ab
Update paddlepaddle.py
jiangjiajun Sep 29, 2021
8d0af49
Update test_forward.py
jiangjiajun Sep 29, 2021
509e023
Update test_forward.py
jiangjiajun Sep 29, 2021
4139fb3
Merge pull request #57 from jiangjiajun/unreviewed
jiangjiajun Sep 29, 2021
2a5e30d
Merge branch 'apache:main' into pr001
jiangjiajun Oct 4, 2021
3c34b5a
add more cases for tests
jiangjiajun Oct 4, 2021
e600036
add more cases for tests
jiangjiajun Oct 4, 2021
4887e96
Merge pull request #60 from jiangjiajun/add-more-cases
jiangjiajun Oct 4, 2021
ef4c84b
remove annotation
jiangjiajun Oct 4, 2021
5d1aa7c
reduce test case sizes
jiangjiajun Oct 5, 2021
69ddea7
Merge branch 'apache:main' into pr001
jiangjiajun Oct 8, 2021
89bf515
Merge branch 'apache:main' into paddle_frontend_1008
jiangjiajun Oct 11, 2021
c87a22e
Remove unused parameters and fix doc string for paddle frontend
jiangjiajun Oct 13, 2021
5dde918
remove blank line
jiangjiajun Oct 13, 2021
2a528ab
fix code error
jiangjiajun Oct 14, 2021
19c6b9a
modify test_forward.py
jiangjiajun Oct 14, 2021
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
add lookup
jiangjiajun committed Sep 26, 2021

Verified

This commit was created on GitHub.com and signed with GitHub’s verified signature.
commit 50e3c41f380c3c6e6e7f101c244df67cd1d89603
109 changes: 28 additions & 81 deletions python/tvm/relay/frontend/paddlepaddle.py
Original file line number Diff line number Diff line change
@@ -135,29 +135,13 @@ def convert_unary_op(g, op, block):
g.add_node(op.output("Out")[0], out)


def convert_binary_logical_op(g, op, block):
    """Operator converter for logical op."""

    lhs = g.get_node(op.input("X")[0])
    rhs = g.get_node(op.input("Y")[0])
    # Paddle's logical op names (logical_and/or/xor) match the relay op
    # names, so the converter can be dispatched directly on op.type.
    relay_func = get_relay_op(op.type)
    result = relay_func(lhs, rhs)
    g.add_node(op.output("Out")[0], result)


@@ -257,16 +241,6 @@ def convert_batch_norm(g, op, block):
g.add_node(op.output("Y")[0], out[0])


def convert_bmm(g, op, block):
    """Operator converter for bmm."""

    lhs = g.get_node(op.input("X")[0])
    rhs = g.get_node(op.input("Y")[0])
    # relay's batch_matmul treats its second operand as already
    # transposed on the last two axes, so swap them up front to get a
    # plain batched X @ Y.
    rhs_t = _op.transpose(rhs, [0, 2, 1])
    g.add_node(op.output("Out")[0], _op.nn.batch_matmul(lhs, rhs_t))


def convert_cast(g, op, block):
"""Operator converter for cast."""

@@ -499,18 +473,6 @@ def convert_gelu(g, op, block):
g.add_node(op.output("Out")[0], out)


def convert_hard_shrink(g, op, block):
    """Operator converter for hard_shrink."""

    x = g.get_node(op.input("X")[0])
    dtype = infer_type(x).checked_type.dtype
    lambd = _op.const(op.attr("threshold"), dtype)
    # Build a 0/1 mask that keeps values strictly outside
    # [-threshold, threshold] and zeroes everything inside the band.
    outside = _op.logical_or(x < _op.const(-1.0, dtype) * lambd, x > lambd)
    result = _op.cast(outside, dtype) * x
    g.add_node(op.output("Out")[0], result)


def convert_hard_sigmoid(g, op, block):
"""Operator converter for hard_sigmoid."""

@@ -539,16 +501,6 @@ def convert_hard_swish(g, op, block):
g.add_node(op.output("Out")[0], out)


def convert_hard_tanh(g, op, block):
    """Operator converter for hard_tanh."""

    x = g.get_node(op.input("X")[0])
    # hard_tanh is a clamp into [t_min, t_max] taken from the op attrs.
    clipped = _op.tensor.clip(x, op.attr("t_min"), op.attr("t_max"))
    g.add_node(op.output("Out")[0], clipped)


def convert_layer_norm(g, op, block):
"""Operator converter for layer_norm."""

@@ -599,13 +551,27 @@ def convert_log1p(g, op, block):
g.add_node(op.output("Out")[0], out)


def convert_lookup_table(g, op, block):
    """Operator converter for lookup_table / lookup_table_v2.

    Gathers rows of the weight matrix W indexed by the integer Ids
    tensor.  When ``padding_idx`` is set (!= -1), the row at that index
    must read back as zeros; this is emulated by zeroing that row of the
    weights before the gather.
    """

    indices = g.get_node(op.input("Ids")[0])
    padding_idx = op.attr("padding_idx")
    weights = g.get_node(op.input("W")[0])
    if padding_idx != -1:
        if op.input("W")[0] in g.get_params():
            # Copy before zeroing: the array returned by get_params is
            # shared graph state, and assigning into it in place would
            # silently corrupt the weight for every other op that reads
            # the same parameter.  (Assumes the param is numpy-like, as
            # the original item assignment already did.)
            weights = np.array(g.get_params(op.input("W")[0]))
            weights[padding_idx] = 0.0
            weights = _expr.const(weights)
        else:
            # W is a computed tensor, not a stored parameter: mask the
            # padding row by multiplying with an all-ones constant that
            # has zeros at padding_idx.  This needs a static weight
            # shape, hence the assertion below.
            shape = _infer_value(shape_of(weights), g.get_params())
            assert not isinstance(
                shape, _expr.Expr
            ), "Shape of weight has to be fixed for PaddlePaddle's lookup_table"
            filters = np.ones(shape).astype(infer_type(weights).checked_type.dtype)
            filters[padding_idx] = 0.0
            filters = _expr.const(filters)
            weights = weights * filters
    out = _op.take(weights, indices.astype("int32"), axis=0)
    g.add_node(op.output("Out")[0], out)


@@ -937,22 +903,6 @@ def convert_scale(g, op, block):
g.add_node(op.output("Out")[0], out)


def convert_selu(g, op, block):
    """Operator converter for selu."""

    x = g.get_node(op.input("X")[0])
    dtype = infer_type(x).checked_type.dtype
    alpha = _op.const(op.attr("alpha"), dtype)
    scale = _op.const(op.attr("scale"), dtype)
    one = _expr.const(1.0, dtype=dtype)
    minus_one = _expr.const(-1.0, dtype=dtype)
    # Negative branch of SELU, alpha * (exp(x) - 1) for x < 0, expressed
    # through relu:  -alpha * relu(1 - exp(x)) == alpha * min(0, exp(x) - 1).
    negative_part = minus_one * alpha * _op.nn.relu(one - _op.exp(x))
    result = scale * (negative_part + _op.nn.relu(x))
    g.add_node(op.output("Out")[0], result)


def convert_shape(g, op, block):
"""Operator converter for shape."""

@@ -1097,7 +1047,6 @@ def convert_unsqueeze(g, op, block):
_convert_map = {
"abs": convert_unary_op,
"acos": convert_unary_op,
"addmm": convert_addmm,
"arg_max": convert_arg_max,
"arg_min": convert_arg_min,
"argsort": convert_argsort,
@@ -1106,8 +1055,6 @@ def convert_unsqueeze(g, op, block):
"assign_value": convert_assign_value,
"atan": convert_unary_op,
"batch_norm": convert_batch_norm,
"bmm": convert_bmm,
"brelu": convert_hard_tanh,
"cast": convert_cast,
"ceil": convert_unary_op,
"concat": convert_concat,
@@ -1140,7 +1087,6 @@ def convert_unsqueeze(g, op, block):
"gelu": convert_gelu,
"greater_equal": convert_elementwise_op,
"greater_than": convert_elementwise_op,
"hard_shrink": convert_hard_shrink,
"hard_sigmoid": convert_hard_sigmoid,
"hard_swish": convert_hard_swish,
"isfinite": convert_unary_op,
@@ -1163,6 +1109,8 @@ def convert_unsqueeze(g, op, block):
"logical_xor": convert_binary_logical_op,
"logsigmoid": convert_logsigmoid,
"log_softmax": convert_logsoftmax,
"lookup_table": convert_lookup_table,
"lookup_table_v2": convert_lookup_table,
"matmul": convert_matmul,
"matmul_v2": convert_matmul,
"meshgrid": convert_meshgrid,
@@ -1174,7 +1122,6 @@ def convert_unsqueeze(g, op, block):
"round": convert_unary_op,
"rsqrt": convert_unary_op,
"scale": convert_scale,
"selu": convert_selu,
"shape": convert_shape,
"sigmoid": convert_unary_op,
"sign": convert_unary_op,