From 4e299b3cfb4d30bdfa6079609ff3ca2617a0c6fa Mon Sep 17 00:00:00 2001 From: Shushi Hong <820958424@qq.com> Date: Tue, 4 Feb 2025 17:03:22 +0800 Subject: [PATCH 01/13] Update fx_translator.py --- python/tvm/relax/frontend/torch/fx_translator.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/python/tvm/relax/frontend/torch/fx_translator.py b/python/tvm/relax/frontend/torch/fx_translator.py index 52122ce33369..d6685d8ce9bd 100644 --- a/python/tvm/relax/frontend/torch/fx_translator.py +++ b/python/tvm/relax/frontend/torch/fx_translator.py @@ -616,21 +616,25 @@ def create_convert_map( nn.Flatten: self._flatten_module, ## call_function and call_method # unary + "abs": self._unary_op(relax.op.abs), "acos": self._unary_op(relax.op.acos), "acosh": self._unary_op(relax.op.acosh), "asin": self._unary_op(relax.op.asin), "asinh": self._unary_op(relax.op.asinh), "atan": self._unary_op(relax.op.atan), "atanh": self._unary_op(relax.op.atanh), + "ceil": self._unary_op(relax.op.ceil), "clamp": self._clamp, "cos": self._unary_op(relax.op.cos), "cosh": self._unary_op(relax.op.cosh), "dropout": lambda node: self.env[node.args[0]], "exp": self._unary_op(relax.op.exp), + "floor": self._unary_op(relax.op.floor), "gelu": self._gelu, "hardsigmoid": self._hardsigmoid, "hardswish": self._hardswish, "leaky_relu": self._leakyrelu, + "log": self._unary_op(relax.op.log), "log_softmax": self._log_softmax, "neg": self._unary_op(relax.op.negative), "relu": self._unary_op(relax.op.nn.relu), From 2109b6f483e745ed32e3b191053170b864a8a933 Mon Sep 17 00:00:00 2001 From: Shushi Hong <820958424@qq.com> Date: Tue, 4 Feb 2025 17:05:43 +0800 Subject: [PATCH 02/13] Update test_frontend_from_fx.py --- tests/python/relax/test_frontend_from_fx.py | 80 +++++++++++++++++++++ 1 file changed, 80 insertions(+) diff --git a/tests/python/relax/test_frontend_from_fx.py b/tests/python/relax/test_frontend_from_fx.py index d9857723b1f5..b9381fa2ccce 100644 --- a/tests/python/relax/test_frontend_from_fx.py +++ b/tests/python/relax/test_frontend_from_fx.py @@ -4084,5 +4084,85 @@ def main( ) +def test_abs(): + class Abs(Module): + def forward(self, input): + return torch.abs(input) + + @I.ir_module + class Expected1: + @R.function + def main( + inp_0: R.Tensor((256, 256), dtype="float32") + ) -> R.Tensor((256, 256), dtype="float32"): + with R.dataflow(): + lv: R.Tensor((256, 256), dtype="float32") = R.abs(inp_0) + gv: R.Tensor((256, 256), dtype="float32") = lv + R.output(gv) + return gv + + verify_model(Abs(), [([256, 256], "float32")], {}, Expected1) + + +def test_ceil(): + class Ceil(Module): + def forward(self, input): + return torch.ceil(input) + + @I.ir_module + class Expected1: + @R.function + def main( + inp_0: R.Tensor((256, 256), dtype="float32") + ) -> R.Tensor((256, 256), dtype="float32"): + with R.dataflow(): + lv: R.Tensor((256, 256), dtype="float32") = R.ceil(inp_0) + gv: R.Tensor((256, 256), dtype="float32") = lv + R.output(gv) + return gv + + verify_model(Ceil(), [([256, 256], "float32")], {}, Expected1) + + +def test_floor(): + class Floor(Module): + def forward(self, input): + return torch.floor(input) + + @I.ir_module + class Expected1: + @R.function + def main( + inp_0: R.Tensor((256, 256), dtype="float32") + ) -> R.Tensor((256, 256), dtype="float32"): + with R.dataflow(): + lv: R.Tensor((256, 256), dtype="float32") = R.floor(inp_0) + gv: R.Tensor((256, 256), dtype="float32") = lv + R.output(gv) + return gv + + verify_model(Floor(), [([256, 256], "float32")], {}, Expected1) + + +def test_log(): + class Log(Module): + def 
forward(self, input): + return torch.log(input) + + @I.ir_module + class Expected1: + @R.function + def main( + inp_0: R.Tensor((256, 256), dtype="float32") + ) -> R.Tensor((256, 256), dtype="float32"): + with R.dataflow(): + lv: R.Tensor((256, 256), dtype="float32") = R.log(inp_0) + gv: R.Tensor((256, 256), dtype="float32") = lv + R.output(gv) + return gv + + verify_model(Log(), [([256, 256], "float32")], {}, Expected1) + + if __name__ == "__main__": tvm.testing.main() From d435c821ccd16863a64f4e63767acc095681b82d Mon Sep 17 00:00:00 2001 From: Shushi Hong <820958424@qq.com> Date: Fri, 7 Feb 2025 13:13:41 +0800 Subject: [PATCH 03/13] Update test_frontend_from_fx.py --- tests/python/relax/test_frontend_from_fx.py | 1026 +++++++++---------- 1 file changed, 480 insertions(+), 546 deletions(-) diff --git a/tests/python/relax/test_frontend_from_fx.py b/tests/python/relax/test_frontend_from_fx.py index b9381fa2ccce..04b437d006a2 100644 --- a/tests/python/relax/test_frontend_from_fx.py +++ b/tests/python/relax/test_frontend_from_fx.py @@ -746,37 +746,6 @@ def main( verify_model(Einsum2(), [([5], "float32"), ([4], "float32")], {}, Expected2) -def test_relu(): - class ReLU0(Module): - def __init__(self): - super().__init__() - self.relu = torch.nn.ReLU() - - def forward(self, input): - return self.relu(input) - - class ReLU1(Module): - def forward(self, input): - return torch.nn.functional.relu(input) - - @tvm.script.ir_module - class expected: - @R.function - def main( - input_1: R.Tensor((10, 10), dtype="float32") - ) -> R.Tensor((10, 10), dtype="float32"): - # block 0 - with R.dataflow(): - lv: R.Tensor((10, 10), dtype="float32") = R.nn.relu(input_1) - gv: R.Tensor((10, 10), dtype="float32") = lv - R.output(gv) - return gv - - input_info = [([10, 10], "float32")] - verify_model(ReLU0(), input_info, {}, expected) - verify_model(ReLU1(), input_info, {}, expected) - - @tvm.testing.requires_gpu def test_leakyrelu(): import torch @@ -1182,37 +1151,6 @@ def main( verify_model(model, input_info, binding, expected1) -def test_dropout(): - input_info = [([1, 3, 10, 10], "float32")] - - class Dropout1(Module): - def __init__(self): - super().__init__() - self.dropout = torch.nn.Dropout(0.5) - - def forward(self, input): - return self.dropout(input) - - class Dropout2(Module): - def forward(self, input): - return torch.dropout(input, 0.5, train=True) - - @tvm.script.ir_module - class expected1: - @R.function - def main( - input_1: R.Tensor((1, 3, 10, 10), dtype="float32") - ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): - # block 0 - with R.dataflow(): - gv: R.Tensor((1, 3, 10, 10), dtype="float32") = input_1 - R.output(gv) - return gv - - verify_model(Dropout1(), input_info, {}, expected1) - verify_model(Dropout2(), input_info, {}, expected1) - - def test_stochastic_depth(): input_info = [([1, 3, 10, 10], "float32")] @@ -1519,109 +1457,6 @@ def main( verify_model(model, input_info, {}, expected1) -def test_silu(): - input_info = [([1, 3, 10, 10], "float32")] - - class SiLU(Module): - def __init__(self): - super().__init__() - self.silu = torch.nn.SiLU() - - def forward(self, input): - return self.silu(input) - - class SiLU2(Module): - def forward(self, input): - return torch.nn.functional.silu(input) - - @tvm.script.ir_module - class expected1: - @R.function - def main( - input_1: R.Tensor((1, 3, 10, 10), dtype="float32") - ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): - # block 0 - with R.dataflow(): - lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.nn.silu(input_1) - gv: R.Tensor((1, 3, 10, 10), 
dtype="float32") = lv - R.output(gv) - return gv - - verify_model(SiLU(), input_info, {}, expected1) - verify_model(SiLU2(), input_info, {}, expected1) - - -def test_hardsigmoid(): - input_info = [([1, 3, 10, 10], "float32")] - - class Hardsigmoid(torch.nn.Module): - def __init__(self): - super().__init__() - self.hs = torch.nn.Hardsigmoid() - - def forward(self, input): - return self.hs(input) - - class Hardsigmoid2(torch.nn.Module): - def forward(self, input): - return torch.nn.functional.hardsigmoid(input) - - @tvm.script.ir_module - class expected1: - @R.function - def main( - inp_0: R.Tensor((1, 3, 10, 10), dtype="float32") - ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): - with R.dataflow(): - lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.add(inp_0, R.const(3, "float32")) - lv1: R.Tensor((1, 3, 10, 10), dtype="float32") = R.clip(lv, 0, 6) - lv2: R.Tensor((1, 3, 10, 10), dtype="float32") = R.divide( - lv1, R.const(6, "float32") - ) - gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv2 - R.output(gv) - return gv - - verify_model(Hardsigmoid(), input_info, {}, expected1) - verify_model(Hardsigmoid2(), input_info, {}, expected1) - - -def test_hardswish(): - input_info = [([1, 3, 10, 10], "float32")] - - class Hardswish(torch.nn.Module): - def __init__(self): - super().__init__() - self.hs = torch.nn.Hardswish() - - def forward(self, input): - return self.hs(input) - - class Hardswish2(torch.nn.Module): - def forward(self, input): - return torch.nn.functional.hardswish(input) - - @tvm.script.ir_module - class expected1: - @R.function - def main( - inp_0: R.Tensor((1, 3, 10, 10), dtype="float32") - ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): - with R.dataflow(): - lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.add(inp_0, R.const(3, "float32")) - lv1: R.Tensor((1, 3, 10, 10), dtype="float32") = R.clip(lv, 0, 6) - lv2: R.Tensor((1, 3, 10, 10), dtype="float32") = R.divide( - lv1, R.const(6, "float32") - ) - lv3: R.Tensor((1, 3, 10, 10), dtype="float32") = R.multiply(inp_0, lv2) - gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv3 - R.output(gv) - return gv - - verify_model(Hardswish(), input_info, {}, expected1) - verify_model(Hardswish2(), input_info, {}, expected1) - - def test_groupnorm(): import torch from torch.nn import Module @@ -1671,70 +1506,6 @@ def main( verify_model(model, input_info, binding, expected1) -def test_softmax(): - input_info = [([1, 3, 10, 10], "float32")] - - class Softmax(Module): - def __init__(self): - super().__init__() - self.sm = torch.nn.Softmax(dim=1) - - def forward(self, input): - return self.sm(input) - - class Softmax2(Module): - def forward(self, input): - return torch.nn.functional.softmax(input, dim=1) - - @tvm.script.ir_module - class expected1: - @R.function - def main( - input_1: R.Tensor((1, 3, 10, 10), dtype="float32") - ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): - # block 0 - with R.dataflow(): - lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.nn.softmax(input_1, axis=1) - gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv - R.output(gv) - return gv - - verify_model(Softmax(), input_info, {}, expected1) - verify_model(Softmax2(), input_info, {}, expected1) - - -def test_logsoftmax(): - input_info = [([1, 3, 10, 10], "float32")] - - class LogSoftmax(Module): - def __init__(self): - super().__init__() - self.lsm = torch.nn.LogSoftmax(dim=1) - - def forward(self, input): - return self.lsm(input) - - class LogSoftmax2(Module): - def forward(self, input): - return torch.nn.functional.log_softmax(input, dim=1) - - 
@tvm.script.ir_module - class expected1: - @R.function - def main( - input_1: R.Tensor((1, 3, 10, 10), dtype="float32") - ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): - # block 0 - with R.dataflow(): - lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.nn.log_softmax(input_1, axis=1) - gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv - R.output(gv) - return gv - - verify_model(LogSoftmax(), input_info, {}, expected1) - verify_model(LogSoftmax2(), input_info, {}, expected1) - - def test_binary(): input_info1 = [([1, 3, 10, 10], "float32"), ([1, 3, 10, 10], "float32")] input_info2 = [([1, 3, 10, 10], "float32")] @@ -2186,65 +1957,63 @@ def main( def test_unary(): input_info = [([1, 3, 10, 10], "float32")] - # sin - class Sin(Module): + #abs + class Abs(Module): def forward(self, input): - return torch.sin(input) + return torch.abs(input) @tvm.script.ir_module - class expected_sin: + class expected_abs: @R.function def main( input_1: R.Tensor((1, 3, 10, 10), dtype="float32") ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): - # block 0 with R.dataflow(): - lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.sin(input_1) + lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.abs(input_1) gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv R.output(gv) return gv + verify_model(Abs(), input_info, {}, expected_abs) - verify_model(Sin(), input_info, {}, expected_sin) - - # cos - class Cos(Module): + # acos + class Acos(Module): def forward(self, input): - return torch.cos(input) + return torch.acos(input) @tvm.script.ir_module - class expected_cos: + class expected_acos: @R.function def main( input_1: R.Tensor((1, 3, 10, 10), dtype="float32") ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): # block 0 with R.dataflow(): - lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.cos(input_1) + lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.acos(input_1) gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv R.output(gv) return gv - verify_model(Cos(), input_info, {}, expected_cos) + verify_model(Acos(), input_info, {}, expected_acos) - # tan - class Tan(Module): + # acosh + class Acosh(Module): def forward(self, input): - return torch.tan(input) + return torch.acosh(input) @tvm.script.ir_module - class expected_tan: + class expected_acosh: @R.function def main( input_1: R.Tensor((1, 3, 10, 10), dtype="float32") ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): # block 0 with R.dataflow(): - lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.tan(input_1) + lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.acosh(input_1) gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv R.output(gv) return gv - verify_model(Tan(), input_info, {}, expected_tan) + verify_model(Acosh(), input_info, {}, expected_acosh) # asin class Asin(Module): @@ -2266,25 +2035,25 @@ def main( verify_model(Asin(), input_info, {}, expected_asin) - # acos - class Acos(Module): + # asinh + class Asinh(Module): def forward(self, input): - return torch.acos(input) + return torch.asinh(input) @tvm.script.ir_module - class expected_acos: + class expected_asinh: @R.function def main( input_1: R.Tensor((1, 3, 10, 10), dtype="float32") ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): # block 0 with R.dataflow(): - lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.acos(input_1) + lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.asinh(input_1) gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv R.output(gv) return gv - verify_model(Acos(), input_info, {}, expected_acos) + verify_model(Asinh(), input_info, {}, expected_asinh) # atan class Atan(Module): @@ -2306,25 
+2075,107 @@ def main( verify_model(Atan(), input_info, {}, expected_atan) - # sinh - class Sinh(Module): - def forward(self, input): - return torch.sinh(input) + # atanh + class Atanh(Module): + def forward(self, input): + return torch.atanh(input) @tvm.script.ir_module - class expected_sinh: + class expected_atanh: @R.function def main( input_1: R.Tensor((1, 3, 10, 10), dtype="float32") ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): # block 0 with R.dataflow(): - lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.sinh(input_1) + lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.atanh(input_1) gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv R.output(gv) return gv - verify_model(Sinh(), input_info, {}, expected_sinh) + verify_model(Atanh(), input_info, {}, expected_atanh) + + #ceil + class Ceil(Module): + def forward(self, input): + return torch.ceil(input) + + @tvm.script.ir_module + class expected_ceil: + @R.function + def main( + input_1: R.Tensor((1, 3, 10, 10), dtype="float32") + ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): + with R.dataflow(): + lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.ceil(input_1) + gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv + R.output(gv) + return gv + verify_model(Ceil(), input_info, {}, expected_ceil) + + #clamp + class Clamp(Module): + def forward(self, input): + return torch.clamp(input, min=0.1, max=0.5) + + @tvm.script.ir_module + class expected_clamp: + @R.function + def main( + input_1: R.Tensor((1, 3, 10, 10), dtype="float32") + ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): + # block 0 + with R.dataflow(): + lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.clip(input_1, 0.1, 0.5) + gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv + R.output(gv) + return gv + + verify_model(Clamp(), input_info, {}, expected_clamp) + + from tvm.relax.frontend.torch import from_fx + + with pytest.raises( + ValueError, match="TVM only supports constant max value for torch.clamp/clip" + ): + + class Clamp_Error(Module): + def forward(self, input): + return torch.clamp(input, min=0.5, max=None) + + gm = fx.symbolic_trace(Clamp_Error()) + from_fx(gm, input_info) + + with pytest.raises( + ValueError, match="TVM only supports constant min value for torch.clamp/clip" + ): + + class Clamp_Error(Module): + def forward(self, input): + return torch.clamp(input, min=input, max=input) + + gm = fx.symbolic_trace(Clamp_Error()) + from_fx(gm, input_info) + + # cos + class Cos(Module): + def forward(self, input): + return torch.cos(input) + + @tvm.script.ir_module + class expected_cos: + @R.function + def main( + input_1: R.Tensor((1, 3, 10, 10), dtype="float32") + ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): + # block 0 + with R.dataflow(): + lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.cos(input_1) + gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv + R.output(gv) + return gv + + verify_model(Cos(), input_info, {}, expected_cos) # cosh class Cosh(Module): @@ -2346,211 +2197,468 @@ def main( verify_model(Cosh(), input_info, {}, expected_cosh) - # tanh - class Tanh(Module): + #dropout + class Dropout1(Module): + def __init__(self): + super().__init__() + self.dropout = torch.nn.Dropout(0.5) + def forward(self, input): - return torch.tanh(input) + return self.dropout(input) + + class Dropout2(Module): + def forward(self, input): + return torch.dropout(input, 0.5, train=True) @tvm.script.ir_module - class expected_tanh: + class expected_dropout: @R.function def main( input_1: R.Tensor((1, 3, 10, 10), dtype="float32") ) -> R.Tensor((1, 3, 10, 10), 
dtype="float32"): # block 0 with R.dataflow(): - lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.tanh(input_1) + gv: R.Tensor((1, 3, 10, 10), dtype="float32") = input_1 + R.output(gv) + return gv + + verify_model(Dropout1(), input_info, {}, expected_dropout) + verify_model(Dropout2(), input_info, {}, expected_dropout) + + # exp + class Exp(Module): + def forward(self, input): + return torch.exp(input) + + @tvm.script.ir_module + class expected_exp: + @R.function + def main( + input_1: R.Tensor((1, 3, 10, 10), dtype="float32") + ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): + # block 0 + with R.dataflow(): + lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.exp(input_1) gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv R.output(gv) return gv - verify_model(Tanh(), input_info, {}, expected_tanh) + verify_model(Exp(), input_info, {}, expected_exp) - # asinh - class Asinh(Module): + #floor + class Floor(Module): def forward(self, input): - return torch.asinh(input) + return torch.floor(input) @tvm.script.ir_module - class expected_asinh: + class expected_floor: + @R.function + def main( + input_1: R.Tensor((1, 3, 10, 10), dtype="float32") + ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): + with R.dataflow(): + lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.floor(input_1) + gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv + R.output(gv) + return gv + verify_model(Floor(), input_info, {}, expected_floor) + + #gelu + class Gelu(Module): + def __init__(self): + super().__init__() + self.gelu = torch.nn.GELU() + + def forward(self, input): + return self.gelu(input) + + class Gelu2(Module): + def forward(self, input): + return torch.nn.functional.gelu(input) + + @tvm.script.ir_module + class expected_gelu: @R.function def main( input_1: R.Tensor((1, 3, 10, 10), dtype="float32") ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): # block 0 with R.dataflow(): - lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.asinh(input_1) + lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.nn.gelu(input_1) gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv R.output(gv) return gv - verify_model(Asinh(), input_info, {}, expected_asinh) + verify_model(Gelu(), input_info, {}, expected_gelu) + verify_model(Gelu2(), input_info, {}, expected_gelu) + + #hardsigmoid + class Hardsigmoid(torch.nn.Module): + def __init__(self): + super().__init__() + self.hs = torch.nn.Hardsigmoid() - # acosh - class Acosh(Module): def forward(self, input): - return torch.acosh(input) + return self.hs(input) + + class Hardsigmoid2(torch.nn.Module): + def forward(self, input): + return torch.nn.functional.hardsigmoid(input) @tvm.script.ir_module - class expected_acosh: + class expected_hardsigmoid: + @R.function + def main( + inp_0: R.Tensor((1, 3, 10, 10), dtype="float32") + ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): + with R.dataflow(): + lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.add(inp_0, R.const(3, "float32")) + lv1: R.Tensor((1, 3, 10, 10), dtype="float32") = R.clip(lv, 0, 6) + lv2: R.Tensor((1, 3, 10, 10), dtype="float32") = R.divide( + lv1, R.const(6, "float32") + ) + gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv2 + R.output(gv) + return gv + + verify_model(Hardsigmoid(), input_info, {}, expected_hardsigmoid) + verify_model(Hardsigmoid2(), input_info, {}, expected_hardsigmoid) + + #hardswish + class Hardswish(torch.nn.Module): + def __init__(self): + super().__init__() + self.hs = torch.nn.Hardswish() + + def forward(self, input): + return self.hs(input) + + class Hardswish2(torch.nn.Module): + def forward(self, 
input): + return torch.nn.functional.hardswish(input) + + @tvm.script.ir_module + class expected_hardswish: + @R.function + def main( + inp_0: R.Tensor((1, 3, 10, 10), dtype="float32") + ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): + with R.dataflow(): + lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.add(inp_0, R.const(3, "float32")) + lv1: R.Tensor((1, 3, 10, 10), dtype="float32") = R.clip(lv, 0, 6) + lv2: R.Tensor((1, 3, 10, 10), dtype="float32") = R.divide( + lv1, R.const(6, "float32") + ) + lv3: R.Tensor((1, 3, 10, 10), dtype="float32") = R.multiply(inp_0, lv2) + gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv3 + R.output(gv) + return gv + + verify_model(Hardswish(), input_info, {}, expected_hardswish) + verify_model(Hardswish2(), input_info, {}, expected_hardswish) + + #log + class Log(Module): + def forward(self, input): + return torch.log(input) + + @tvm.script.ir_module + class expected_log: @R.function def main( input_1: R.Tensor((1, 3, 10, 10), dtype="float32") + ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): + with R.dataflow(): + lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.log(input_1) + gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv + R.output(gv) + return gv + verify_model(Log(), input_info, {}, expected_log) + + #log_softmax + class LogSoftmax(Module): + def __init__(self): + super().__init__() + self.lsm = torch.nn.LogSoftmax(dim=1) + + def forward(self, input): + return self.lsm(input) + + class LogSoftmax2(Module): + def forward(self, input): + return torch.nn.functional.log_softmax(input, dim=1) + + @tvm.script.ir_module + class expected_log_softmax: + @R.function + def main( + input_1: R.Tensor((1, 3, 10, 10), dtype="float32") ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): # block 0 with R.dataflow(): - lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.acosh(input_1) + lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.nn.log_softmax(input_1, axis=1) gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv R.output(gv) return gv - verify_model(Acosh(), input_info, {}, expected_acosh) + verify_model(LogSoftmax(), input_info, {}, expected_log_softmax) + verify_model(LogSoftmax2(), input_info, {}, expected_log_softmax) - # atanh - class Atanh(Module): + #neg + class Neg(Module): def forward(self, input): - return torch.atanh(input) + return -input @tvm.script.ir_module - class expected_atanh: + class expected_neg: @R.function def main( input_1: R.Tensor((1, 3, 10, 10), dtype="float32") + ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): + with R.dataflow(): + lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.negative(input_1) + gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv + R.output(gv) + return gv + + verify_model(Neg(), input_info, {}, expected_neg) + + #relu + class ReLU0(Module): + def __init__(self): + super().__init__() + self.relu = torch.nn.ReLU() + + def forward(self, input): + return self.relu(input) + + class ReLU1(Module): + def forward(self, input): + return torch.nn.functional.relu(input) + + @tvm.script.ir_module + class expected_relu: + @R.function + def main( + input_1: R.Tensor((1, 3, 10, 10), dtype="float32") ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): # block 0 with R.dataflow(): - lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.atanh(input_1) + lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.nn.relu(input_1) gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv R.output(gv) return gv - verify_model(Atanh(), input_info, {}, expected_atanh) + verify_model(ReLU0(), input_info, {}, expected_relu) + verify_model(ReLU1(), input_info, 
{}, expected_relu) - # exp - class Exp(Module): + # round + class Round(Module): def forward(self, input): - return torch.exp(input) + return torch.round(input) @tvm.script.ir_module - class expected_exp: + class expected_round: @R.function def main( input_1: R.Tensor((1, 3, 10, 10), dtype="float32") ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): # block 0 with R.dataflow(): - lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.exp(input_1) + lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.round(input_1) gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv R.output(gv) return gv - verify_model(Exp(), input_info, {}, expected_exp) + verify_model(Round(), input_info, {}, expected_round) - # sqrt - class Sqrt(Module): + #rsqrt + class Rsqrt(Module): def forward(self, input): - return torch.sqrt(input) + return torch.rsqrt(input) + + @tvm.script.ir_module + class expected_rsqrt: + @R.function + def main( + input_1: R.Tensor((1, 3, 10, 10), dtype="float32") + ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): + with R.dataflow(): + lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.rsqrt(input_1) + gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv + R.output(gv) + return gv + + verify_model(Rsqrt(), input_info, {}, expected_rsqrt) + + # sigmoid + class Sigmoid(Module): + def __init__(self): + super().__init__() + self.sigmoid = torch.nn.Sigmoid() + + def forward(self, input): + return self.sigmoid(input) + + class Sigmoid2(Module): + def forward(self, input): + return torch.sigmoid(input) + + @tvm.script.ir_module + class expected_sigmoid: + @R.function + def main( + input_1: R.Tensor((1, 3, 10, 10), dtype="float32") + ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): + # block 0 + with R.dataflow(): + lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.sigmoid(input_1) + gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv + R.output(gv) + return gv + + verify_model(Sigmoid(), input_info, {}, expected_sigmoid) + verify_model(Sigmoid2(), input_info, {}, expected_sigmoid) + + #selu + class SiLU(Module): + def __init__(self): + super().__init__() + self.silu = torch.nn.SiLU() + + def forward(self, input): + return self.silu(input) + + class SiLU2(Module): + def forward(self, input): + return torch.nn.functional.silu(input) + + @tvm.script.ir_module + class expected_silu: + @R.function + def main( + input_1: R.Tensor((1, 3, 10, 10), dtype="float32") + ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): + # block 0 + with R.dataflow(): + lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.nn.silu(input_1) + gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv + R.output(gv) + return gv + + verify_model(SiLU(), input_info, {}, expected_silu) + verify_model(SiLU2(), input_info, {}, expected_silu) + + # sin + class Sin(Module): + def forward(self, input): + return torch.sin(input) + + @tvm.script.ir_module + class expected_sin: + @R.function + def main( + input_1: R.Tensor((1, 3, 10, 10), dtype="float32") + ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): + # block 0 + with R.dataflow(): + lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.sin(input_1) + gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv + R.output(gv) + return gv + + verify_model(Sin(), input_info, {}, expected_sin) + + # sinh + class Sinh(Module): + def forward(self, input): + return torch.sinh(input) @tvm.script.ir_module - class expected3: + class expected_sinh: @R.function def main( input_1: R.Tensor((1, 3, 10, 10), dtype="float32") ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): # block 0 with R.dataflow(): - lv: R.Tensor((1, 3, 10, 10), 
dtype="float32") = R.sqrt(input_1) + lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.sinh(input_1) gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv R.output(gv) return gv - verify_model(Sqrt(), input_info, {}, expected3) + verify_model(Sinh(), input_info, {}, expected_sinh) - # sigmoid - class Sigmoid(Module): + # softmax + class Softmax(Module): def __init__(self): super().__init__() - self.sigmoid = torch.nn.Sigmoid() + self.sm = torch.nn.Softmax(dim=1) def forward(self, input): - return self.sigmoid(input) + return self.sm(input) - class Sigmoid2(Module): + class Softmax2(Module): def forward(self, input): - return torch.sigmoid(input) + return torch.nn.functional.softmax(input, dim=1) @tvm.script.ir_module - class expected4: + class expected_softmax: @R.function def main( - input_1: R.Tensor((1, 3, 10, 10), dtype="float32") + input_1: R.Tensor((1, 3, 10, 10), dtype="float32") ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): # block 0 with R.dataflow(): - lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.sigmoid(input_1) + lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.nn.softmax(input_1, axis=1) gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv R.output(gv) return gv - verify_model(Sigmoid(), input_info, {}, expected4) - verify_model(Sigmoid2(), input_info, {}, expected4) + verify_model(Softmax(), input_info, {}, expected_softmax) + verify_model(Softmax2(), input_info, {}, expected_softmax) - # round - class Round(Module): + # sqrt + class Sqrt(Module): def forward(self, input): - return torch.round(input) + return torch.sqrt(input) @tvm.script.ir_module - class expected5: + class expected_sqrt: @R.function def main( input_1: R.Tensor((1, 3, 10, 10), dtype="float32") ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): # block 0 with R.dataflow(): - lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.round(input_1) + lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.sqrt(input_1) gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv R.output(gv) return gv - verify_model(Round(), input_info, {}, expected5) - - -def test_gelu(): - input_info = [([1, 3, 10, 10], "float32")] - - class Gelu(Module): - def __init__(self): - super().__init__() - self.gelu = torch.nn.GELU() - - def forward(self, input): - return self.gelu(input) + verify_model(Sqrt(), input_info, {}, expected_sqrt) - class Gelu2(Module): + # tan + class Tan(Module): def forward(self, input): - return torch.nn.functional.gelu(input) + return torch.tan(input) @tvm.script.ir_module - class expected1: + class expected_tan: @R.function def main( input_1: R.Tensor((1, 3, 10, 10), dtype="float32") ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): # block 0 with R.dataflow(): - lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.nn.gelu(input_1) + lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.tan(input_1) gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv R.output(gv) return gv - verify_model(Gelu(), input_info, {}, expected1) - verify_model(Gelu2(), input_info, {}, expected1) - - -def test_tanh(): - input_info = [([1, 3, 10, 10], "float32")] + verify_model(Tan(), input_info, {}, expected_tan) + #tanh class Tanh(Module): def __init__(self): super().__init__() @@ -2564,7 +2672,7 @@ def forward(self, input): return torch.tanh(input) @tvm.script.ir_module - class expected1: + class expected_tanh: @R.function def main( input_1: R.Tensor((1, 3, 10, 10), dtype="float32") @@ -2576,55 +2684,58 @@ def main( R.output(gv) return gv - verify_model(Tanh(), input_info, {}, expected1) - verify_model(Tanh2(), input_info, {}, expected1) - + 
verify_model(Tanh(), input_info, {}, expected_tanh) + verify_model(Tanh2(), input_info, {}, expected_tanh) -def test_clamp(): - input_info = [([1, 3, 10, 10], "float32")] + #tril + class Tril(Module): + def forward(self, input): + return torch.tril(input, 1) - class Clamp(Module): + class InplaceTril(Module): def forward(self, input): - return torch.clamp(input, min=0.1, max=0.5) + input.tril_(1) + return input @tvm.script.ir_module - class expected1: + class expected_tril: @R.function def main( input_1: R.Tensor((1, 3, 10, 10), dtype="float32") ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): - # block 0 with R.dataflow(): - lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.clip(input_1, 0.1, 0.5) + lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.tril(input_1, 1) gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv R.output(gv) return gv - verify_model(Clamp(), input_info, {}, expected1) - - from tvm.relax.frontend.torch import from_fx - - with pytest.raises( - ValueError, match="TVM only supports constant max value for torch.clamp/clip" - ): - - class Clamp_Error(Module): - def forward(self, input): - return torch.clamp(input, min=0.5, max=None) + verify_model(Tril(), input_info, {}, expected_tril) + verify_model(InplaceTril(), input_info, {}, expected_tril) - gm = fx.symbolic_trace(Clamp_Error()) - from_fx(gm, input_info) + #triu + class Triu(Module): + def forward(self, input): + return torch.triu(input, 1) - with pytest.raises( - ValueError, match="TVM only supports constant min value for torch.clamp/clip" - ): + class InplaceTriu(Module): + def forward(self, input): + input.triu_(1) + return input - class Clamp_Error(Module): - def forward(self, input): - return torch.clamp(input, min=input, max=input) + @tvm.script.ir_module + class expected_triu: + @R.function + def main( + input_1: R.Tensor((1, 3, 10, 10), dtype="float32") + ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): + with R.dataflow(): + lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.triu(input_1, 1) + gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv + R.output(gv) + return gv - gm = fx.symbolic_trace(Clamp_Error()) - from_fx(gm, input_info) + verify_model(Triu(), input_info, {}, expected_triu) + verify_model(InplaceTriu(), input_info, {}, expected_triu) def test_interpolate(): @@ -3085,64 +3196,6 @@ def forward(self, input): assert mod2["main"].body.blocks[0].bindings[0].value.data.dtype == "int64" -def test_tril(): - input_info = [([10, 10], "float32")] - - class Tril(Module): - def forward(self, input): - return torch.tril(input, 1) - - class InplaceTril(Module): - def forward(self, input): - input.tril_(1) - return input - - @tvm.script.ir_module - class expected1: - @R.function - def main( - input_1: R.Tensor((10, 10), dtype="float32") - ) -> R.Tensor((10, 10), dtype="float32"): - # block 0 - with R.dataflow(): - lv: R.Tensor((10, 10), dtype="float32") = R.tril(input_1, 1) - gv: R.Tensor((10, 10), dtype="float32") = lv - R.output(gv) - return gv - - verify_model(Tril(), input_info, {}, expected1) - verify_model(InplaceTril(), input_info, {}, expected1) - - -def test_triu(): - input_info = [([10, 10], "float32")] - - class Triu(Module): - def forward(self, input): - return torch.triu(input, 1) - - class InplaceTriu(Module): - def forward(self, input): - input.triu_(1) - return input - - @tvm.script.ir_module - class expected1: - @R.function - def main( - input_1: R.Tensor((10, 10), dtype="float32") - ) -> R.Tensor((10, 10), dtype="float32"): - # block 0 - with R.dataflow(): - lv: R.Tensor((10, 10), dtype="float32") 
= R.triu(input_1, 1) - gv: R.Tensor((10, 10), dtype="float32") = lv - R.output(gv) - return gv - - verify_model(Triu(), input_info, {}, expected1) - verify_model(InplaceTriu(), input_info, {}, expected1) - - def test_new_ones(): input_info = [([1, 2, 3], "float32")] @@ -3706,27 +3759,6 @@ def main( verify_model(Mean(), [([256, 256], "float32")], {}, Expected1) verify_model(MeanKeepDim(), [([256, 256], "float32")], {}, Expected2) - -def test_rsqrt(): - class Rsqrt(Module): - def forward(self, input): - return torch.rsqrt(input) - - @I.ir_module - class Expected1: - @R.function - def main( - inp_0: R.Tensor((256, 256), dtype="float32") - ) -> R.Tensor((256, 256), dtype="float32"): - with R.dataflow(): - lv: R.Tensor((256, 256), dtype="float32") = R.rsqrt(inp_0) - gv: R.Tensor((256, 256), dtype="float32") = lv - R.output(gv) - return gv - - verify_model(Rsqrt(), [([256, 256], "float32")], {}, Expected1) - - def test_cat(): class Cat0(Module): def forward(self, x, y): @@ -3776,24 +3808,7 @@ def main( verify_model(Cat3(), [([2, 3], "float32"), ([2, 3], "float32")], {}, Expected1) -def test_neg(): - class Neg(Module): - def forward(self, input): - return -input - - @I.ir_module - class Expected1: - @R.function - def main( - inp_0: R.Tensor((256, 256), dtype="float32") - ) -> R.Tensor((256, 256), dtype="float32"): - with R.dataflow(): - lv: R.Tensor((256, 256), dtype="float32") = R.negative(inp_0) - gv: R.Tensor((256, 256), dtype="float32") = lv - R.output(gv) - return gv - verify_model(Neg(), [([256, 256], "float32")], {}, Expected1) def test_max(): @@ -4083,86 +4098,5 @@ def main( expected2, ) - -def test_abs(): - class Abs(Module): - def forward(self, input): - return torch.abs(input) - - @I.ir_module - class Expected1: - @R.function - def main( - inp_0: R.Tensor((256, 256), dtype="float32") - ) -> R.Tensor((256, 256), dtype="float32"): - with R.dataflow(): - lv: R.Tensor((256, 256), dtype="float32") = R.abs(inp_0) - gv: R.Tensor((256, 256), dtype="float32") = lv - R.output(gv) - return gv - - verify_model(Abs(), [([256, 256], "float32")], {}, Expected1) - - -def test_ceil(): - class Ceil(Module): - def forward(self, input): - return torch.ceil(input) - - @I.ir_module - class Expected1: - @R.function - def main( - inp_0: R.Tensor((256, 256), dtype="float32") - ) -> R.Tensor((256, 256), dtype="float32"): - with R.dataflow(): - lv: R.Tensor((256, 256), dtype="float32") = R.ceil(inp_0) - gv: R.Tensor((256, 256), dtype="float32") = lv - R.output(gv) - return gv - - verify_model(Ceil(), [([256, 256], "float32")], {}, Expected1) - - -def test_floor(): - class Floor(Module): - def forward(self, input): - return torch.floor(input) - - @I.ir_module - class Expected1: - @R.function - def main( - inp_0: R.Tensor((256, 256), dtype="float32") - ) -> R.Tensor((256, 256), dtype="float32"): - with R.dataflow(): - lv: R.Tensor((256, 256), dtype="float32") = R.floor(inp_0) - gv: R.Tensor((256, 256), dtype="float32") = lv - R.output(gv) - return gv - - verify_model(Floor(), [([256, 256], "float32")], {}, Expected1) - - -def test_log(): - class Log(Module): - def forward(self, input): - return torch.log(input) - - @I.ir_module - class Expected1: - @R.function - def main( - inp_0: R.Tensor((256, 256), dtype="float32") - ) -> R.Tensor((256, 256), dtype="float32"): - with R.dataflow(): - lv: R.Tensor((256, 256), dtype="float32") = R.log(inp_0) - gv: R.Tensor((256, 256), dtype="float32") = lv - R.output(gv) - return gv - - verify_model(Log(), [([256, 256], "float32")], {}, Expected1) - - if __name__ == "__main__": 
tvm.testing.main() From d0a50466b5eea04e3558b152daa54462bc402a3f Mon Sep 17 00:00:00 2001 From: Shushi Hong <820958424@qq.com> Date: Fri, 7 Feb 2025 13:18:52 +0800 Subject: [PATCH 04/13] Update test_frontend_from_fx.py --- tests/python/relax/test_frontend_from_fx.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/python/relax/test_frontend_from_fx.py b/tests/python/relax/test_frontend_from_fx.py index 04b437d006a2..67a29d5f4ea2 100644 --- a/tests/python/relax/test_frontend_from_fx.py +++ b/tests/python/relax/test_frontend_from_fx.py @@ -4098,5 +4098,6 @@ def main( expected2, ) + if __name__ == "__main__": tvm.testing.main() From 22fde7c0e3e27d1d71a259797f22c570d79d3e44 Mon Sep 17 00:00:00 2001 From: Shushi Hong <820958424@qq.com> Date: Fri, 7 Feb 2025 13:40:32 +0800 Subject: [PATCH 05/13] Update test_frontend_from_fx.py --- tests/python/relax/test_frontend_from_fx.py | 48 ++++++++++----------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/tests/python/relax/test_frontend_from_fx.py b/tests/python/relax/test_frontend_from_fx.py index 67a29d5f4ea2..6cb5bddf5b63 100644 --- a/tests/python/relax/test_frontend_from_fx.py +++ b/tests/python/relax/test_frontend_from_fx.py @@ -1957,7 +1957,7 @@ def main( def test_unary(): input_info = [([1, 3, 10, 10], "float32")] - #abs + # abs class Abs(Module): def forward(self, input): return torch.abs(input) @@ -1973,6 +1973,7 @@ def main( gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv R.output(gv) return gv + verify_model(Abs(), input_info, {}, expected_abs) # acos @@ -2095,7 +2096,7 @@ def main( verify_model(Atanh(), input_info, {}, expected_atanh) - #ceil + # ceil class Ceil(Module): def forward(self, input): return torch.ceil(input) @@ -2113,7 +2114,7 @@ def main( return gv verify_model(Ceil(), input_info, {}, expected_ceil) - #clamp + # clamp class Clamp(Module): def forward(self, input): return torch.clamp(input, min=0.1, max=0.5) @@ -2197,7 +2198,7 @@ def main( verify_model(Cosh(), input_info, {}, expected_cosh) - #dropout + # dropout class Dropout1(Module): def __init__(self): super().__init__() @@ -2245,7 +2246,7 @@ def main( verify_model(Exp(), input_info, {}, expected_exp) - #floor + # floor class Floor(Module): def forward(self, input): return torch.floor(input) @@ -2263,7 +2264,7 @@ def main( return gv verify_model(Floor(), input_info, {}, expected_floor) - #gelu + # gelu class Gelu(Module): def __init__(self): super().__init__() @@ -2292,7 +2293,7 @@ def main( verify_model(Gelu(), input_info, {}, expected_gelu) verify_model(Gelu2(), input_info, {}, expected_gelu) - #hardsigmoid + # hardsigmoid class Hardsigmoid(torch.nn.Module): def __init__(self): super().__init__() @@ -2324,7 +2325,7 @@ def main( verify_model(Hardsigmoid(), input_info, {}, expected_hardsigmoid) verify_model(Hardsigmoid2(), input_info, {}, expected_hardsigmoid) - #hardswish + # hardswish class Hardswish(torch.nn.Module): def __init__(self): super().__init__() @@ -2357,7 +2358,7 @@ def main( verify_model(Hardswish(), input_info, {}, expected_hardswish) verify_model(Hardswish2(), input_info, {}, expected_hardswish) - #log + # log class Log(Module): def forward(self, input): return torch.log(input) @@ -2373,9 +2374,10 @@ def main( gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv R.output(gv) return gv + verify_model(Log(), input_info, {}, expected_log) - #log_softmax + # log_softmax class LogSoftmax(Module): def __init__(self): super().__init__() @@ -2392,7 +2394,7 @@ def forward(self, input): class expected_log_softmax: @R.function def main( - 
input_1: R.Tensor((1, 3, 10, 10), dtype="float32") + input_1: R.Tensor((1, 3, 10, 10), dtype="float32") ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): # block 0 with R.dataflow(): @@ -2404,7 +2406,7 @@ def main( verify_model(LogSoftmax(), input_info, {}, expected_log_softmax) verify_model(LogSoftmax2(), input_info, {}, expected_log_softmax) - #neg + # neg class Neg(Module): def forward(self, input): return -input @@ -2423,7 +2425,7 @@ def main( verify_model(Neg(), input_info, {}, expected_neg) - #relu + # relu class ReLU0(Module): def __init__(self): super().__init__() @@ -2440,7 +2442,7 @@ def forward(self, input): class expected_relu: @R.function def main( - input_1: R.Tensor((1, 3, 10, 10), dtype="float32") + input_1: R.Tensor((1, 3, 10, 10), dtype="float32") ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): # block 0 with R.dataflow(): @@ -2472,7 +2474,7 @@ def main( verify_model(Round(), input_info, {}, expected_round) - #rsqrt + # rsqrt class Rsqrt(Module): def forward(self, input): return torch.rsqrt(input) @@ -2481,7 +2483,7 @@ def forward(self, input): class expected_rsqrt: @R.function def main( - input_1: R.Tensor((1, 3, 10, 10), dtype="float32") + input_1: R.Tensor((1, 3, 10, 10), dtype="float32") ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): with R.dataflow(): lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.rsqrt(input_1) @@ -2520,7 +2522,7 @@ def main( verify_model(Sigmoid(), input_info, {}, expected_sigmoid) verify_model(Sigmoid2(), input_info, {}, expected_sigmoid) - #selu + # silu class SiLU(Module): def __init__(self): super().__init__() @@ -2606,7 +2608,7 @@ def forward(self, input): class expected_softmax: @R.function def main( - input_1: R.Tensor((1, 3, 10, 10), dtype="float32") + input_1: R.Tensor((1, 3, 10, 10), dtype="float32") ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): # block 0 with R.dataflow(): @@ -2658,7 +2660,7 @@ def main( verify_model(Tan(), input_info, {}, expected_tan) - #tanh + # tanh class Tanh(Module): def __init__(self): super().__init__() @@ -2687,7 +2689,7 @@ def main( verify_model(Tanh(), input_info, {}, expected_tanh) verify_model(Tanh2(), input_info, {}, expected_tanh) - #tril + # tril class Tril(Module): def forward(self, input): return torch.tril(input, 1) @@ -2712,7 +2714,7 @@ def main( verify_model(Tril(), input_info, {}, expected_tril) verify_model(InplaceTril(), input_info, {}, expected_tril) - #triu + # triu class Triu(Module): def forward(self, input): return torch.triu(input, 1) @@ -3759,6 +3761,7 @@ def main( verify_model(Mean(), [([256, 256], "float32")], {}, Expected1) verify_model(MeanKeepDim(), [([256, 256], "float32")], {}, Expected2) + def test_cat(): class Cat0(Module): def forward(self, x, y): @@ -3808,9 +3811,6 @@ def main( verify_model(Cat3(), [([2, 3], "float32"), ([2, 3], "float32")], {}, Expected1) - - - def test_max(): class Max(Module): def forward(self, x, y): From d4fbe6b373d846e764eb30d3a5a08c9d3c525098 Mon Sep 17 00:00:00 2001 From: Shushi Hong <820958424@qq.com> Date: Fri, 7 Feb 2025 14:04:06 +0800 Subject: [PATCH 06/13] Update test_frontend_from_fx.py --- tests/python/relax/test_frontend_from_fx.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/python/relax/test_frontend_from_fx.py b/tests/python/relax/test_frontend_from_fx.py index 6cb5bddf5b63..a86baa6fc542 100644 --- a/tests/python/relax/test_frontend_from_fx.py +++ b/tests/python/relax/test_frontend_from_fx.py @@ -2112,6 +2112,7 @@ def main( gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv R.output(gv) return gv + verify_model(Ceil(), 
input_info, {}, expected_ceil) # clamp @@ -2262,6 +2263,7 @@ def main( gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv R.output(gv) return gv + verify_model(Floor(), input_info, {}, expected_floor) # gelu From f2a2ab6d33b3390d79cdc8d0c7b9d4ecc7a1e625 Mon Sep 17 00:00:00 2001 From: Shushi Hong <820958424@qq.com> Date: Fri, 14 Feb 2025 18:02:40 +0800 Subject: [PATCH 07/13] Update erf ops --- python/tvm/relax/frontend/torch/fx_translator.py | 1 + 1 file changed, 1 insertion(+) diff --git a/python/tvm/relax/frontend/torch/fx_translator.py b/python/tvm/relax/frontend/torch/fx_translator.py index d6685d8ce9bd..ce1f284be6bc 100644 --- a/python/tvm/relax/frontend/torch/fx_translator.py +++ b/python/tvm/relax/frontend/torch/fx_translator.py @@ -628,6 +628,7 @@ def create_convert_map( "cos": self._unary_op(relax.op.cos), "cosh": self._unary_op(relax.op.cosh), "dropout": lambda node: self.env[node.args[0]], + "erf": self._unary_op(relax.op.erf), "exp": self._unary_op(relax.op.exp), "floor": self._unary_op(relax.op.floor), "gelu": self._gelu, From 5ba2e0be858683e40eb4e8d3947806e28d470a5b Mon Sep 17 00:00:00 2001 From: Shushi Hong <820958424@qq.com> Date: Fri, 14 Feb 2025 18:04:38 +0800 Subject: [PATCH 08/13] Update template for unary ops --- tests/python/relax/test_frontend_from_fx.py | 447 ++------------------ 1 file changed, 38 insertions(+), 409 deletions(-) diff --git a/tests/python/relax/test_frontend_from_fx.py b/tests/python/relax/test_frontend_from_fx.py index a86baa6fc542..65c2ef0c1e8f 100644 --- a/tests/python/relax/test_frontend_from_fx.py +++ b/tests/python/relax/test_frontend_from_fx.py @@ -1954,166 +1954,57 @@ def main( verify_model(Slice2(), [([8, 16], "float32")], {}, expected2) -def test_unary(): +operator_basic_unary = [ + (torch.abs, R.abs, "abs"), + (torch.acos, R.acos, "acos"), + (torch.acosh, R.acosh, "acosh"), + (torch.asin, R.asin, "asin"), + (torch.asinh, R.asinh, "asinh"), + (torch.atan, R.atan, "atan"), + (torch.atanh, R.atanh, "atanh"), + (torch.ceil, R.ceil, "ceil"), + (torch.cos, R.cos, "cos"), + (torch.cosh, R.cosh, "cosh"), + (torch.erf, R.erf, "erf"), + (torch.exp, R.exp, "exp"), + (torch.floor, R.floor, "floor"), + (torch.log, R.log, "log"), + (torch.neg, R.negative, "neg"), + (torch.round, R.round, "round"), + (torch.rsqrt, R.rsqrt, "rsqrt"), + (torch.sin, R.sin, "sin"), + (torch.sinh, R.sinh, "sinh"), + (torch.sqrt, R.sqrt, "sqrt"), + (torch.tan, R.tan, "tan"), + (torch.tanh, R.tanh, "tanh"), +] + + +@pytest.mark.parametrize("pytorch_op, relax_op, test_name", operator_basic_unary) +def test_basic_unary_ops(pytorch_op, relax_op, test_name): input_info = [([1, 3, 10, 10], "float32")] - # abs - class Abs(Module): + class Unary(Module): def forward(self, input): - return torch.abs(input) + return pytorch_op(input) @tvm.script.ir_module - class expected_abs: + class expected_unary: @R.function def main( input_1: R.Tensor((1, 3, 10, 10), dtype="float32") ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): with R.dataflow(): - lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.abs(input_1) + lv: R.Tensor((1, 3, 10, 10), dtype="float32") = relax_op(input_1) gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv R.output(gv) return gv - verify_model(Abs(), input_info, {}, expected_abs) + verify_model(Unary(), input_info, {}, expected_unary) - # acos - class Acos(Module): - def forward(self, input): - return torch.acos(input) - - @tvm.script.ir_module - class expected_acos: - @R.function - def main( - input_1: R.Tensor((1, 3, 10, 10), dtype="float32") - ) -> R.Tensor((1, 3, 10, 
10), dtype="float32"): - # block 0 - with R.dataflow(): - lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.acos(input_1) - gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv - R.output(gv) - return gv - - verify_model(Acos(), input_info, {}, expected_acos) - - # acosh - class Acosh(Module): - def forward(self, input): - return torch.acosh(input) - - @tvm.script.ir_module - class expected_acosh: - @R.function - def main( - input_1: R.Tensor((1, 3, 10, 10), dtype="float32") - ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): - # block 0 - with R.dataflow(): - lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.acosh(input_1) - gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv - R.output(gv) - return gv - - verify_model(Acosh(), input_info, {}, expected_acosh) - - # asin - class Asin(Module): - def forward(self, input): - return torch.asin(input) - - @tvm.script.ir_module - class expected_asin: - @R.function - def main( - input_1: R.Tensor((1, 3, 10, 10), dtype="float32") - ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): - # block 0 - with R.dataflow(): - lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.asin(input_1) - gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv - R.output(gv) - return gv - - verify_model(Asin(), input_info, {}, expected_asin) - - # asinh - class Asinh(Module): - def forward(self, input): - return torch.asinh(input) - - @tvm.script.ir_module - class expected_asinh: - @R.function - def main( - input_1: R.Tensor((1, 3, 10, 10), dtype="float32") - ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): - # block 0 - with R.dataflow(): - lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.asinh(input_1) - gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv - R.output(gv) - return gv - - verify_model(Asinh(), input_info, {}, expected_asinh) - - # atan - class Atan(Module): - def forward(self, input): - return torch.atan(input) - - @tvm.script.ir_module - class expected_atan: - @R.function - def main( - input_1: R.Tensor((1, 3, 10, 10), dtype="float32") - ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): - # block 0 - with R.dataflow(): - lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.atan(input_1) - gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv - R.output(gv) - return gv - - verify_model(Atan(), input_info, {}, expected_atan) - - # atanh - class Atanh(Module): - def forward(self, input): - return torch.atanh(input) - - @tvm.script.ir_module - class expected_atanh: - @R.function - def main( - input_1: R.Tensor((1, 3, 10, 10), dtype="float32") - ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): - # block 0 - with R.dataflow(): - lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.atanh(input_1) - gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv - R.output(gv) - return gv - verify_model(Atanh(), input_info, {}, expected_atanh) - - # ceil - class Ceil(Module): - def forward(self, input): - return torch.ceil(input) - - @tvm.script.ir_module - class expected_ceil: - @R.function - def main( - input_1: R.Tensor((1, 3, 10, 10), dtype="float32") - ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): - with R.dataflow(): - lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.ceil(input_1) - gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv - R.output(gv) - return gv - - verify_model(Ceil(), input_info, {}, expected_ceil) +def test_extended_unary_ops(): + input_info = [([1, 3, 10, 10], "float32")] # clamp class Clamp(Module): @@ -2159,46 +2050,6 @@ def forward(self, input): gm = fx.symbolic_trace(Clamp_Error()) from_fx(gm, input_info) - # cos - class Cos(Module): - def forward(self, input): - return 
torch.cos(input) - - @tvm.script.ir_module - class expected_cos: - @R.function - def main( - input_1: R.Tensor((1, 3, 10, 10), dtype="float32") - ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): - # block 0 - with R.dataflow(): - lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.cos(input_1) - gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv - R.output(gv) - return gv - - verify_model(Cos(), input_info, {}, expected_cos) - - # cosh - class Cosh(Module): - def forward(self, input): - return torch.cosh(input) - - @tvm.script.ir_module - class expected_cosh: - @R.function - def main( - input_1: R.Tensor((1, 3, 10, 10), dtype="float32") - ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): - # block 0 - with R.dataflow(): - lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.cosh(input_1) - gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv - R.output(gv) - return gv - - verify_model(Cosh(), input_info, {}, expected_cosh) - # dropout class Dropout1(Module): def __init__(self): @@ -2227,45 +2078,6 @@ def main( verify_model(Dropout1(), input_info, {}, expected_dropout) verify_model(Dropout2(), input_info, {}, expected_dropout) - # exp - class Exp(Module): - def forward(self, input): - return torch.exp(input) - - @tvm.script.ir_module - class expected_exp: - @R.function - def main( - input_1: R.Tensor((1, 3, 10, 10), dtype="float32") - ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): - # block 0 - with R.dataflow(): - lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.exp(input_1) - gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv - R.output(gv) - return gv - - verify_model(Exp(), input_info, {}, expected_exp) - - # floor - class Floor(Module): - def forward(self, input): - return torch.floor(input) - - @tvm.script.ir_module - class expected_floor: - @R.function - def main( - input_1: R.Tensor((1, 3, 10, 10), dtype="float32") - ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): - with R.dataflow(): - lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.floor(input_1) - gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv - R.output(gv) - return gv - - verify_model(Floor(), input_info, {}, expected_floor) - # gelu class Gelu(Module): def __init__(self): @@ -2295,6 +2107,9 @@ def main( verify_model(Gelu(), input_info, {}, expected_gelu) verify_model(Gelu2(), input_info, {}, expected_gelu) + # leaky_relu + test_leakyrelu() + # hardsigmoid class Hardsigmoid(torch.nn.Module): def __init__(self): @@ -2360,25 +2175,6 @@ def main( verify_model(Hardswish(), input_info, {}, expected_hardswish) verify_model(Hardswish2(), input_info, {}, expected_hardswish) - # log - class Log(Module): - def forward(self, input): - return torch.log(input) - - @tvm.script.ir_module - class expected_log: - @R.function - def main( - input_1: R.Tensor((1, 3, 10, 10), dtype="float32") - ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): - with R.dataflow(): - lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.log(input_1) - gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv - R.output(gv) - return gv - - verify_model(Log(), input_info, {}, expected_log) - # log_softmax class LogSoftmax(Module): def __init__(self): @@ -2408,25 +2204,6 @@ def main( verify_model(LogSoftmax(), input_info, {}, expected_log_softmax) verify_model(LogSoftmax2(), input_info, {}, expected_log_softmax) - # neg - class Neg(Module): - def forward(self, input): - return -input - - @tvm.script.ir_module - class expected_neg: - @R.function - def main( - input_1: R.Tensor((1, 3, 10, 10), dtype="float32") - ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): - with R.dataflow(): - lv: 
R.Tensor((1, 3, 10, 10), dtype="float32") = R.negative(input_1) - gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv - R.output(gv) - return gv - - verify_model(Neg(), input_info, {}, expected_neg) - # relu class ReLU0(Module): def __init__(self): @@ -2456,45 +2233,6 @@ def main( verify_model(ReLU0(), input_info, {}, expected_relu) verify_model(ReLU1(), input_info, {}, expected_relu) - # round - class Round(Module): - def forward(self, input): - return torch.round(input) - - @tvm.script.ir_module - class expected_round: - @R.function - def main( - input_1: R.Tensor((1, 3, 10, 10), dtype="float32") - ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): - # block 0 - with R.dataflow(): - lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.round(input_1) - gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv - R.output(gv) - return gv - - verify_model(Round(), input_info, {}, expected_round) - - # rsqrt - class Rsqrt(Module): - def forward(self, input): - return torch.rsqrt(input) - - @tvm.script.ir_module - class expected_rsqrt: - @R.function - def main( - input_1: R.Tensor((1, 3, 10, 10), dtype="float32") - ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): - with R.dataflow(): - lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.rsqrt(input_1) - gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv - R.output(gv) - return gv - - verify_model(Rsqrt(), input_info, {}, expected_rsqrt) - # sigmoid class Sigmoid(Module): def __init__(self): @@ -2553,46 +2291,6 @@ def main( verify_model(SiLU(), input_info, {}, expected_silu) verify_model(SiLU2(), input_info, {}, expected_silu) - # sin - class Sin(Module): - def forward(self, input): - return torch.sin(input) - - @tvm.script.ir_module - class expected_sin: - @R.function - def main( - input_1: R.Tensor((1, 3, 10, 10), dtype="float32") - ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): - # block 0 - with R.dataflow(): - lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.sin(input_1) - gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv - R.output(gv) - return gv - - verify_model(Sin(), input_info, {}, expected_sin) - - # sinh - class Sinh(Module): - def forward(self, input): - return torch.sinh(input) - - @tvm.script.ir_module - class expected_sinh: - @R.function - def main( - input_1: R.Tensor((1, 3, 10, 10), dtype="float32") - ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): - # block 0 - with R.dataflow(): - lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.sinh(input_1) - gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv - R.output(gv) - return gv - - verify_model(Sinh(), input_info, {}, expected_sinh) - # softmax class Softmax(Module): def __init__(self): @@ -2622,75 +2320,6 @@ def main( verify_model(Softmax(), input_info, {}, expected_softmax) verify_model(Softmax2(), input_info, {}, expected_softmax) - # sqrt - class Sqrt(Module): - def forward(self, input): - return torch.sqrt(input) - - @tvm.script.ir_module - class expected_sqrt: - @R.function - def main( - input_1: R.Tensor((1, 3, 10, 10), dtype="float32") - ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): - # block 0 - with R.dataflow(): - lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.sqrt(input_1) - gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv - R.output(gv) - return gv - - verify_model(Sqrt(), input_info, {}, expected_sqrt) - - # tan - class Tan(Module): - def forward(self, input): - return torch.tan(input) - - @tvm.script.ir_module - class expected_tan: - @R.function - def main( - input_1: R.Tensor((1, 3, 10, 10), dtype="float32") - ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): - # block 0 - 
with R.dataflow(): - lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.tan(input_1) - gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv - R.output(gv) - return gv - - verify_model(Tan(), input_info, {}, expected_tan) - - # tanh - class Tanh(Module): - def __init__(self): - super().__init__() - self.tanh = torch.nn.Tanh() - - def forward(self, input): - return self.tanh(input) - - class Tanh2(Module): - def forward(self, input): - return torch.tanh(input) - - @tvm.script.ir_module - class expected_tanh: - @R.function - def main( - input_1: R.Tensor((1, 3, 10, 10), dtype="float32") - ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): - # block 0 - with R.dataflow(): - lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.tanh(input_1) - gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv - R.output(gv) - return gv - - verify_model(Tanh(), input_info, {}, expected_tanh) - verify_model(Tanh2(), input_info, {}, expected_tanh) - # tril class Tril(Module): def forward(self, input): From 8a389471792e21424c7ae2b1ac513176c2260a1f Mon Sep 17 00:00:00 2001 From: Shushi Hong <820958424@qq.com> Date: Fri, 14 Feb 2025 18:31:03 +0800 Subject: [PATCH 09/13] Update test_frontend_from_fx.py --- tests/python/relax/test_frontend_from_fx.py | 77 ++++++++++++++------- 1 file changed, 52 insertions(+), 25 deletions(-) diff --git a/tests/python/relax/test_frontend_from_fx.py b/tests/python/relax/test_frontend_from_fx.py index 65c2ef0c1e8f..cc4b1e766e23 100644 --- a/tests/python/relax/test_frontend_from_fx.py +++ b/tests/python/relax/test_frontend_from_fx.py @@ -783,30 +783,6 @@ def main( verify_model(LeakyReLU1(), input_info, {}, expected) -def test_relu6(): - class ReLU6(Module): - def __init__(self): - super().__init__() - self.relu6 = torch.nn.ReLU6() - - def forward(self, input): - return self.relu6(input) - - @tvm.script.ir_module - class expected: - @R.function - def main(input: R.Tensor((10, 10), dtype="float32")) -> R.Tensor((10, 10), dtype="float32"): - # block 0 - with R.dataflow(): - lv: R.Tensor((10, 10), dtype="float32") = R.clip(input, 0, 6) - gv: R.Tensor((10, 10), dtype="float32") = lv - R.output(gv) - return gv - - input_info = [([10, 10], "float32")] - verify_model(ReLU6(), input_info, {}, expected) - - def test_maxpool2d(): input_info = [([1, 3, 10, 10], "float32")] @@ -1976,7 +1952,6 @@ def main( (torch.sinh, R.sinh, "sinh"), (torch.sqrt, R.sqrt, "sqrt"), (torch.tan, R.tan, "tan"), - (torch.tanh, R.tanh, "tanh"), ] @@ -2233,6 +2208,29 @@ def main( verify_model(ReLU0(), input_info, {}, expected_relu) verify_model(ReLU1(), input_info, {}, expected_relu) + # relu6 + class ReLU6(Module): + def __init__(self): + super().__init__() + self.relu6 = torch.nn.ReLU6() + + def forward(self, input): + return self.relu6(input) + + @tvm.script.ir_module + class expected_relu6: + @R.function + def main( + input_1: R.Tensor((1, 3, 10, 10), dtype="float32") + ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): + with R.dataflow(): + lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.clip(input_1, 0, 6) + gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv + R.output(gv) + return gv + + verify_model(ReLU6(), input_info, {}, expected_relu6) + # sigmoid class Sigmoid(Module): def __init__(self): @@ -2320,6 +2318,35 @@ def main( verify_model(Softmax(), input_info, {}, expected_softmax) verify_model(Softmax2(), input_info, {}, expected_softmax) + # tanh + class Tanh(Module): + def __init__(self): + super().__init__() + self.tanh = torch.nn.Tanh() + + def forward(self, input): + return self.tanh(input) + + class Tanh2(Module): + 
def forward(self, input): + return torch.tanh(input) + + @tvm.script.ir_module + class expected_tanh: + @R.function + def main( + input_1: R.Tensor((1, 3, 10, 10), dtype="float32") + ) -> R.Tensor((1, 3, 10, 10), dtype="float32"): + # block 0 + with R.dataflow(): + lv: R.Tensor((1, 3, 10, 10), dtype="float32") = R.tanh(input_1) + gv: R.Tensor((1, 3, 10, 10), dtype="float32") = lv + R.output(gv) + return gv + + verify_model(Tanh(), input_info, {}, expected_tanh) + verify_model(Tanh2(), input_info, {}, expected_tanh) + # tril class Tril(Module): def forward(self, input): From 6a6304dd00ca9e1c25af6b07a4fb97f0782e6329 Mon Sep 17 00:00:00 2001 From: Shushi Hong <820958424@qq.com> Date: Fri, 14 Feb 2025 23:05:45 +0800 Subject: [PATCH 10/13] delete test_name --- tests/python/relax/test_frontend_from_fx.py | 46 ++++++++++----------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/tests/python/relax/test_frontend_from_fx.py b/tests/python/relax/test_frontend_from_fx.py index cc4b1e766e23..c4fc5ae17ee9 100644 --- a/tests/python/relax/test_frontend_from_fx.py +++ b/tests/python/relax/test_frontend_from_fx.py @@ -1931,32 +1931,32 @@ def main( operator_basic_unary = [ - (torch.abs, R.abs, "abs"), - (torch.acos, R.acos, "acos"), - (torch.acosh, R.acosh, "acosh"), - (torch.asin, R.asin, "asin"), - (torch.asinh, R.asinh, "asinh"), - (torch.atan, R.atan, "atan"), - (torch.atanh, R.atanh, "atanh"), - (torch.ceil, R.ceil, "ceil"), - (torch.cos, R.cos, "cos"), - (torch.cosh, R.cosh, "cosh"), - (torch.erf, R.erf, "erf"), - (torch.exp, R.exp, "exp"), - (torch.floor, R.floor, "floor"), - (torch.log, R.log, "log"), - (torch.neg, R.negative, "neg"), - (torch.round, R.round, "round"), - (torch.rsqrt, R.rsqrt, "rsqrt"), - (torch.sin, R.sin, "sin"), - (torch.sinh, R.sinh, "sinh"), - (torch.sqrt, R.sqrt, "sqrt"), - (torch.tan, R.tan, "tan"), + (torch.abs, R.abs), + (torch.acos, R.acos), + (torch.acosh, R.acosh), + (torch.asin, R.asin), + (torch.asinh, R.asinh), + (torch.atan, R.atan), + (torch.atanh, R.atanh), + (torch.ceil, R.ceil), + (torch.cos, R.cos), + (torch.cosh, R.cosh), + (torch.erf, R.erf), + (torch.exp, R.exp,), + (torch.floor, R.floor), + (torch.log, R.log), + (torch.neg, R.negative), + (torch.round, R.round), + (torch.rsqrt, R.rsqrt), + (torch.sin, R.sin), + (torch.sinh, R.sinh), + (torch.sqrt, R.sqrt), + (torch.tan, R.tan), ] -@pytest.mark.parametrize("pytorch_op, relax_op, test_name", operator_basic_unary) -def test_basic_unary_ops(pytorch_op, relax_op, test_name): +@pytest.mark.parametrize("pytorch_op, relax_op", operator_basic_unary) +def test_basic_unary_ops(pytorch_op, relax_op): input_info = [([1, 3, 10, 10], "float32")] class Unary(Module): From 49d75a57345c15b14703142d364a9547c198b486 Mon Sep 17 00:00:00 2001 From: Shushi Hong <820958424@qq.com> Date: Fri, 14 Feb 2025 23:35:22 +0800 Subject: [PATCH 11/13] Update test_frontend_from_fx.py --- tests/python/relax/test_frontend_from_fx.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/python/relax/test_frontend_from_fx.py b/tests/python/relax/test_frontend_from_fx.py index c4fc5ae17ee9..3c932f86c582 100644 --- a/tests/python/relax/test_frontend_from_fx.py +++ b/tests/python/relax/test_frontend_from_fx.py @@ -1942,7 +1942,7 @@ def main( (torch.cos, R.cos), (torch.cosh, R.cosh), (torch.erf, R.erf), - (torch.exp, R.exp,), + (torch.exp, R.exp), (torch.floor, R.floor), (torch.log, R.log), (torch.neg, R.negative), From 219c2bf13993c59aca6284e8d57998066b6646e0 Mon Sep 17 00:00:00 2001 From: Shushi Hong 
<820958424@qq.com> Date: Sat, 15 Feb 2025 02:33:45 +0800 Subject: [PATCH 12/13] Update test_frontend_from_fx.py --- tests/python/relax/test_frontend_from_fx.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/python/relax/test_frontend_from_fx.py b/tests/python/relax/test_frontend_from_fx.py index 3c932f86c582..14c8f93adedd 100644 --- a/tests/python/relax/test_frontend_from_fx.py +++ b/tests/python/relax/test_frontend_from_fx.py @@ -1954,7 +1954,6 @@ def main( (torch.tan, R.tan), ] - @pytest.mark.parametrize("pytorch_op, relax_op", operator_basic_unary) def test_basic_unary_ops(pytorch_op, relax_op): input_info = [([1, 3, 10, 10], "float32")] From 3c8a1f5f8fe0623fb243ffeeafb17498bf6aec66 Mon Sep 17 00:00:00 2001 From: Shushi Hong <820958424@qq.com> Date: Sat, 15 Feb 2025 02:34:24 +0800 Subject: [PATCH 13/13] Update test_frontend_from_fx.py --- tests/python/relax/test_frontend_from_fx.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/python/relax/test_frontend_from_fx.py b/tests/python/relax/test_frontend_from_fx.py index 14c8f93adedd..3c932f86c582 100644 --- a/tests/python/relax/test_frontend_from_fx.py +++ b/tests/python/relax/test_frontend_from_fx.py @@ -1954,6 +1954,7 @@ def main( (torch.tan, R.tan), ] + @pytest.mark.parametrize("pytorch_op, relax_op", operator_basic_unary) def test_basic_unary_ops(pytorch_op, relax_op): input_info = [([1, 3, 10, 10], "float32")]
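
The series above converges on a single parametrized test for the basic unary operators. The hunks show the (pytorch_op, relax_op) table and the test signature, but not the test body or how the expected Relax module is produced for each operator. Below is a minimal, self-contained sketch of that pattern under stated assumptions: the body of `Unary` and the `make_unary_expected` helper are illustrative guesses, not the file's actual code, and the traced `from_fx` translation plus structural comparison stands in for whatever the test file's `verify_model` helper does internally.

import pytest
import torch
from torch import fx, nn
import tvm
import tvm.testing
from tvm import relax
from tvm.relax.frontend.torch import from_fx

# Mirrors the table added in PATCH 10/13, trimmed to a few entries.
operator_basic_unary = [
    (torch.abs, relax.op.abs),
    (torch.ceil, relax.op.ceil),
    (torch.floor, relax.op.floor),
    (torch.log, relax.op.log),
]


def make_unary_expected(relax_op, shape=(1, 3, 10, 10), dtype="float32"):
    # Hypothetical helper: builds the expected IRModule with BlockBuilder,
    # since a TVMScript @I.ir_module literal cannot be parametrized over
    # the operator under test.
    bb = relax.BlockBuilder()
    x = relax.Var("input_1", relax.TensorStructInfo(shape, dtype))
    with bb.function("main", [x]):
        with bb.dataflow():
            lv = bb.emit(relax_op(x))       # lv = R.<op>(input_1)
            gv = bb.emit_output(lv)         # gv = lv; R.output(gv)
        bb.emit_func_output(gv)
    return bb.get()


@pytest.mark.parametrize("pytorch_op, relax_op", operator_basic_unary)
def test_basic_unary_ops(pytorch_op, relax_op):
    class Unary(nn.Module):
        def forward(self, input):
            return pytorch_op(input)

    # Trace with torch.fx, translate to Relax, and compare against the
    # expected module -- the comparison verify_model presumably performs.
    traced = fx.symbolic_trace(Unary())
    mod = from_fx(traced, [((1, 3, 10, 10), "float32")])
    tvm.ir.assert_structural_equal(mod, make_unary_expected(relax_op))

The design choice is visible across the series: operators whose expected IR is a single Relax op call collapse into this one table-driven test, while operators that do not fit the template stay as explicit tests in PATCH 09/13 -- ReLU6 lowers to R.clip(input_1, 0, 6) rather than a unary op, and Tanh must cover both the torch.nn.Tanh module form and the torch.tanh function form.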