diff --git a/tests/filecheck/dialects/onnx/onnx_invalid.mlir b/tests/filecheck/dialects/onnx/onnx_invalid.mlir
index e46daed02a..663817a113 100644
--- a/tests/filecheck/dialects/onnx/onnx_invalid.mlir
+++ b/tests/filecheck/dialects/onnx/onnx_invalid.mlir
@@ -363,3 +363,124 @@ builtin.module {
 }
 
+// -----
+
+builtin.module {
+  %t0 = "test.op"(): () -> (f32)
+
+  // CHECK: operand at position 0 does not verify!
+  // CHECK: Unexpected attribute f32
+  %res_max_pool_single_out = "onnx.MaxPoolSingleOut"(%t0) {onnx_node_name = "/MaxPoolSingleOut"} : (f32) -> tensor<5x5x32x32xf32>
+}
+
+// -----
+
+builtin.module {
+  %t0 = "test.op"(): () -> (tensor<5x5x32x32xf32>)
+
+  // CHECK: result at position 0 does not verify!
+  // CHECK: Unexpected attribute tensor<5x5x32x32xi32>
+  %res_max_pool_single_out = "onnx.MaxPoolSingleOut"(%t0) {"onnx_node_name" = "/MaxPoolSingleOut"} : (tensor<5x5x32x32xf32>) -> tensor<5x5x32x32xi32>
+
+}
+
+// -----
+
+builtin.module {
+  %t0 = "test.op"(): () -> (tensor<5x5x32x32xf32>)
+
+  // CHECK: Operation does not verify: Invalid auto_pad string. Must be one of ['NOTSET', 'SAME_UPPER', 'SAME_LOWER', 'VALID']
+  %res_max_pool_single_out = "onnx.MaxPoolSingleOut"(%t0) {"onnx_node_name" = "/MaxPoolSingleOut", "auto_pad" = "INVALID", "ceil_mode" = 0 : i64, "kernel_shape" = [3 : i64, 3 : i64], "dilations" = [1 : i64, 1 : i64], "pads" = [0 : i64, 0 : i64, 0 : i64, 0 : i64], "storage_order" = 0 : i64, "strides" = [1 : i64, 1 : i64]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x32x32xf32>
+
+}
+
+// -----
+
+builtin.module {
+  %t0 = "test.op"(): () -> (tensor<5x5x32x32xf32>)
+
+  // CHECK: Operation does not verify: ceil_mode value must be either zero or one
+  %res_max_pool_single_out = "onnx.MaxPoolSingleOut"(%t0) {"onnx_node_name" = "/MaxPoolSingleOut", "auto_pad" = "NOTSET", "ceil_mode" = 2 : i64, "kernel_shape" = [3 : i64, 3 : i64], "dilations" = [1 : i64, 1 : i64], "pads" = [0 : i64, 0 : i64, 0 : i64, 0 : i64], "storage_order" = 0 : i64, "strides" = [1 : i64, 1 : i64]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x32x32xf32>
+
+}
+
+// -----
+
+builtin.module {
+  %t0 = "test.op"(): () -> (tensor<5x5x32x32xf32>)
+
+  // CHECK: Operation does not verify: input data and kernel shape rank mismatch: (2) vs (1)
+  %res_max_pool_single_out = "onnx.MaxPoolSingleOut"(%t0) {"onnx_node_name" = "/MaxPoolSingleOut", "auto_pad" = "NOTSET", "ceil_mode" = 0 : i64, "kernel_shape" = [3 : i64], "dilations" = [1 : i64, 1 : i64], "pads" = [0 : i64, 0 : i64, 0 : i64, 0 : i64], "storage_order" = 0 : i64, "strides" = [1 : i64, 1 : i64]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x32x32xf32>
+
+}
+
+// -----
+
+builtin.module {
+  %t0 = "test.op"(): () -> (tensor<5x5x32x32xf32>)
+
+  // CHECK: Operation does not verify: dilation value must be strictly positive
+  %res_max_pool_single_out = "onnx.MaxPoolSingleOut"(%t0) {"onnx_node_name" = "/MaxPoolSingleOut", "auto_pad" = "NOTSET", "ceil_mode" = 0 : i64, "kernel_shape" = [3 : i64, 3 : i64], "dilations" = [-1 : i64, -1 : i64], "pads" = [0 : i64, 0 : i64, 0 : i64, 0 : i64], "storage_order" = 0 : i64, "strides" = [1 : i64, 1 : i64]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x32x32xf32>
+
+}
+
+// -----
+
+builtin.module {
+  %t0 = "test.op"(): () -> (tensor<5x5x32x32xf32>)
+
+  // CHECK: Operation does not verify: dilations rank (3) and kernel shape rank (2) are not the same
+  %res_max_pool_single_out = "onnx.MaxPoolSingleOut"(%t0) {"onnx_node_name" = "/MaxPoolSingleOut", "auto_pad" = "NOTSET", "ceil_mode" = 0 : i64, "kernel_shape" = [3 : i64, 3 : i64], "dilations" = [1 : i64, 1 : i64, 1 : i64], "pads" = [0 : i64, 0 : i64, 0 : i64, 0 : i64], "storage_order" = 0 : i64, "strides" = [1 : i64, 1 : i64]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x32x32xf32>
i64], "pads" = [0 : i64, 0 : i64, 0 : i64, 0 : i64], "storage_order" = 0 : i64, "strides" = [1 : i64, 1 : i64]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x32x32xf32> + +} + +// ----- + +builtin.module { + %t0 = "test.op"(): () -> (tensor<5x5x32x32xf32>) + + // CHECK: Operation does not verify: column major storage order not implemented yet + %res_max_pool_single_out = "onnx.MaxPoolSingleOut"(%t0) {"onnx_node_name" = "/MaxPoolSingleOut", "auto_pad" = "NOTSET", "ceil_mode" = 0 : i64, "kernel_shape" = [3 : i64, 3 : i64], "dilations" = [1 : i64, 1 : i64], "pads" = [0 : i64, 0 : i64, 0 : i64, 0 : i64], "storage_order" = 1 : i64, "strides" = [1 : i64, 1 : i64]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x32x32xf32> + +} + +// ----- + +builtin.module { + %t0 = "test.op"(): () -> (tensor<5x5x32x32xf32>) + + // CHECK: Operation does not verify: stride value must be non zero positive + %res_max_pool_single_out = "onnx.MaxPoolSingleOut"(%t0) {"onnx_node_name" = "/MaxPoolSingleOut", "auto_pad" = "NOTSET", "ceil_mode" = 0 : i64, "kernel_shape" = [3 : i64, 3 : i64], "dilations" = [1 : i64, 1 : i64], "pads" = [0 : i64, 0 : i64, 0 : i64, 0 : i64], "storage_order" = 0 : i64, "strides" = [-1 : i64, -1 : i64]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x32x32xf32> + +} + +// ----- + +builtin.module { + %t0 = "test.op"(): () -> (tensor<5x5x32x32xf32>) + + // CHECK: Operation does not verify: strides rank (3) and kernel shape rank (2) are not the same + %res_max_pool_single_out = "onnx.MaxPoolSingleOut"(%t0) {"onnx_node_name" = "/MaxPoolSingleOut", "auto_pad" = "NOTSET", "ceil_mode" = 0 : i64, "kernel_shape" = [3 : i64, 3 : i64], "dilations" = [1 : i64, 1 : i64], "pads" = [0 : i64, 0 : i64, 0 : i64, 0 : i64], "storage_order" = 0 : i64, "strides" = [1 : i64, 1 : i64, 1: i64]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x32x32xf32> + +} + +// ----- + +builtin.module { + %t0 = "test.op"(): () -> (tensor<5x5x32x32xf32>) + + // CHECK: Operation does not verify: pads value must be nonnegative + %res_max_pool_single_out = "onnx.MaxPoolSingleOut"(%t0) {"onnx_node_name" = "/MaxPoolSingleOut", "auto_pad" = "NOTSET", "ceil_mode" = 0 : i64, "kernel_shape" = [3 : i64, 3 : i64], "dilations" = [1 : i64, 1 : i64], "pads" = [-2 : i64, 0 : i64, 0 : i64, 0 : i64], "storage_order" = 0 : i64, "strides" = [1 : i64, 1 : i64]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x32x32xf32> + +} + +// ----- + +builtin.module { + %t0 = "test.op"(): () -> (tensor<5x5x32x32xf32>) + + // CHECK: Operation does not verify: pads rank (5) is not twice the kernel shape rank (2) + %res_max_pool_single_out = "onnx.MaxPoolSingleOut"(%t0) {"onnx_node_name" = "/MaxPoolSingleOut", "auto_pad" = "NOTSET", "ceil_mode" = 0 : i64, "kernel_shape" = [3 : i64, 3 : i64], "dilations" = [1 : i64, 1 : i64], "pads" = [0 : i64, 0 : i64, 0 : i64, 0 : i64, 0: i64], "storage_order" = 0 : i64, "strides" = [1 : i64, 1 : i64]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x32x32xf32> + +} + diff --git a/tests/filecheck/dialects/onnx/onnx_ops.mlir b/tests/filecheck/dialects/onnx/onnx_ops.mlir index 1c25a0f29c..b3419e10e9 100644 --- a/tests/filecheck/dialects/onnx/onnx_ops.mlir +++ b/tests/filecheck/dialects/onnx/onnx_ops.mlir @@ -12,6 +12,7 @@ %t19 = "test.op"(): () -> (tensor<10x10xf32>) %t20,%t21,%t22 = "test.op"(): () -> (tensor<1x1x5x5xf32>, tensor<1x1x3x3xf32>, none) %t23,%t24,%t25 = "test.op"(): () -> (tensor<1x1x7x5xf32>, tensor<1x1x3x3xf32>, none) +%t26 = "test.op"(): () -> (tensor<5x5x32x32xf32>) %res_add = "onnx.Add"(%t0, %t1) {onnx_node_name = "/Add"} : (tensor<1x2x6xf32>, tensor<1x2x6xf32>) -> 
tensor<1x2x6xf32> // CHECK: %res_add = onnx.Add(%t0, %t1) {"onnx_node_name" = "/Add"}: (tensor<1x2x6xf32>, tensor<1x2x6xf32>) -> tensor<1x2x6xf32> @@ -64,3 +65,5 @@ %res_constant = "onnx.Constant"() {onnx_node_name = "/Constant", "value" = dense<1> : tensor<1xi64>}: () -> tensor<1xi64> //CHECK: %res_constant = onnx.Constant() {"onnx_node_name" = "/Constant", "value" = dense<1> : tensor<1xi64>}: () -> tensor<1xi64> +%res_max_pool_single_out = "onnx.MaxPoolSingleOut"(%t26) {onnx_node_name = "/MaxPoolSingleOut", "auto_pad" = "VALID", "ceil_mode" = 0 : i64, "kernel_shape" = [3 : i64, 3 : i64], "dilations" = [1 : i64, 1 : i64], "pads" = [0 : i64, 0 : i64, 0 : i64, 0 : i64], "storage_order" = 0 : i64, "strides" = [1 : i64, 1 : i64]}: (tensor<5x5x32x32xf32>) -> tensor<5x5x30x30xf32> +//CHECK: %res_max_pool_single_out = onnx.MaxPoolSingleOut(%t26) {"onnx_node_name" = "/MaxPoolSingleOut", "auto_pad" = "VALID", "ceil_mode" = 0 : i64, "kernel_shape" = [3 : i64, 3 : i64], "dilations" = [1 : i64, 1 : i64], "pads" = [0 : i64, 0 : i64, 0 : i64, 0 : i64], "storage_order" = 0 : i64, "strides" = [1 : i64, 1 : i64]}: (tensor<5x5x32x32xf32>) -> tensor<5x5x30x30xf32> diff --git a/xdsl/dialects/onnx.py b/xdsl/dialects/onnx.py index 4cda2ca979..85c8108bd6 100644 --- a/xdsl/dialects/onnx.py +++ b/xdsl/dialects/onnx.py @@ -13,6 +13,7 @@ FloatAttr, IntegerAttr, IntegerType, + MemRefType, NoneType, SSAValue, StringAttr, @@ -581,7 +582,7 @@ class Constant(IRDLOperation): Exactly one of the provided attributes, either value, sparse_value, or value_* must be specified. - Parameters: + Attributes: - sparse_value: sparse_tensor The value for the elements of the output tensor in sparse format. (currently unsupported) - value : tensor @@ -671,6 +672,159 @@ def verify_(self) -> None: ) +@irdl_op_definition +class MaxPoolSingleOut(IRDLOperation): + """ + ONNX MaxPool operation with a single output. + + Attributes: + + - auto_pad string (default is 'NOTSET'): auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or + VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the + input so that output_shape[i] = ceil(input_shape[i] / strides[i]) for each axis i. The padding is split between + the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd + number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER. + + - ceil_mode int (default is '1'): Whether to use ceil or floor (default) to compute the output shape. + + - dilations list of ints: Dilation value along each spatial axis of filter. + + - kernel_shape list of ints: The size of the kernel along each axis. + + - pads list of ints: Padding for the beginning and ending along each spatial axis, it can take any value greater + than or equal to 0. The value represent the number of pixels added to the beginning and end part of the + corresponding axis. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin + the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis + `i`. This attribute cannot be used simultaneously with auto_pad attribute. If not present, the padding defaults + to 0 along start and end of each spatial axis. + + - storage_order int (default is '0') : The storage order of the tensor. 0 is row major, and 1 is column major. 
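A note on the expected result type in the test above: with auto_pad VALID (no padding), a 3x3 kernel, stride 1, and dilation 1, each 32-pixel spatial axis shrinks to floor((32 - 3) / 1) + 1 = 30, hence tensor<5x5x30x30xf32>. A minimal sketch of the per-axis arithmetic in plain Python; max_pool_out_dim is an illustrative helper, not part of this patch:

import math

def max_pool_out_dim(in_dim: int, kernel: int, stride: int = 1,
                     dilation: int = 1, pad_total: int = 0,
                     ceil_mode: bool = False) -> int:
    # Effective kernel extent once dilation is applied.
    effective_kernel = dilation * (kernel - 1) + 1
    span = (in_dim + pad_total - effective_kernel) / stride
    # ceil_mode = 1 rounds up instead of the default floor.
    return (math.ceil(span) if ceil_mode else math.floor(span)) + 1

# The onnx_ops.mlir case: 32x32 spatial input, 3x3 kernel, stride 1, no padding.
assert max_pool_out_dim(32, kernel=3) == 30  # matches tensor<5x5x30x30xf32>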
diff --git a/xdsl/dialects/onnx.py b/xdsl/dialects/onnx.py
index 4cda2ca979..85c8108bd6 100644
--- a/xdsl/dialects/onnx.py
+++ b/xdsl/dialects/onnx.py
@@ -13,6 +13,7 @@
     FloatAttr,
     IntegerAttr,
     IntegerType,
+    MemRefType,
     NoneType,
     SSAValue,
     StringAttr,
@@ -581,7 +582,7 @@ class Constant(IRDLOperation):
     Exactly one of the provided attributes, either value, sparse_value,
     or value_* must be specified.
 
-    Parameters:
+    Attributes:
 
     - sparse_value: sparse_tensor The value for the elements of the output tensor
       in sparse format. (currently unsupported)
     - value : tensor
@@ -671,6 +672,159 @@ def verify_(self) -> None:
         )
 
 
+@irdl_op_definition
+class MaxPoolSingleOut(IRDLOperation):
+    """
+    ONNX MaxPool operation with a single output.
+
+    Attributes:
+
+    - auto_pad string (default is 'NOTSET'): must be either NOTSET, SAME_UPPER,
+      SAME_LOWER or VALID. NOTSET means explicit padding is used. SAME_UPPER or
+      SAME_LOWER mean pad the input so that
+      output_shape[i] = ceil(input_shape[i] / strides[i]) for each axis i. The
+      padding is split between the two sides equally or almost equally (depending
+      on whether it is even or odd). In case the padding is an odd number, the
+      extra padding is added at the end for SAME_UPPER and at the beginning for
+      SAME_LOWER.
+
+    - ceil_mode int (default is '0'): Whether to use ceil or floor (default) to
+      compute the output shape.
+
+    - dilations list of ints: Dilation value along each spatial axis of the filter.
+
+    - kernel_shape list of ints: The size of the kernel along each axis.
+
+    - pads list of ints: Padding for the beginning and ending along each spatial
+      axis; it can take any value greater than or equal to 0. The value represents
+      the number of pixels added to the beginning and end of the corresponding
+      axis. The `pads` format should be as follows: [x1_begin, x2_begin, ...,
+      x1_end, x2_end, ...], where xi_begin is the number of pixels added at the
+      beginning of axis `i` and xi_end is the number of pixels added at the end of
+      axis `i`. This attribute cannot be used simultaneously with the auto_pad
+      attribute. If not present, the padding defaults to 0 along the start and end
+      of each spatial axis.
+
+    - storage_order int (default is '0'): The storage order of the tensor. 0 is
+      row major, and 1 is column major. This attribute is used only to convert an
+      n-tuple index value into a single integer value for producing the second
+      output.
+
+    - strides list of ints: Stride along each spatial axis. If not present, the
+      stride defaults to 1 along each spatial axis.
+    """
+
+    name = "onnx.MaxPoolSingleOut"
+
+    T = Annotated[AnyFloat | IntegerType, ConstraintVar("T")]
+    data = operand_def(TensorType[T] | MemRefType[T])
+    output = result_def(TensorType[T] | MemRefType[T])
+
+    auto_pad = attr_def(StringAttr)
+    ceil_mode = attr_def(AnyIntegerAttr)
+    dilations = attr_def(ArrayAttr[AnyIntegerAttr])
+    kernel_shape = attr_def(ArrayAttr[AnyIntegerAttr])
+    pads = attr_def(ArrayAttr[AnyIntegerAttr])
+    storage_order = attr_def(AnyIntegerAttr)
+    strides = attr_def(ArrayAttr[AnyIntegerAttr])
+
+    assembly_format = (
+        "`(` $data`)` attr-dict `:` `(` type($data) `)` `->` type($output)"
+    )
+
+    def __init__(
+        self,
+        data: SSAValue,
+        auto_pad: Attribute,
+        ceil_mode: Attribute,
+        dilations: Attribute,
+        kernel_shape: Attribute,
+        pads: Attribute,
+        storage_order: Attribute,
+        strides: Attribute,
+    ):
+        super().__init__(
+            attributes={
+                "auto_pad": auto_pad,
+                "ceil_mode": ceil_mode,
+                "dilations": dilations,
+                "kernel_shape": kernel_shape,
+                "pads": pads,
+                "storage_order": storage_order,
+                "strides": strides,
+            },
+            operands=[data],
+            result_types=[data.type],
+        )
+
+    def verify_(self) -> None:
+        if not isinstance(
+            data_type := self.data.type, TensorType | MemRefType
+        ) or not isinstance(output_type := self.output.type, TensorType | MemRefType):
+            assert False, (
+                "onnx max pool operation operand (data) and result (output) must "
+                "be of type TensorType or MemRefType"
+            )
+
+        data_type = cast(TensorType[Attribute], data_type)
+        output_type = cast(TensorType[Attribute], output_type)
+
+        # auto_pad
+        auto_pad_strings = ["NOTSET", "SAME_UPPER", "SAME_LOWER", "VALID"]
+        if self.auto_pad.data not in auto_pad_strings:
+            raise VerifyException(
+                f"Invalid auto_pad string. Must be one of {auto_pad_strings}"
+            )
+
+        # ceil_mode
+        if self.ceil_mode.value.data < 0 or self.ceil_mode.value.data > 1:
+            raise VerifyException("ceil_mode value must be either zero or one")
+
+        # kernel_shape
+        if (input_dims := len(data_type.get_shape()) - 2) != (
+            kernel_dims := len(self.kernel_shape)
+        ):
+            raise VerifyException(
+                f"input data and kernel shape rank mismatch: ({input_dims}) vs "
+                f"({kernel_dims})"
+            )
+
+        # dilations
+        for value in self.dilations:
+            if value.value.data <= 0:
+                raise VerifyException("dilation value must be strictly positive")
+
+        if (dilations_dims := len(self.dilations)) != (
+            kernel_dims := len(self.kernel_shape)
+        ):
+            raise VerifyException(
+                f"dilations rank ({dilations_dims}) and kernel shape rank "
+                f"({kernel_dims}) are not the same"
+            )
+
+        # storage_order
+        # Column-major storage order is not supported in onnx-mlir, so only
+        # row-major mode is accepted here.
+        if self.storage_order.value.data != 0:
+            raise VerifyException("column major storage order not implemented yet")
+
+        # strides
+        for value in self.strides:
+            if value.value.data <= 0:
+                raise VerifyException("stride value must be strictly positive")
+
+        if (strides_dims := len(self.strides)) != (
+            kernel_dims := len(self.kernel_shape)
+        ):
+            raise VerifyException(
+                f"strides rank ({strides_dims}) and kernel shape rank "
+                f"({kernel_dims}) are not the same"
+            )
+
+        # pads
+        for value in self.pads:
+            if value.value.data < 0:
+                raise VerifyException("pads value must be nonnegative")
+
+        if (pads_dims := len(self.pads)) != 2 * len(self.kernel_shape):
+            raise VerifyException(
+                f"pads rank ({pads_dims}) is not twice the kernel shape rank "
+                f"({len(self.kernel_shape)})"
+            )
+
+
 ONNX = Dialect(
     "onnx",
     [
@@ -680,6 +834,7 @@ def verify_(self) -> None:
         Conv,
         Div,
         Gemm,
+        MaxPoolSingleOut,
         Mul,
         Relu,
         Reshape,
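The docstring's description of how SAME_UPPER and SAME_LOWER split an odd amount of padding can be made concrete. A minimal sketch in plain Python, assuming the ONNX SAME semantics quoted in the docstring (output_shape[i] = ceil(input_shape[i] / strides[i])); same_auto_pads is an illustrative helper, not part of this patch, and dilation is taken as 1 for brevity:

import math

def same_auto_pads(in_dim: int, kernel: int, stride: int, upper: bool) -> tuple[int, int]:
    # Total padding needed so that out_dim == ceil(in_dim / stride).
    out_dim = math.ceil(in_dim / stride)
    total = max((out_dim - 1) * stride + kernel - in_dim, 0)
    half, extra = divmod(total, 2)
    # For an odd total, the extra pixel goes at the end for SAME_UPPER
    # and at the beginning for SAME_LOWER.
    return (half, half + extra) if upper else (half + extra, half)

assert same_auto_pads(5, kernel=2, stride=2, upper=True) == (0, 1)
assert same_auto_pads(5, kernel=2, stride=2, upper=False) == (1, 0)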