Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

dialects: (onnx) add onnx.MaxPoolSingleOut #2072

Merged
merged 8 commits into from
Feb 1, 2024
Merged
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
121 changes: 121 additions & 0 deletions tests/filecheck/dialects/onnx/onnx_invalid.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -363,3 +363,124 @@ builtin.module {

}

// -----

// MaxPoolSingleOut operand must be a tensor/memref; a bare scalar f32 must be rejected
// by the operand constraint before op-level verification runs.
builtin.module {
%t0 = "test.op"(): () -> (f32)

// CHECK: operand at position 0 does not verify!
// CHECK: Unexpected attribute f32
%res_max_pool_single_out = "onnx.MaxPoolSingleOut"(%t0) {onnx_node_name = "/MaxPoolSingleOut"} : (f32) -> tensor<5x5x32x32xf32>
}

// -----

// Result element type (i32) must match the operand element type (f32) via the shared
// type constraint; the mismatched result is rejected.
builtin.module {
%t0= "test.op"(): () -> (tensor<5x5x32x32xf32>)

// CHECK: result at position 0 does not verify!
// CHECK: Unexpected attribute tensor<5x5x32x32xi32>
%res_max_pool_single_out = "onnx.MaxPoolSingleOut"(%t0) {"onnx_node_name" = "/MaxPoolSingleOut"} : (tensor<5x5x32x32xf32>) -> tensor<5x5x32x32xi32>

}

// -----

// auto_pad must be one of the four allowed spellings; "INVALID" triggers the
// auto_pad verifier error.
builtin.module {
%t0 = "test.op"(): () -> (tensor<5x5x32x32xf32>)

// CHECK: Operation does not verify: Invalid auto_pad string. Must be one of ['NOTSET', 'SAME_UPPER', 'SAME_LOWER', 'VALID']
%res_max_pool_single_out = "onnx.MaxPoolSingleOut"(%t0) {"onnx_node_name" = "/MaxPoolSingleOut", "auto_pad" = "INVALID", "ceil_mode" = 0 : i64, "kernel_shape" = [3 : i64, 3 : i64], "dilations" = [1 : i64, 1 : i64], "pads" = [0 : i64, 0 : i64, 0 : i64, 0 : i64], "storage_order" = 0 : i64, "strides" = [1 : i64, 1 : i64]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x32x32xf32>

}

// -----

// ceil_mode is a boolean-like flag: only 0 or 1 is accepted; 2 must be rejected.
builtin.module {
%t0 = "test.op"(): () -> (tensor<5x5x32x32xf32>)

// CHECK: Operation does not verify: ceil value must be either zero or one
%res_max_pool_single_out = "onnx.MaxPoolSingleOut"(%t0) {"onnx_node_name" = "/MaxPoolSingleOut", "auto_pad" = "NOTSET", "ceil_mode" = 2 : i64, "kernel_shape" = [3 : i64, 3 : i64], "dilations" = [1 : i64, 1 : i64], "pads" = [0 : i64, 0 : i64, 0 : i64, 0 : i64], "storage_order" = 0 : i64, "strides" = [1 : i64, 1 : i64]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x32x32xf32>

}

// -----

// kernel_shape rank must equal the number of spatial dims (input rank - 2);
// a rank-4 input with a 1-element kernel_shape is rejected.
builtin.module {
%t0 = "test.op"(): () -> (tensor<5x5x32x32xf32>)

// CHECK: Operation does not verify: input data and kernel shape rank mismatch
%res_max_pool_single_out = "onnx.MaxPoolSingleOut"(%t0) {"onnx_node_name" = "/MaxPoolSingleOut", "auto_pad" = "NOTSET", "ceil_mode" = 0 : i64, "kernel_shape" = [3 : i64], "dilations" = [1 : i64, 1 : i64], "pads" = [0 : i64, 0 : i64, 0 : i64, 0 : i64], "storage_order" = 0 : i64, "strides" = [1 : i64, 1 : i64]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x32x32xf32>

}

// -----

// Every dilation entry must be strictly positive; negative dilations are rejected.
builtin.module {
%t0 = "test.op"(): () -> (tensor<5x5x32x32xf32>)

// CHECK: Operation does not verify: dilation value must be non zero positive
%res_max_pool_single_out = "onnx.MaxPoolSingleOut"(%t0) {"onnx_node_name" = "/MaxPoolSingleOut", "auto_pad" = "NOTSET", "ceil_mode" = 0 : i64, "kernel_shape" = [3 : i64, 3 : i64], "dilations" = [-1 : i64, -1 : i64], "pads" = [0 : i64, 0 : i64, 0 : i64, 0 : i64], "storage_order" = 0 : i64, "strides" = [1 : i64, 1 : i64]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x32x32xf32>

}

// -----

// dilations must have one entry per kernel dimension; 3 dilations vs 2 kernel dims fails.
builtin.module {
%t0 = "test.op"(): () -> (tensor<5x5x32x32xf32>)

// CHECK: Operation does not verify: dilations rank and kernel shape rank are not the same
%res_max_pool_single_out = "onnx.MaxPoolSingleOut"(%t0) {"onnx_node_name" = "/MaxPoolSingleOut", "auto_pad" = "NOTSET", "ceil_mode" = 0 : i64, "kernel_shape" = [3 : i64, 3 : i64], "dilations" = [1 : i64, 1 : i64, 1: i64], "pads" = [0 : i64, 0 : i64, 0 : i64, 0 : i64], "storage_order" = 0 : i64, "strides" = [1 : i64, 1 : i64]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x32x32xf32>

}

// -----

// Only row-major storage (storage_order = 0) is supported by the verifier;
// column-major (1) is explicitly unimplemented.
builtin.module {
%t0 = "test.op"(): () -> (tensor<5x5x32x32xf32>)

// CHECK: Operation does not verify: column major storage order not implemented yet
%res_max_pool_single_out = "onnx.MaxPoolSingleOut"(%t0) {"onnx_node_name" = "/MaxPoolSingleOut", "auto_pad" = "NOTSET", "ceil_mode" = 0 : i64, "kernel_shape" = [3 : i64, 3 : i64], "dilations" = [1 : i64, 1 : i64], "pads" = [0 : i64, 0 : i64, 0 : i64, 0 : i64], "storage_order" = 1 : i64, "strides" = [1 : i64, 1 : i64]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x32x32xf32>

}

// -----

// Every stride entry must be strictly positive; negative strides are rejected.
builtin.module {
%t0 = "test.op"(): () -> (tensor<5x5x32x32xf32>)

// CHECK: Operation does not verify: stride value must be non zero positive
%res_max_pool_single_out = "onnx.MaxPoolSingleOut"(%t0) {"onnx_node_name" = "/MaxPoolSingleOut", "auto_pad" = "NOTSET", "ceil_mode" = 0 : i64, "kernel_shape" = [3 : i64, 3 : i64], "dilations" = [1 : i64, 1 : i64], "pads" = [0 : i64, 0 : i64, 0 : i64, 0 : i64], "storage_order" = 0 : i64, "strides" = [-1 : i64, -1 : i64]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x32x32xf32>

}

// -----

// strides must have one entry per kernel dimension; 3 strides vs 2 kernel dims fails.
builtin.module {
%t0 = "test.op"(): () -> (tensor<5x5x32x32xf32>)

// CHECK: Operation does not verify: strides rank and kernel shape rank are not the same
%res_max_pool_single_out = "onnx.MaxPoolSingleOut"(%t0) {"onnx_node_name" = "/MaxPoolSingleOut", "auto_pad" = "NOTSET", "ceil_mode" = 0 : i64, "kernel_shape" = [3 : i64, 3 : i64], "dilations" = [1 : i64, 1 : i64], "pads" = [0 : i64, 0 : i64, 0 : i64, 0 : i64], "storage_order" = 0 : i64, "strides" = [1 : i64, 1 : i64, 1: i64]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x32x32xf32>

}

// -----

// Pads may be zero but never negative; a -2 entry is rejected.
builtin.module {
%t0 = "test.op"(): () -> (tensor<5x5x32x32xf32>)

// CHECK: Operation does not verify: pads value must be nonnegative
%res_max_pool_single_out = "onnx.MaxPoolSingleOut"(%t0) {"onnx_node_name" = "/MaxPoolSingleOut", "auto_pad" = "NOTSET", "ceil_mode" = 0 : i64, "kernel_shape" = [3 : i64, 3 : i64], "dilations" = [1 : i64, 1 : i64], "pads" = [-2 : i64, 0 : i64, 0 : i64, 0 : i64], "storage_order" = 0 : i64, "strides" = [1 : i64, 1 : i64]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x32x32xf32>

}

// -----

// pads holds a begin and an end value per kernel dimension, so its length must be
// exactly twice the kernel rank; 5 entries for a 2-D kernel fails.
builtin.module {
%t0 = "test.op"(): () -> (tensor<5x5x32x32xf32>)

// CHECK: Operation does not verify: pads rank is not twice the kernel shape rank
%res_max_pool_single_out = "onnx.MaxPoolSingleOut"(%t0) {"onnx_node_name" = "/MaxPoolSingleOut", "auto_pad" = "NOTSET", "ceil_mode" = 0 : i64, "kernel_shape" = [3 : i64, 3 : i64], "dilations" = [1 : i64, 1 : i64], "pads" = [0 : i64, 0 : i64, 0 : i64, 0 : i64, 0: i64], "storage_order" = 0 : i64, "strides" = [1 : i64, 1 : i64]} : (tensor<5x5x32x32xf32>) -> tensor<5x5x32x32xf32>

}

3 changes: 3 additions & 0 deletions tests/filecheck/dialects/onnx/onnx_ops.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@
%t19 = "test.op"(): () -> (tensor<10x10xf32>)
%t20,%t21,%t22 = "test.op"(): () -> (tensor<1x1x5x5xf32>, tensor<1x1x3x3xf32>, none)
%t23,%t24,%t25 = "test.op"(): () -> (tensor<1x1x7x5xf32>, tensor<1x1x3x3xf32>, none)
%t26 = "test.op"(): () -> (tensor<5x5x32x32xf32>)

%res_add = "onnx.Add"(%t0, %t1) {onnx_node_name = "/Add"} : (tensor<1x2x6xf32>, tensor<1x2x6xf32>) -> tensor<1x2x6xf32>
// CHECK: %res_add = onnx.Add(%t0, %t1) {"onnx_node_name" = "/Add"}: (tensor<1x2x6xf32>, tensor<1x2x6xf32>) -> tensor<1x2x6xf32>
Expand Down Expand Up @@ -64,3 +65,5 @@
%res_constant = "onnx.Constant"() {onnx_node_name = "/Constant", "value" = dense<1> : tensor<1xi64>}: () -> tensor<1xi64>
//CHECK: %res_constant = onnx.Constant() {"onnx_node_name" = "/Constant", "value" = dense<1> : tensor<1xi64>}: () -> tensor<1xi64>

%res_max_pool_single_out = "onnx.MaxPoolSingleOut"(%t26) {onnx_node_name = "/MaxPoolSingleOut", "auto_pad" = "VALID", "ceil_mode" = 0 : i64, "kernel_shape" = [3 : i64, 3 : i64], "dilations" = [1 : i64, 1 : i64], "pads" = [0 : i64, 0 : i64, 0 : i64, 0 : i64], "storage_order" = 0 : i64, "strides" = [1 : i64, 1 : i64]}: (tensor<5x5x32x32xf32>) -> tensor<5x5x30x30xf32>
//CHECK: %res_max_pool_single_out = onnx.MaxPoolSingleOut(%t26) {"onnx_node_name" = "/MaxPoolSingleOut", "auto_pad" = "VALID", "ceil_mode" = 0 : i64, "kernel_shape" = [3 : i64, 3 : i64], "dilations" = [1 : i64, 1 : i64], "pads" = [0 : i64, 0 : i64, 0 : i64, 0 : i64], "storage_order" = 0 : i64, "strides" = [1 : i64, 1 : i64]}: (tensor<5x5x32x32xf32>) -> tensor<5x5x30x30xf32>
kayode-gif marked this conversation as resolved.
Show resolved Hide resolved
143 changes: 142 additions & 1 deletion xdsl/dialects/onnx.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
FloatAttr,
IntegerAttr,
IntegerType,
MemRefType,
NoneType,
SSAValue,
StringAttr,
Expand Down Expand Up @@ -581,7 +582,7 @@ class Constant(IRDLOperation):

Exactly one of the provided attributes, either value, sparse_value, or value_* must be specified.

Parameters:
Attributes:
- sparse_value: sparse_tensor
The value for the elements of the output tensor in sparse format. (currently unsupported)
- value : tensor
Expand Down Expand Up @@ -671,6 +672,145 @@ def verify_(self) -> None:
)


@irdl_op_definition
class MaxPoolSingleOut(IRDLOperation):
    """
    ONNX MaxPool operation with a single output.

    Attributes:

    - auto_pad string (default is 'NOTSET'): auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or
    VALID. Where default value is NOTSET, which means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the
    input so that output_shape[i] = ceil(input_shape[i] / strides[i]) for each axis i. The padding is split between
    the two sides equally or almost equally (depending on whether it is even or odd). In case the padding is an odd
    number, the extra padding is added at the end for SAME_UPPER and at the beginning for SAME_LOWER.

    - ceil_mode int (default is '0'): Whether to use ceil or floor (default) to compute the output shape.

    - dilations list of ints: Dilation value along each spatial axis of filter.

    - kernel_shape list of ints: The size of the kernel along each axis.

    - pads list of ints: Padding for the beginning and ending along each spatial axis, it can take any value greater
    than or equal to 0. The value represent the number of pixels added to the beginning and end part of the
    corresponding axis. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin
    the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis
    `i`. This attribute cannot be used simultaneously with auto_pad attribute. If not present, the padding defaults
    to 0 along start and end of each spatial axis.

    - storage_order int (default is '0') : The storage order of the tensor. 0 is row major, and 1 is column major.
    This attribute is used only to convert an n-tuple index value into a single integer value for producing the
    second output.

    - strides list of ints: Stride along each spatial axis. If not present, the stride defaults to 1 along each
    spatial axis.
    """

    name = "onnx.MaxPoolSingleOut"

    # Shared constraint variable: operand and result must agree on element type.
    T = Annotated[AnyFloat | IntegerType, ConstraintVar("T")]
    data = operand_def(TensorType[T] | MemRefType[T])
    output = result_def(TensorType[T] | MemRefType[T])

    auto_pad = attr_def(StringAttr)
    ceil_mode = attr_def(AnyIntegerAttr)
    dilations = attr_def(ArrayAttr[AnyIntegerAttr])
    kernel_shape = attr_def(ArrayAttr[AnyIntegerAttr])
    pads = attr_def(ArrayAttr[AnyIntegerAttr])
    storage_order = attr_def(AnyIntegerAttr)
    strides = attr_def(ArrayAttr[AnyIntegerAttr])

    assembly_format = (
        "`(` $data`)` attr-dict `:` `(` type($data) `)` `->` type($output)"
    )

    def __init__(
        self,
        data: SSAValue,
        auto_pad: Attribute,
        ceil_mode: Attribute,
        dilations: Attribute,
        kernel_shape: Attribute,
        pads: Attribute,
        storage_order: Attribute,
        strides: Attribute,
    ):
        """
        Build a MaxPoolSingleOut op.

        NOTE(review): the result type is taken directly from the input type; the
        pooled output generally has smaller spatial dims, so callers relying on
        an inferred result shape should verify this is intended.
        """
        super().__init__(
            attributes={
                "auto_pad": auto_pad,
                "ceil_mode": ceil_mode,
                "dilations": dilations,
                "kernel_shape": kernel_shape,
                "pads": pads,
                "storage_order": storage_order,
                "strides": strides,
            },
            operands=[data],
            result_types=[data.type],
        )

    def verify_(self) -> None:
        """
        Check attribute consistency: auto_pad spelling, ceil_mode range,
        per-axis rank agreement between kernel_shape / dilations / strides /
        pads, and value-range constraints on each list attribute.
        """
        if not isinstance(
            data_type := self.data.type, TensorType | MemRefType
        ) or not isinstance(output_type := self.output.type, TensorType | MemRefType):
            assert False, (
                "onnx elementwise operation operands (data) and result (output) must be of type TensorType or "
                "MemRefType"
            )

        data_type = cast(TensorType[Attribute], data_type)
        output_type = cast(TensorType[Attribute], output_type)

        # auto_pad: restricted to the spellings defined by the ONNX spec.
        auto_pad_strings = ["NOTSET", "SAME_UPPER", "SAME_LOWER", "VALID"]
        if self.auto_pad.data not in auto_pad_strings:
            raise VerifyException(
                f"Invalid auto_pad string. Must be one of {auto_pad_strings}"
            )

        # ceil_mode: boolean-valued flag, so only 0 or 1 is legal.
        if self.ceil_mode.value.data not in (0, 1):
            raise VerifyException("ceil value must be either zero or one")

        # kernel shape: one entry per spatial axis (input rank minus batch
        # and channel dims).
        if len(data_type.get_shape()) - 2 != len(self.kernel_shape):
            raise VerifyException("input data and kernel shape rank mismatch")

        # dilations: strictly positive, one entry per kernel dim.
        for value in self.dilations:
            if value.value.data <= 0:
                raise VerifyException("dilation value must be non zero positive")

        if len(self.dilations) != len(self.kernel_shape):
            raise VerifyException(
                "dilations rank and kernel shape rank are not the same"
            )

        # storage order: column-major (1) is not supported in onnx-mlir either,
        # so only row-major (0) is accepted for now.
        if self.storage_order.value.data != 0:
            raise VerifyException("column major storage order not implemented yet")

        # strides: strictly positive, one entry per kernel dim.
        for value in self.strides:
            if value.value.data <= 0:
                raise VerifyException("stride value must be non zero positive")

        if len(self.strides) != len(self.kernel_shape):
            raise VerifyException("strides rank and kernel shape rank are not the same")

        # pads: nonnegative, with a begin and an end entry per kernel dim.
        for value in self.pads:
            if value.value.data < 0:
                raise VerifyException("pads value must be nonnegative")

        if len(self.pads) != 2 * len(self.kernel_shape):
            raise VerifyException("pads rank is not twice the kernel shape rank")


ONNX = Dialect(
"onnx",
[
Expand All @@ -680,6 +820,7 @@ def verify_(self) -> None:
Conv,
Div,
Gemm,
MaxPoolSingleOut,
Mul,
Relu,
Reshape,
Expand Down
Loading