Support NHWC compute format in concat and conv's pad op. #1065

Open · wants to merge 1 commit into main
22 changes: 20 additions & 2 deletions onnx_tf/handlers/backend/concat.py
```diff
@@ -3,7 +3,8 @@
 from onnx_tf.handlers.backend_handler import BackendHandler
 from onnx_tf.handlers.handler import onnx_op
 from onnx_tf.handlers.handler import tf_func
-
+from onnx_tf.common import get_data_format
+from onnx_tf.common import get_perm_from_formats
 
 @onnx_op("Concat")
 @tf_func(tf.concat)
@@ -12,7 +13,24 @@ class Concat(BackendHandler):
   @classmethod
   def _common(cls, node, **kwargs):
     inputs = [kwargs["tensor_dict"][inp] for inp in node.inputs]
-    return [cls.make_tensor_from_onnx_node(node, inputs=[inputs])]
+    rank = len(inputs[0].get_shape())
+    storage_format, compute_format = get_data_format(rank) if (rank >= 2 and rank <= 5) else ('', '')
+    if storage_format == compute_format:
+      return [cls.make_tensor_from_onnx_node(node, inputs=[inputs])]
+    else:
+      # Transpose from storage_format to compute_format and do concat.
+      # NOTE: it's assumed that all other operators will be run in `compute_format` as much as
+      # possible, so those redundant `transpose` operators can be resolved.
+      inputs = [tf.transpose(x, get_perm_from_formats(storage_format, compute_format)) for x in inputs]
+      # adjust concat axis according to source and target layout format.
+      from copy import deepcopy
+      attrs = deepcopy(node.attrs)
+      axis = attrs["axis"]
+      axis = compute_format.index(storage_format[axis])
+      attrs["axis"] = axis
+      output = cls.make_tensor_from_onnx_node(node, inputs=[inputs], attrs=attrs)
+      output = [tf.transpose(output, get_perm_from_formats(compute_format, storage_format))]
+      return output
 
   @classmethod
   def version_1(cls, node, **kwargs):
```
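For reviewers: a minimal, self-contained sketch of the layout bookkeeping this handler relies on. `perm_from_formats` here is an illustrative stand-in for `onnx_tf.common.get_perm_from_formats`, and the shapes are made up for the example:

```python
import tensorflow as tf

def perm_from_formats(src, dst):
  # Axis permutation that reorders the `src` layout into the `dst` layout,
  # e.g. "NCHW" -> "NHWC" gives [0, 2, 3, 1].
  return [src.index(d) for d in dst]

# Two NCHW tensors concatenated on the channel axis (ONNX axis=1).
a = tf.zeros([1, 3, 8, 8])
b = tf.zeros([1, 5, 8, 8])
storage_format, compute_format = "NCHW", "NHWC"

# Remap the ONNX axis: channels sit at index 1 in NCHW but index 3 in NHWC.
axis = compute_format.index(storage_format[1])  # == 3

nhwc_inputs = [
    tf.transpose(t, perm_from_formats(storage_format, compute_format))
    for t in (a, b)
]
out = tf.concat(nhwc_inputs, axis=axis)
out = tf.transpose(out, perm_from_formats(compute_format, storage_format))
assert out.shape.as_list() == [1, 8, 8, 8]  # 3 + 5 channels, back in NCHW
```

The trailing transpose restores `storage_format`, so the handler's output layout is unchanged; per the NOTE in the diff, the bet is that neighbouring ops also run in `compute_format`, leaving back-to-back transpose pairs that can be folded away.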
56 changes: 28 additions & 28 deletions onnx_tf/handlers/backend/conv_mixin.py
```diff
@@ -62,34 +62,6 @@ def conv(cls, node, input_dict, transpose=False):
 
     pads = node.attrs.get("pads", [0, 0] * spatial_size)
 
-    # Check auto_pad nonexistent or NOTSET first
-    if "auto_pad" not in node.attrs or node.attrs["auto_pad"] == "NOTSET":
-      if not transpose:
-        if pads != [0, 0] * spatial_size:
-          x = PadMixin.get_padding_as_op(x, pads)
-        pad_mode = "VALID"
-      else:
-        pad_mode = "NOTSET"
-    # Then we use auto_pad to setup pad_mode
-    elif node.attrs["auto_pad"] == "SAME_UPPER":
-      pad_mode = "SAME"
-    elif node.attrs["auto_pad"] == "VALID":
-      pad_mode = "VALID"
-    elif node.attrs["auto_pad"] == "SAME_LOWER":
-      pad_mode = PAD_TF_INCOMPATIBLE
-    else:
-      raise ValueError("Invalid auto_pad attribute: {}".format(
-          node.attrs["auto_pad"]))
-
-    # Currently auto_pad = SAME_LOWER is not supported
-    if pad_mode is PAD_TF_INCOMPATIBLE:
-      if transpose:
-        exception.OP_UNSUPPORTED_EXCEPT(
-            "ConvTranspose with auto_pad `SAME_LOWER`", "Tensorflow")
-      else:
-        exception.OP_UNSUPPORTED_EXCEPT("Conv with auto_pad `SAME_LOWER`",
-                                        "Tensorflow")
-
     group = node.attrs.get("group", 1)
     weight_shape = weights.get_shape().as_list()
     # Is this convolution depthwise we can support?
@@ -134,6 +106,34 @@ def conv(cls, node, input_dict, transpose=False):
     else:
       xs = tf.split(x, num_or_size_splits=group, axis=-1)
 
+    # Check auto_pad nonexistent or NOTSET first
+    if "auto_pad" not in node.attrs or node.attrs["auto_pad"] == "NOTSET":
+      if not transpose:
+        if pads != [0, 0] * spatial_size:
+          xs = [PadMixin.get_padding_as_op(x, pads, format=compute_format) for x in xs]
+        pad_mode = "VALID"
+      else:
+        pad_mode = "NOTSET"
+    # Then we use auto_pad to setup pad_mode
+    elif node.attrs["auto_pad"] == "SAME_UPPER":
+      pad_mode = "SAME"
+    elif node.attrs["auto_pad"] == "VALID":
+      pad_mode = "VALID"
+    elif node.attrs["auto_pad"] == "SAME_LOWER":
+      pad_mode = PAD_TF_INCOMPATIBLE
+    else:
+      raise ValueError("Invalid auto_pad attribute: {}".format(
+          node.attrs["auto_pad"]))
+
+    # Currently auto_pad = SAME_LOWER is not supported
+    if pad_mode is PAD_TF_INCOMPATIBLE:
+      if transpose:
+        exception.OP_UNSUPPORTED_EXCEPT(
+            "ConvTranspose with auto_pad `SAME_LOWER`", "Tensorflow")
+      else:
+        exception.OP_UNSUPPORTED_EXCEPT("Conv with auto_pad `SAME_LOWER`",
+                                        "Tensorflow")
+
     if transpose:
       if dilations != [1] * spatial_size:
         raise RuntimeError("Cannot set non-1 dilation for conv transpose.")
```
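Because the auto_pad block now runs after the group split, the explicit padding is applied per group tensor and has to follow `compute_format`. A quick sketch of what that means for an NHWC input (the shapes and pad values are illustrative):

```python
import tensorflow as tf

# ONNX 2-D pads are [top, left, bottom, right]; here one pixel on every side.
pads = [1, 1, 1, 1]

x = tf.zeros([1, 8, 8, 4])  # input already transposed to NHWC (compute_format)
group = 2
xs = tf.split(x, num_or_size_splits=group, axis=-1)  # channels are last in NHWC

# In NHWC only the spatial axes (H, W) receive padding; N and C get [0, 0].
paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]], dtype=tf.int32)
xs = [tf.pad(t, paddings) for t in xs]
assert all(t.shape.as_list() == [1, 10, 10, 2] for t in xs)
```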
24 changes: 17 additions & 7 deletions onnx_tf/handlers/backend/pad_mixin.py
```diff
@@ -5,13 +5,23 @@
 class PadMixin(object):
 
   @classmethod
-  def get_padding_as_op(cls, x, pads):
+  def get_padding_as_op(cls, x, pads, format: str = None):
     num_dim = int(len(pads) / 2)
 
-    tf_pads = np.transpose(np.array(pads).reshape([2, num_dim]))
-    tf_pads = [0, 0, 0, 0] + tf_pads.flatten().tolist()
+    # tf_pads = np.transpose(np.array(pads).reshape([2, num_dim]))
+    if format is None:
+      NIdx, CIdx = 0, 1
+    else:
+      assert "N" in format and "C" in format, "expected `N` and `C` in padding op's input format " \
+                                              "if given"
+      NIdx = format.index("N")
+      CIdx = format.index("C")
+    # create an empty tf_pads array
+    tf_pads = np.zeros([num_dim + 2, 2], dtype=np.int32)
+    # the indices of spatial axes in input format.
+    spatial_indices = [axis for axis in range(num_dim + 2) if axis not in [NIdx, CIdx]]
+    # fill pads into tf_pads's spatial axes
+    tf_pads[spatial_indices, :] = np.transpose(np.array(pads).reshape([2, num_dim]))
 
-    padding = tf.constant(
-        np.array(tf_pads).reshape([num_dim + 2, 2])
-        .astype(np.int32))  # tf requires int32 paddings
-    return tf.pad(x, padding)
+    padding = tf.constant(tf_pads)  # tf requires int32 paddings
+    return tf.pad(x, padding)
```
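To sanity-check the layout-aware paddings, here is an illustrative re-implementation of the patched helper (minus the `tf.pad` call) and the arrays it produces for the default NCHW case versus NHWC:

```python
import numpy as np

def build_tf_pads(pads, format=None):
  # Mirrors the patched get_padding_as_op: place ONNX pads on the spatial
  # axes of a [num_dim + 2, 2] paddings array, leaving the N and C rows zero.
  num_dim = len(pads) // 2
  n_idx, c_idx = (0, 1) if format is None else (format.index("N"), format.index("C"))
  tf_pads = np.zeros([num_dim + 2, 2], dtype=np.int32)
  spatial = [a for a in range(num_dim + 2) if a not in (n_idx, c_idx)]
  # ONNX orders pads as [x1_begin, x2_begin, ..., x1_end, x2_end, ...];
  # the reshape/transpose pairs each axis's begin with its end.
  tf_pads[spatial, :] = np.transpose(np.array(pads).reshape([2, num_dim]))
  return tf_pads

print(build_tf_pads([1, 2, 3, 4]))                 # NCHW: [[0 0] [0 0] [1 3] [2 4]]
print(build_tf_pads([1, 2, 3, 4], format="NHWC"))  # NHWC: [[0 0] [1 3] [2 4] [0 0]]
```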