Skip to content

Commit

Permalink
[TOPI] Fix bug in Winograd on CUDA (#4260)
Browse files Browse the repository at this point in the history
* fix winograd

* move get padding after kernel transform
  • Loading branch information
comaniac authored and Laurawly committed Nov 6, 2019
1 parent ddaa953 commit 7211c27
Show file tree
Hide file tree
Showing 3 changed files with 18 additions and 14 deletions.
30 changes: 16 additions & 14 deletions topi/python/topi/cuda/conv2d_winograd.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
# pylint: disable=invalid-name,unused-variable,unused-argument
"""Winograd template for cuda backend"""

import logging
import tvm
from tvm import autotvm

Expand All @@ -27,6 +28,8 @@
from ..nn.winograd_util import winograd_transform_matrices


logger = logging.getLogger('conv2d_winograd')

def _infer_tile_size(data, kernel):
N, CI, H, W = get_const_tuple(data.shape)

Expand All @@ -42,26 +45,25 @@ def winograd_cuda(cfg, data, kernel, strides, padding, dilation, layout, out_dty

N, CI, H, W = get_const_tuple(data.shape)

if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = dilation
HSTR, WSTR = (strides, strides) if isinstance(strides, int) else strides

if not pre_computed: # kernel tensor is raw tensor, do strict check
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = dilation
if dilation_h != 1 or dilation_w != 1:
kernel = dilate(kernel, (1, 1, dilation_h, dilation_w))

kernel = dilation(kernel, (1, 1, dilation_h, dilation_w))
CO, CI, KH, KW = get_const_tuple(kernel.shape)
HPAD, WPAD, _, _ = nn.get_pad_tuple(padding, kernel)
HSTR, WSTR = (strides, strides) if isinstance(strides, int) else strides
assert HSTR == 1 and WSTR == 1 and KH == KW
else: # kernel tensor is pre-transformed. this op is created by
# alter op layout, do not check
else:
# kernel tensor is pre-transformed. this op is created by alter op layout.
# dilation is not supported
HSTR = WSTR = 1
HPAD = WPAD = 1
KH = KW = 3
_, _, CI, CO = get_const_tuple(kernel.shape)
KH = KW = 3
assert HSTR == 1 and WSTR == 1 and dilation_h == 1 and dilation_w == 1

HPAD, WPAD, _, _ = nn.get_pad_tuple(padding, kernel)
data_pad = nn.pad(data, (0, 0, HPAD, WPAD), (0, 0, HPAD, WPAD), name="data_pad")

r = KW
Expand Down Expand Up @@ -384,7 +386,7 @@ def _alter_conv2d_layout(attrs, inputs, tinfos, F):
return F.nn.conv2d(*copy_inputs, **new_attrs)

if attrs.get_int_tuple("dilation") != (1, 1):
warnings.warn("Does not support weight pre-transform for dilated convolution.")
logger.warning("Does not support weight pre-transform for dilated convolution.")
return None

# pre-compute weight transformation in winograd
Expand Down
1 change: 1 addition & 0 deletions topi/tests/python/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,4 +40,5 @@ def _query_inside(self, target, workload):
cfg = FallbackConfigEntity()
cfg.template_key = 'int8'
self.memory[key] = cfg
cfg.is_fallback = False
return cfg
1 change: 1 addition & 0 deletions topi/tests/python/test_topi_conv2d_winograd.py
Original file line number Diff line number Diff line change
Expand Up @@ -99,6 +99,7 @@ def _query_inside(self, target, workload):
cfg = FallbackConfigEntity()
cfg.template_key = 'winograd'
self.memory[key] = cfg
cfg.is_fallback = False
return cfg


Expand Down

0 comments on commit 7211c27

Please sign in to comment.