
Commit e8f634b

[AutoTVM][TOPI] Putting placeholder replacement in compute
cchung100m committed May 30, 2020
1 parent ebae075 commit e8f634b
Showing 3 changed files with 19 additions and 22 deletions.
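
In the spatial-pack conv2d implementations, the kernel is repacked once into a
vectorized layout (kernel_vec) ahead of inference, so AutoTVM measurements
should not time that one-off transform. Previously the bifrost and mali
schedules swapped in a pre-packed placeholder themselves; this commit moves the
substitution into the compute definitions, which now return a placeholder of
the packed shape whenever autotvm.GLOBAL_SCOPE.in_tuning is set. A minimal
sketch of the pattern, assuming TVM's te/autotvm APIs as used in the diff (the
standalone helper and its name are mine, not part of the commit):

    import tvm
    from tvm import te, autotvm

    def packed_kernel(kernel, kvshape, VC):
        """Return the vectorized kernel tensor, skipping the repack while tuning."""
        if autotvm.GLOBAL_SCOPE.in_tuning:
            # Tune against a placeholder that already has the packed layout,
            # so the one-time layout transform never enters the timing.
            return te.placeholder(kvshape, kernel.dtype, name="kernel")
        # Real builds materialize the packed layout as an actual compute.
        return te.compute(kvshape,
                          lambda co, ci, kh, kw, vc: kernel[co*VC + vc][ci][kh][kw],
                          name='kernel_vec')
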
topi/python/topi/arm_cpu/conv2d_spatial_pack.py (23 changes: 15 additions & 8 deletions)
@@ -109,12 +109,15 @@ def conv2d_spatial_pack_nchw(cfg, data, kernel, strides, padding, dilation,
                            data_pad[n][ci][h*VH*HSTR+vh][w*VW*WSTR+vw],
                            name='data_vec')
 
-    if pre_packed:
-        kernel_vec = kernel
+    if autotvm.GLOBAL_SCOPE.in_tuning:
+        kernel_vec = tvm.te.placeholder(kvshape, kernel.dtype, name="kernel")
     else:
-        kernel_vec = te.compute(kvshape, lambda co, ci, kh, kw, vc:
-                                kernel[co*VC+vc][ci][kh][kw],
-                                name='kernel_vec')
+        if pre_packed:
+            kernel_vec = kernel
+        else:
+            kernel_vec = te.compute(kvshape, lambda co, ci, kh, kw, vc:
+                                    kernel[co*VC+vc][ci][kh][kw],
+                                    name='kernel_vec')
 
     ci = te.reduce_axis((0, CI), name='ci')
     kh = te.reduce_axis((0, KH), name='kh')
@@ -267,9 +270,13 @@ def conv2d_spatial_pack_nhwc(cfg, data, kernel, strides, padding, dilation, out_
     data_vec = te.compute(dvshape, lambda n, oho, owo, ohi, owi, ic:
                           data_pad[n][oho*OHI*HSTR+ohi][owo*OWI*WSTR+owi][ic],
                           name='data_vec')
-    kernel_vec = te.compute(kvshape, lambda oco, kh, kw, ic, oci: \
-                            kernel[kh][kw][ic][oco*OCI+oci],
-                            name='kernel_vec')
+
+    if autotvm.GLOBAL_SCOPE.in_tuning:
+        kernel_vec = tvm.te.placeholder(kvshape, kernel.dtype, name="kernel")
+    else:
+        kernel_vec = te.compute(kvshape, lambda oco, kh, kw, ic, oci: \
+                                kernel[kh][kw][ic][oco*OCI+oci],
+                                name='kernel_vec')
 
     ic = te.reduce_axis((0, IC), name='ic')
     kh = te.reduce_axis((0, KH), name='kh')
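
For context (not part of this diff): GLOBAL_SCOPE lives in tvm.autotvm and the
tuner's measurement path flips in_tuning around compilation. Assuming the flag
is safe to toggle by hand in this TVM version, the new branch can be
sanity-checked like so:

    from tvm import autotvm

    autotvm.GLOBAL_SCOPE.in_tuning = True
    try:
        # Build the conv2d compute here; kernel_vec should now come back as a
        # plain placeholder named "kernel" instead of the 'kernel_vec' compute.
        pass
    finally:
        autotvm.GLOBAL_SCOPE.in_tuning = False
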
topi/python/topi/bifrost/conv2d.py (9 changes: 2 additions & 7 deletions)
@@ -142,14 +142,9 @@ def _schedule_spatial_pack(cfg, s, output, conv, data_vec, kernel_vec):
         s[data_vec].unroll(vw)
 
     if isinstance(kernel_vec.op, tvm.te.ComputeOp) and kernel_vec.name == 'kernel_vec':
-        co, ci, kh, kw, vc = s[kernel_vec].op.axis
-        if autotvm.GLOBAL_SCOPE.in_tuning:
-            # Directly use modified data layout placeholder.
-            kvshape = (co // vc, ci, kh, kw, vc)
-            kernel_vec = tvm.te.placeholder(kvshape, kernel_vec.dtype, name="kernel")
-            s[kernel_vec] = kernel_vec
-        else:
+        if not autotvm.GLOBAL_SCOPE.in_tuning:
             max_threads = tvm.target.Target.current(allow_none=False).max_num_threads
+            co, ci, kh, kw, vc = s[kernel_vec].op.axis
             fused = s[kernel_vec].fuse(co, ci, kh, kw, vc)
             fused, vec = s[kernel_vec].split(fused, VC)
             bb, tt = s[kernel_vec].split(fused, max_threads)
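
The schedule-side edit (repeated verbatim for mali below) is the mirror image:
the schedule no longer builds the placeholder itself and simply skips
scheduling kernel_vec while tuning. Since a placeholder's op is a
PlaceholderOp rather than a ComputeOp, the outer isinstance check already
filters the tuning case; the inner guard makes the intent explicit. A tiny
sketch of that distinction (the 4x4 shapes are hypothetical):

    import tvm
    from tvm import te

    plain = te.placeholder((4, 4), "float32", name="kernel")
    packed = te.compute((4, 4), lambda i, j: plain[i][j], name='kernel_vec')

    assert isinstance(plain.op, tvm.te.PlaceholderOp)   # tuning-time kernel_vec
    assert isinstance(packed.op, tvm.te.ComputeOp)      # real-build kernel_vec
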
topi/python/topi/mali/conv2d.py (9 changes: 2 additions & 7 deletions)
@@ -138,14 +138,9 @@ def _schedule_spatial_pack(cfg, s, output, conv, data_vec, kernel_vec):
         s[data_vec].unroll(vw)
 
     if isinstance(kernel_vec.op, tvm.te.ComputeOp) and kernel_vec.name == 'kernel_vec':
-        co, ci, kh, kw, vc = s[kernel_vec].op.axis
-        if autotvm.GLOBAL_SCOPE.in_tuning:
-            # Directly use modified data layout placeholder.
-            kvshape = (co // vc, ci, kh, kw, vc)
-            kernel_vec = tvm.te.placeholder(kvshape, kernel_vec.dtype, name="kernel")
-            s[kernel_vec] = kernel_vec
-        else:
+        if not autotvm.GLOBAL_SCOPE.in_tuning:
             max_threads = tvm.target.Target.current(allow_none=False).max_num_threads
+            co, ci, kh, kw, vc = s[kernel_vec].op.axis
             fused = s[kernel_vec].fuse(co, ci, kh, kw, vc)
             fused, vec = s[kernel_vec].split(fused, VC)
             bb, tt = s[kernel_vec].split(fused, max_threads)
