diff --git a/python/tvm/intrin.py b/python/tvm/intrin.py
index 6a580d39486c..fd7131e5c92f 100644
--- a/python/tvm/intrin.py
+++ b/python/tvm/intrin.py
@@ -575,7 +575,7 @@ def register_intrin_rule(target, intrin, f=None, override=False):
         The name of codegen target.
 
     intrin : str
-        The name of the instrinsic.
+        The name of the intrinsic.
 
     f : function, optional
         The function to be registered.
diff --git a/src/api/api_pass.cc b/src/api/api_pass.cc
index bcafe0904ed2..4210788d52b5 100644
--- a/src/api/api_pass.cc
+++ b/src/api/api_pass.cc
@@ -18,7 +18,7 @@
  */
 
 /*!
- * Exposre of pass functions.
+ * Exposure of pass functions.
  * \file api_pass.cc
  */
 #include
diff --git a/src/pass/lower_warp_memory.cc b/src/pass/lower_warp_memory.cc
index bfd5c3979e69..393605e85b8a 100644
--- a/src/pass/lower_warp_memory.cc
+++ b/src/pass/lower_warp_memory.cc
@@ -46,9 +46,9 @@ namespace ir {
 // This requires us to do the following rewriting:
 // - Rewrite allocation to use local memory.
 // - Rewrite store of warp memory to local store.
-// - Rewrite load of waro memory to local plus a shuffle.
+// - Rewrite load of warp memory to local plus a shuffle.
 //
-// Define a generic shuffle instrinsic warp_shuffle(data, warp_index).
+// Define a generic shuffle intrinsic warp_shuffle(data, warp_index).
 // We can use the following rewriting rule
 //
 // Before rewrite,
diff --git a/topi/python/topi/arm_cpu/bitserial_dense.py b/topi/python/topi/arm_cpu/bitserial_dense.py
index 0148cfba3f38..8bd6c5d15f8c 100644
--- a/topi/python/topi/arm_cpu/bitserial_dense.py
+++ b/topi/python/topi/arm_cpu/bitserial_dense.py
@@ -158,7 +158,7 @@ def _schedule(cfg, s, data_vec, weight_vec, output, unipolar):
         return s
 
     def traverse(op):
-        """Internal travserse function"""
+        """Internal traverse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_broadcast(op.tag) or 'elemwise' in op.tag:
             if op not in s.outputs:
diff --git a/topi/python/topi/bifrost/depthwise_conv2d.py b/topi/python/topi/bifrost/depthwise_conv2d.py
index 0cde1ea6f413..305abee0bcd9 100644
--- a/topi/python/topi/bifrost/depthwise_conv2d.py
+++ b/topi/python/topi/bifrost/depthwise_conv2d.py
@@ -104,7 +104,7 @@ def tile_and_bind3d(tensor, z, y, x, z_factor=2, y_factor=None, x_factor=None):
         s[conv].compute_at(s[output], ji)
 
     def traverse(op):
-        """Internal travserse function"""
+        """Internal traverse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_broadcast(op.tag):
             if op not in s.outputs:
diff --git a/topi/python/topi/cuda/dense.py b/topi/python/topi/cuda/dense.py
index df77f4100bf0..f17feb0b7a98 100644
--- a/topi/python/topi/cuda/dense.py
+++ b/topi/python/topi/cuda/dense.py
@@ -105,7 +105,7 @@ def _schedule(C):
     scheduled_ops = []
 
     def traverse(OP):
-        """Internal travserse function"""
+        """Internal traverse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_broadcast(OP.tag):
             if OP not in s.outputs:
diff --git a/topi/python/topi/cuda/depthwise_conv2d.py b/topi/python/topi/cuda/depthwise_conv2d.py
index def87ed8102c..6dbfbfe39cae 100644
--- a/topi/python/topi/cuda/depthwise_conv2d.py
+++ b/topi/python/topi/cuda/depthwise_conv2d.py
@@ -192,7 +192,7 @@ def _schedule(temp, Filter, DepthwiseConv2d):
     scheduled_ops = []
 
     def traverse(OP):
-        """Internal travserse function"""
+        """Internal traverse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_broadcast(OP.tag):
             if OP not in s.outputs:
diff --git a/topi/python/topi/cuda/pooling.py b/topi/python/topi/cuda/pooling.py
index 544b09d36787..f11085afb3cd 100644
--- a/topi/python/topi/cuda/pooling.py
+++ b/topi/python/topi/cuda/pooling.py
@@ -68,7 +68,7 @@ def _schedule(Pool):
     scheduled_ops = []
 
     def traverse(OP):
-        """Internal travserse function"""
+        """Internal traverse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_broadcast(OP.tag):
             if OP not in s.outputs:
@@ -131,7 +131,7 @@ def _schedule(PaddedInput, Pool):
     scheduled_ops = []
 
     def traverse(OP):
-        """Internal travserse function"""
+        """Internal traverse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_broadcast(OP.tag):
             if OP not in s.outputs:
diff --git a/topi/python/topi/cuda/reduction.py b/topi/python/topi/cuda/reduction.py
index a7a56b90fb1f..2968ab75e040 100644
--- a/topi/python/topi/cuda/reduction.py
+++ b/topi/python/topi/cuda/reduction.py
@@ -109,7 +109,7 @@ def schedule_reduce(outs):
     scheduled_ops = []
 
    def traverse_before_reduce(operator):
-        """Internal travserse function"""
+        """Internal traverse function"""
         if isinstance(operator, tvm.tensor.PlaceholderOp):
             return
         if tag.is_injective(operator.tag):
@@ -123,7 +123,7 @@ def traverse_before_reduce(operator):
         scheduled_ops.append(operator)
 
     def traverse_after_reduce(operator):
-        """Internal travserse function"""
+        """Internal traverse function"""
         if tag.is_broadcast(operator.tag):
             if operator not in scheduled_ops:
                 schedule_injective_from_existing(sch, operator.output(0))
diff --git a/topi/python/topi/hls/nn.py b/topi/python/topi/hls/nn.py
index 8cc23ca43a6a..6b925c865947 100644
--- a/topi/python/topi/hls/nn.py
+++ b/topi/python/topi/hls/nn.py
@@ -28,7 +28,7 @@ def _schedule_conv2d(outs):
     tvm.schedule.AutoInlineInjective(s)
 
     def traverse(OP):
-        """Internal travserse function"""
+        """Internal traverse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_injective(OP.tag):
             if OP not in s.outputs:
@@ -214,7 +214,7 @@ def schedule_reduce(outs):
     tvm.schedule.AutoInlineInjective(s)
 
     def traverse(OP):
-        """Internal travserse function"""
+        """Internal traverse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_broadcast(OP.tag):
             if OP not in s.outputs:
@@ -306,7 +306,7 @@ def schedule_dense(outs):
     tvm.schedule.AutoInlineInjective(s)
 
     def traverse(OP):
-        """Internal travserse function"""
+        """Internal traverse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_broadcast(OP.tag):
             if OP not in s.outputs:
@@ -350,7 +350,7 @@ def schedule_pool(outs, layout):
     tvm.schedule.AutoInlineInjective(s)
 
     def traverse(OP):
-        """Internal travserse function"""
+        """Internal traverse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_broadcast(OP.tag):
             if OP not in s.outputs:
@@ -394,7 +394,7 @@ def schedule_adaptive_pool(outs):
     tvm.schedule.AutoInlineInjective(s)
 
     def traverse(OP):
-        """Internal travserse function"""
+        """Internal traverse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_broadcast(OP.tag):
             if OP not in s.outputs:
diff --git a/topi/python/topi/intel_graphics/depthwise_conv2d.py b/topi/python/topi/intel_graphics/depthwise_conv2d.py
index 424cb3c5f925..c747c539d7fe 100644
--- a/topi/python/topi/intel_graphics/depthwise_conv2d.py
+++ b/topi/python/topi/intel_graphics/depthwise_conv2d.py
@@ -193,7 +193,7 @@ def _schedule(temp, Filter, DepthwiseConv2d):
     scheduled_ops = []
 
     def traverse(OP):
-        """Internal travserse function"""
+        """Internal traverse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_broadcast(OP.tag):
             if OP not in s.outputs:
diff --git a/topi/python/topi/opengl/conv2d_nchw.py b/topi/python/topi/opengl/conv2d_nchw.py
index 95a059357c76..e39d1ad805b0 100644
--- a/topi/python/topi/opengl/conv2d_nchw.py
+++ b/topi/python/topi/opengl/conv2d_nchw.py
@@ -49,7 +49,7 @@ def _schedule(conv2d, data):
         s[data].opengl()
 
     def traverse(OP):
-        """Internal travserse function"""
+        """Internal traverse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_broadcast(OP.tag):
             if OP not in s.outputs:
diff --git a/topi/python/topi/opengl/dense.py b/topi/python/topi/opengl/dense.py
index c78de1a13223..c93dfccbeece 100644
--- a/topi/python/topi/opengl/dense.py
+++ b/topi/python/topi/opengl/dense.py
@@ -49,7 +49,7 @@ def _schedule(Dense):
         s[Out].opengl()
 
     def traverse(OP):
-        """Internal travserse function"""
+        """Internal traverse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_broadcast(OP.tag):
             if OP not in s.outputs:
diff --git a/topi/python/topi/opengl/pooling.py b/topi/python/topi/opengl/pooling.py
index fd9c74c01c66..04c7b0cd0002 100644
--- a/topi/python/topi/opengl/pooling.py
+++ b/topi/python/topi/opengl/pooling.py
@@ -48,7 +48,7 @@ def _schedule(Pool):
         s[Out].opengl()
 
     def traverse(OP):
-        """Internal travserse function"""
+        """Internal traverse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_broadcast(OP.tag):
             if OP not in s.outputs:
@@ -102,7 +102,7 @@ def _schedule(PaddedInput, Pool):
         s[Out].opengl()
 
     def traverse(OP):
-        """Internal travserse function"""
+        """Internal traverse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_broadcast(OP.tag):
             if OP not in s.outputs:
diff --git a/topi/python/topi/x86/binary_dense.py b/topi/python/topi/x86/binary_dense.py
index a887ff71066f..abf090889ec3 100644
--- a/topi/python/topi/x86/binary_dense.py
+++ b/topi/python/topi/x86/binary_dense.py
@@ -52,7 +52,7 @@ def _schedule(A, B, C):
         s[Out].vectorize(xi)
 
     def traverse(OP):
-        """Internal travserse function"""
+        """Internal traverse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_broadcast(OP.tag):
             if OP not in s.outputs:
diff --git a/topi/python/topi/x86/bitserial_dense.py b/topi/python/topi/x86/bitserial_dense.py
index cc2fb71aad6b..47b972fa1319 100644
--- a/topi/python/topi/x86/bitserial_dense.py
+++ b/topi/python/topi/x86/bitserial_dense.py
@@ -69,7 +69,7 @@ def _schedule(cfg, s, data_vec, weight_vec, output):
         return s
 
     def traverse(op):
-        """Internal travserse function"""
+        """Internal traverse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_broadcast(op.tag) or 'elemwise' in op.tag:
             if op not in s.outputs:
diff --git a/topi/python/topi/x86/pooling.py b/topi/python/topi/x86/pooling.py
index e9f832dde902..ed7d525028e4 100644
--- a/topi/python/topi/x86/pooling.py
+++ b/topi/python/topi/x86/pooling.py
@@ -88,7 +88,7 @@ def _schedule(PaddedInput, Pool):
     _parallel_sch(s[Pool], outs[0].shape, do_vectorize)
 
     def traverse(OP):
-        """Internal travserse function"""
+        """Internal traverse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_broadcast(OP.tag):
             if OP not in s.outputs:
@@ -137,7 +137,7 @@ def schedule_adaptive_pool(outs):
     scheduled_ops = []
 
     def traverse(OP):
-        """Internal travserse function"""
+        """Internal traverse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_broadcast(OP.tag):
             if OP not in s.outputs:
diff --git a/topi/python/topi/x86/reduction.py b/topi/python/topi/x86/reduction.py
index 41e8e9ecb56b..f704d4961f15 100644
--- a/topi/python/topi/x86/reduction.py
+++ b/topi/python/topi/x86/reduction.py
@@ -78,7 +78,7 @@ def schedule_reduce(outs):
     scheduled_ops = []
 
    def traverse_before_reduce(operator):
-        """Internal travserse function"""
+        """Internal traverse function"""
         if isinstance(operator, tvm.tensor.PlaceholderOp):
             return
         if tag.is_injective(operator.tag):
@@ -92,7 +92,7 @@ def traverse_before_reduce(operator):
         scheduled_ops.append(operator)
 
     def traverse_after_reduce(operator):
-        """Internal travserse function"""
+        """Internal traverse function"""
         if tag.is_broadcast(operator.tag):
             if operator not in scheduled_ops:
                 generic.schedule_injective_from_existing(sch, operator)
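
For reference, the register_intrin_rule signature touched in the python/tvm/intrin.py hunk is used roughly as below. This is a hedged sketch only: the "exp" intrinsic name, the "cuda" target and the __nv_expf external call are illustrative assumptions, not anything this patch registers.

import tvm

def my_exp_rule(op):
    # op is the intrinsic Call being lowered; forward its argument to an
    # external function with the same dtype (illustrative lowering only).
    return tvm.call_pure_extern(op.dtype, "__nv_expf", op.args[0])

# Register my_exp_rule for intrin="exp" on target="cuda", replacing any
# previously registered rule (override=True), per the signature shown above.
tvm.register_intrin_rule("cuda", "exp", my_exp_rule, override=True)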
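
The lower_warp_memory.cc comment defines a generic shuffle intrinsic warp_shuffle(data, warp_index). A plain-Python model of the idea (this simulates the semantics only; it is not TVM's intrinsic or its lowering):

WARP_SIZE = 32

def warp_shuffle(lane_values, src_lane):
    """For every lane in the warp, return the value held by src_lane."""
    return [lane_values[src_lane] for _ in lane_values]

# Each lane keeps its own value in "local" storage ...
lane_values = [lane * 10 for lane in range(WARP_SIZE)]
# ... and a shuffle with src_lane=5 lets every lane observe lane 5's value,
# which is how a warp-memory load becomes a local load plus a shuffle.
assert warp_shuffle(lane_values, 5) == [50] * WARP_SIZE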
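
The remaining hunks all correct the docstring of the same "Internal traverse function" idiom shared by the TOPI schedules. A minimal sketch of that pattern follows; _schedule, the 'dense' tag and the argument names are stand-ins, since each backend supplies its own details:

from tvm import tensor
from topi import tag

def _traverse_pattern(s, outs, _schedule, scheduled_ops):
    def traverse(op):
        """Internal traverse function"""
        # inline all one-to-one-mapping operators except the last stage (output)
        if tag.is_broadcast(op.tag):
            if op not in s.outputs:
                s[op].compute_inline()
            # recurse into producers that have not been scheduled yet
            for t in op.input_tensors:
                if isinstance(t.op, tensor.ComputeOp) and t.op not in scheduled_ops:
                    traverse(t.op)
        # reached the op this schedule targets ('dense' is a placeholder tag)
        elif op.tag == 'dense':
            _schedule(op.output(0))
        scheduled_ops.append(op)

    traverse(outs[0].op)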