
Fix oneDNN fallback for concat with scalar #20772

Merged · 2 commits · Jan 5, 2022
4 changes: 2 additions & 2 deletions src/operator/nn/concat.cc
@@ -252,8 +252,8 @@ bool SupportDNNLConcat(const std::vector<NDArray>& arrs) {
       return false;
     if (!(arr.dtype() == mshadow::kFloat32 || arr.dtype() == mshadow::kBfloat16))
       return false;
-    // DO not support zero-size tensors.
-    if (arr.shape().Size() == 0)
+    // Do not support zero-size tensors.
+    if (arr.shape().Size() == 0 || arr.shape().ndim() == 0)
       return false;
     int ndim = arr.shape().ndim();
     const int dnnl_ndims = arr.GetDNNLData()->get_desc().data.ndims;
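For reference, the scalar case that this check now rejects can be reproduced from the Python
frontend. The sketch below is illustrative only (it assumes the MXNet 2.x mx.np interface) and
is not part of this diff:

import mxnet as mx
from mxnet import np, npx

npx.set_np()

a = np.array(1.0)  # 0-d (scalar) array, shape ()
b = np.array(2.0)

# axis=None flattens the inputs before joining, the only well-defined choice
# for 0-d inputs. With this change SupportDNNLConcat returns false for
# ndim == 0, so the native CPU concat kernel handles the scalars instead of oneDNN.
out = np.concatenate([a, b], axis=None)
print(out.shape)  # (2,)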
4 changes: 2 additions & 2 deletions src/operator/nn/dnnl/dnnl_log_softmax.cc
@@ -60,8 +60,8 @@ bool SupportDNNLLogSoftmax(const SoftmaxParam& param, const NDArray& data, const
   // DNNL does not support temperature argument in their log_softmax function
   // now. Need update this once they start to support it.
   // Currently, DNNL shows bad performance when log_softmax is not performed on the last dimension
-  if (param.temperature.has_value() || in_dtype != mshadow::kFloat32 || in_dtype != out_dtype ||
-      axis != (ndim - 1)) {
+  if (data.shape().Size() == 0 || data.shape().ndim() == 0 || param.temperature.has_value() ||
+      in_dtype != mshadow::kFloat32 || in_dtype != out_dtype || axis != (ndim - 1)) {
     return false;
   }

3 changes: 2 additions & 1 deletion src/operator/nn/dnnl/dnnl_softmax.cc
@@ -31,6 +31,7 @@ namespace op {

 bool SupportDNNLSoftmax(const SoftmaxParam& param, const NDArray& data, const NDArray& output) {
   const int ndim = data.shape().ndim();
+  const int in_size = data.shape().Size();
   const int in_dtype = data.dtype();
   const int out_dtype = output.dtype();
   const int axis = CheckAxis(param.axis, ndim);
@@ -44,7 +45,7 @@ bool SupportDNNLSoftmax(const SoftmaxParam& param, const NDArray& data, const ND
   }

   // Supports ndim up to 6
-  return (ndim >= 1 && ndim <= 6);
+  return (ndim >= 1 && ndim <= 6 && in_size != 0);
 }

 void DNNLSoftmaxForward(const nnvm::NodeAttrs& attrs,
2 changes: 1 addition & 1 deletion src/operator/nn/dnnl/dnnl_stack.cc
@@ -41,7 +41,7 @@ bool SupportDNNLStack(const std::vector<NDArray>& inputs) {
     if (arr.dtype() != src_dtype) {
       return false;
     }
-    // DO not support zero-size tensors.
+    // Do not support zero-size tensors.
     if (arr.shape().Size() == 0) {
       return false;
     }
4 changes: 0 additions & 4 deletions src/operator/nn/log_softmax.cc
@@ -39,8 +39,6 @@ static void LogSoftmaxComputeExCPU(const nnvm::NodeAttrs& attrs,
                                    const std::vector<NDArray>& inputs,
                                    const std::vector<OpReqType>& req,
                                    const std::vector<NDArray>& outputs) {
-  if (inputs[0].shape().Size() == 0U)
-    return;
   const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
   if (SupportDNNLLogSoftmax(param, inputs[0], outputs[0])) {
     DNNL_OPCHECK_INIT(false, outputs.size(), inputs, outputs);
@@ -57,8 +55,6 @@ static void LogSoftmaxGradComputeExCPU(const nnvm::NodeAttrs& attrs,
                                        const std::vector<NDArray>& inputs,
                                        const std::vector<OpReqType>& req,
                                        const std::vector<NDArray>& outputs) {
-  if (inputs[0].shape().Size() == 0U)
-    return;
   const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
   if (SupportDNNLLogSoftmax(param, inputs[1], outputs[0])) {
     DNNL_OPCHECK_INIT(false, outputs.size(), inputs, outputs);
4 changes: 0 additions & 4 deletions src/operator/nn/softmax.cc
@@ -41,8 +41,6 @@ static void SoftmaxComputeExCPU(const nnvm::NodeAttrs& attrs,
                                 const std::vector<NDArray>& inputs,
                                 const std::vector<OpReqType>& req,
                                 const std::vector<NDArray>& outputs) {
-  if (inputs[0].shape().Size() == 0U)
-    return;
   const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
   if (SupportDNNLSoftmax(param, inputs[0], outputs[0])) {
     DNNL_OPCHECK_INIT(false, outputs.size(), inputs, outputs);
@@ -59,8 +57,6 @@ static void SoftmaxGradComputeExCPU(const nnvm::NodeAttrs& attrs,
                                     const std::vector<NDArray>& inputs,
                                     const std::vector<OpReqType>& req,
                                     const std::vector<NDArray>& outputs) {
-  if (inputs[0].shape().Size() == 0U)
-    return;
   const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
   if (SupportDNNLSoftmax(param, inputs[1], outputs[0])) {
     DNNL_OPCHECK_INIT(false, outputs.size(), inputs, outputs);
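Taken together with the SupportDNNLSoftmax/SupportDNNLLogSoftmax changes above, zero-size inputs
no longer need the early-return guards removed here: the support checks now report such inputs
as unsupported, so the generic fallback kernel runs instead. A minimal sketch of the affected
case, assuming the mx.npx operator interface (illustration only, not part of this diff):

import mxnet as mx
from mxnet import np, npx

npx.set_np()

x = np.zeros((0, 3))             # zero-size tensor: shape().Size() == 0

y = npx.softmax(x, axis=-1)      # falls back to the native CPU kernel, oneDNN skipped
z = npx.log_softmax(x, axis=-1)  # same fallback path as softmax
print(y.shape, z.shape)          # (0, 3) (0, 3)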
4 changes: 3 additions & 1 deletion tests/python/unittest/test_numpy_op.py
@@ -4096,7 +4096,7 @@ def get_new_shape(shape, axis):
             shape_lst[axis] = random.randint(0, 3)
         return tuple(shape_lst)

-    shapes = [(0, 0), (2, 3), (2, 1, 3)]
+    shapes = [(), (0, 0), (2, 3), (2, 1, 3)]
     hybridizes = [True, False]
     axes = [0, 1, -1, None]
     grad_reqs = ['write', 'add', 'null']
@@ -4105,6 +4105,8 @@ def get_new_shape(shape, axis):

     for shape, hybridize, axis, grad_req, dtype in combinations:
         # test gluon
+        if shape == () and axis != None:
+            continue
         test_concat = TestConcat(axis=axis)
         if hybridize:
             test_concat.hybridize()
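The new skip mirrors plain NumPy semantics: 0-d arrays can only be joined with axis=None, which
flattens the inputs first, while joining them along a real axis raises. A quick reference check
with standard NumPy, shown only to motivate the skip above (the onp alias is illustrative):

import numpy as onp  # reference NumPy, not MXNet

a, b = onp.array(1.0), onp.array(2.0)

print(onp.concatenate([a, b], axis=None))  # [1. 2.] -- flattened, then joined
try:
    onp.concatenate([a, b], axis=0)        # 0-d arrays cannot be joined along an axis
except ValueError as err:
    print(err)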