Skip to content
This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Fix oneDNN feature name in MxNET #20070

Merged
merged 1 commit into from
Mar 24, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions cd/utils/artifact_repository.py
Original file line number Diff line number Diff line change
Expand Up @@ -291,7 +291,7 @@ def probe_cpu_variant(mxnet_features: Dict[str, bool]) -> str:
:return: Either cpu, or mkl as the variant
"""
logger.debug('Determining cpu variant')
if not mxnet_features['MKLDNN']:
if not mxnet_features['ONEDNN']:
logger.debug('variant is: native')
return 'native'

Expand All @@ -312,7 +312,7 @@ def probe_gpu_variant(mxnet_features: Dict[str, bool]) -> Optional[str]:
cuda_version = get_cuda_version()
if cuda_version:
variant = 'cu{}'.format(cuda_version)
if not mxnet_features['MKLDNN']:
if not mxnet_features['ONEDNN']:
RuntimeError('Error determining mxnet variant: ONEDNN should be enabled for cuda variants')
logger.debug('variant is: {}'.format(variant))
return variant
Expand Down
14 changes: 7 additions & 7 deletions cd/utils/test_artifact_repository.py
Original file line number Diff line number Diff line change
Expand Up @@ -161,26 +161,26 @@ def test_get_cuda_version_not_found(self, mock):
@patch('artifact_repository.get_libmxnet_features')
def test_probe_variant_native(self, mock_features):
"""
Tests 'native' is returned if MKLDNN and CUDA features are OFF
Tests 'native' is returned if ONEDNN and CUDA features are OFF
"""
mock_features.return_value = {'MKLDNN': False, 'CUDA': False}
mock_features.return_value = {'ONEDNN': False, 'CUDA': False}
self.assertEqual(probe_mxnet_variant('libmxnet.so'), 'native')

@patch('artifact_repository.get_libmxnet_features')
def test_probe_variant_cpu(self, mock_features):
"""
Tests 'cpu' is returned if MKLDNN is ON and CUDA is OFF
Tests 'cpu' is returned if ONEDNN is ON and CUDA is OFF
"""
mock_features.return_value = {'MKLDNN': True, 'CUDA': False}
mock_features.return_value = {'ONEDNN': True, 'CUDA': False}
self.assertEqual(probe_mxnet_variant('libmxnet.so'), 'cpu')

@patch('artifact_repository.get_libmxnet_features')
@patch('artifact_repository.get_cuda_version')
def test_probe_variant_cuda(self, mock_cuda_version, mock_features):
"""
Tests 'cu102' is returned if MKLDNN is OFF and CUDA is ON and CUDA version is 10.2
Tests 'cu102' is returned if ONEDNN is ON and CUDA is ON and CUDA version is 10.2
"""
mock_features.return_value = {'MKLDNN': True, 'CUDA': True}
mock_features.return_value = {'ONEDNN': True, 'CUDA': True}
mock_cuda_version.return_value = '102'
self.assertEqual(probe_mxnet_variant('libmxnet.so'), 'cu102')

Expand All @@ -198,7 +198,7 @@ def test_probe_variant_cuda_mkl(self, mock_cuda_version, mock_features):
"""
Tests exception is raised if CUDA feature is ON but cuda version could not be determined
"""
mock_features.return_value = {'MKLDNN': True, 'CUDA': True}
mock_features.return_value = {'ONEDNN': True, 'CUDA': True}
mock_cuda_version.return_value = None
with self.assertRaises(RuntimeError):
probe_mxnet_variant('libmxnet.so')
Expand Down
4 changes: 2 additions & 2 deletions include/mxnet/libinfo.h
Original file line number Diff line number Diff line change
Expand Up @@ -172,8 +172,8 @@ enum : unsigned {
// Other math libraries:
// Linear Algebra PACKage
LAPACK,
// Intel(R) Math Kernel Library for Deep Neural Networks
MKLDNN,
// oneAPI Deep Neural Network Library (oneDNN)
ONEDNN,

// Image processing
OPENCV,
Expand Down
2 changes: 1 addition & 1 deletion python/mxnet/amp/lists/symbol_fp16.py
Original file line number Diff line number Diff line change
Expand Up @@ -600,7 +600,7 @@
'_contrib_sldwin_atten_context',
]

if Features().is_enabled('MKLDNN'):
if Features().is_enabled('ONEDNN'):
FP32_FUNCS.extend([
'_sg_mkldnn_conv',
'_sg_mkldnn_fully_connected',
Expand Down
2 changes: 1 addition & 1 deletion python/mxnet/runtime.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@
[✖ CUDA, ✖ CUDNN, ✖ NCCL, ✖ TENSORRT, ✔ CPU_SSE, ✔ CPU_SSE2, ✔ CPU_SSE3,
✔ CPU_SSE4_1, ✔ CPU_SSE4_2, ✖ CPU_SSE4A, ✔ CPU_AVX, ✖ CPU_AVX2, ✔ OPENMP, ✖ SSE,
✔ F16C, ✔ JEMALLOC, ✔ BLAS_OPEN, ✖ BLAS_ATLAS, ✖ BLAS_MKL, ✖ BLAS_APPLE, ✔ LAPACK,
MKLDNN, ✔ OPENCV, ✖ DIST_KVSTORE, ✖ INT64_TENSOR_SIZE, ✔ SIGNAL_HANDLER, ✔ DEBUG, ✖ TVM_OP]
ONEDNN, ✔ OPENCV, ✖ DIST_KVSTORE, ✖ INT64_TENSOR_SIZE, ✔ SIGNAL_HANDLER, ✔ DEBUG, ✖ TVM_OP]


"""
Expand Down
4 changes: 2 additions & 2 deletions src/libinfo.cc
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,7 @@ class FeatureSet {
feature_bits.set(BLAS_MKL, MXNET_USE_BLAS_MKL);
feature_bits.set(BLAS_APPLE, MXNET_USE_BLAS_APPLE);
feature_bits.set(LAPACK, MXNET_USE_LAPACK);
feature_bits.set(MKLDNN, MXNET_USE_ONEDNN);
feature_bits.set(ONEDNN, MXNET_USE_ONEDNN);

// Image
feature_bits.set(OPENCV, MXNET_USE_OPENCV);
Expand Down Expand Up @@ -152,7 +152,7 @@ const std::vector<std::string> EnumNames::names = {
"BLAS_MKL",
"BLAS_APPLE",
"LAPACK",
"MKLDNN",
"ONEDNN",
"OPENCV",
"DIST_KVSTORE",
"INT64_TENSOR_SIZE",
Expand Down
6 changes: 3 additions & 3 deletions tools/dependencies/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -49,12 +49,12 @@ MXNet is built on top of many dependencies. Managing these dependencies could be

## Overview

The dependencies could be categorized by several groups: BLAS libraries, CPU-based performance boost library, i.e. MKLDNN and GPU-based performance boosting library including CUDA, cuDNN, NCCL. and others including OpenCV, Numpy, S3-related, PS-lite dependencies. The list below shows all the dependencies and their version. Except for CUDA, cuDNN, NCCL which the user is required to install on their environments, we statically link those dependencies into libmxnet.so when we build PyPi package. By doing this, the user can take advantage of these dependencies without being worry about it.
The dependencies can be categorized into several groups: BLAS libraries; the CPU-based performance-boosting library, i.e. oneDNN; GPU-based performance-boosting libraries including CUDA, cuDNN, and NCCL; and others including OpenCV, Numpy, S3-related, and PS-lite dependencies. The list below shows all the dependencies and their versions. Except for CUDA, cuDNN, and NCCL, which the user is required to install in their environment, we statically link these dependencies into libmxnet.so when we build the PyPI package. By doing this, the user can take advantage of these dependencies without worrying about them.

| Dependencies | MXNet Version |
| :------------: |:-------------:|
|OpenBLAS| 0.3.9 |
|MKLDNN| 0.19 |
|oneDNN| 2.0 |
|CUDA| 10.1 |
|cuDNN| 7.5.1 |
|NCCL| 2.4.2 |
Expand Down Expand Up @@ -102,7 +102,7 @@ sudo apt-get install -y git \
pkg-config
```

### MKL, MKLDNN
### MKL, oneDNN

@pengzhao-intel (https://github.com/apache/incubator-mxnet/commits?author=pengzhao-intel) and his team are tracking and updating these versions. Kudos to them!

Expand Down
6 changes: 3 additions & 3 deletions tools/pip/setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -141,14 +141,14 @@ def skip_markdown_comments(md):
libraries.append('CUDA-10.1')

from mxnet.runtime import Features
if Features().is_enabled("MKLDNN"):
libraries.append('MKLDNN')
if Features().is_enabled("ONEDNN"):
libraries.append('ONEDNN')

short_description += ' This version uses {0}.'.format(' and '.join(libraries))

package_data = {'mxnet': [os.path.join('mxnet', os.path.basename(LIB_PATH[0]))],
'dmlc_tracker': []}
if Features().is_enabled("MKLDNN"):
if Features().is_enabled("ONEDNN"):
shutil.copytree(os.path.join(CURRENT_DIR, 'mxnet-build/3rdparty/onednn/include'),
os.path.join(CURRENT_DIR, 'mxnet/include/onednn'))
if platform.system() == 'Linux':
Expand Down