[API] Add floor_divide (#20620)
* [API] Add floor_divide

* fix lint

* fix sanity

* update implementation

* fix lint

* update operator_tune.cc

* fix

* fix lint

* fix build

* fix include

* fix rtc functions

* add amp list

* add floor_divide in GPU

* fix lint

* fix docstring

* Fix docstring

* fix lint

* update rtc

* fix rtc
barry-jin authored Oct 29, 2021
1 parent 197fbba commit fcbab28
Showing 17 changed files with 356 additions and 4 deletions.
1 change: 1 addition & 0 deletions docs/python_docs/python/api/np/routines.math.rst
@@ -157,6 +157,7 @@ Arithmetic operations
fmod
modf
divmod
floor_divide


Miscellaneous
3 changes: 3 additions & 0 deletions python/mxnet/amp/lists/symbol_fp16.py
@@ -265,6 +265,9 @@
'_npi_multinomial',
'_npi_multiply',
'_npi_multiply_scalar',
'_npi_floor_divide',
'_npi_floor_divide_scalar',
'_npi_rfloor_divide_scalar',
'_npi_nan_to_num',
'_npi_negative',
'_npi_normal',
41 changes: 40 additions & 1 deletion python/mxnet/ndarray/numpy/_op.py
@@ -51,7 +51,7 @@
'diff', 'ediff1d', 'resize', 'polyval', 'nan_to_num', 'isnan', 'isinf', 'isposinf', 'isneginf', 'isfinite',
'atleast_1d', 'atleast_2d', 'atleast_3d', 'fill_diagonal', 'squeeze',
'where', 'bincount', 'rollaxis', 'diagflat', 'repeat', 'prod', 'pad', 'cumsum', 'sum', 'diag', 'diagonal',
'positive', 'logaddexp']
'positive', 'logaddexp', 'floor_divide']


@set_module('mxnet.ndarray.numpy')
@@ -1168,6 +1168,45 @@ def true_divide(x1, x2, out=None):
return _api_internal.true_divide(x1, x2, out)


@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def floor_divide(x1, x2, out=None):
"""Return the largest integer smaller or equal to the division of the inputs.
It is equivalent to the Python // operator and pairs with the Python % (remainder),
function so that a = a % b + b * (a // b) up to roundoff.
Parameters
----------
x1 : ndarray or scalar
Dividend array.
x2 : ndarray or scalar
Divisor array.
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
out : ndarray or scalar
This is a scalar if both x1 and x2 are scalars.
.. note::
This operator now supports automatic type promotion. The resulting type will be determined
according to the following rules:
* If both inputs are of floating number types, the output is the more precise type.
* If only one of the inputs is floating number type, the result is that type.
* If both inputs are of integer types (including boolean), the output is the more
precise type
"""
if isinstance(x1, numeric_types) and isinstance(x2, numeric_types):
return _np.floor_divide(x1, x2, out=out)
return _api_internal.floor_divide(x1, x2, out)
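
A quick usage sketch for the new function (hypothetical session, assuming an MXNet build that includes this commit); it exercises the ``a = a % b + b * (a // b)`` identity from the docstring:

    from mxnet import np as mx_np

    a = mx_np.array([7., -7., 6., -6.])
    b = mx_np.array([3., 3., 3., 3.])
    q = mx_np.floor_divide(a, b)  # array([ 2., -3.,  2., -2.])
    r = mx_np.mod(a, b)           # array([1., 2., 0., 0.])
    # floor division pairs with mod: a == b * (a // b) + a % b
    assert bool(((b * q + r) == a).all())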


@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def mod(x1, x2, out=None, **kwargs):
63 changes: 62 additions & 1 deletion python/mxnet/numpy/multiarray.py
@@ -81,7 +81,7 @@
'nan_to_num', 'isnan', 'isinf', 'isposinf', 'isneginf', 'isfinite', 'polyval', 'where', 'bincount',
'atleast_1d', 'atleast_2d', 'atleast_3d', 'fill_diagonal', 'squeeze',
'diagflat', 'repeat', 'prod', 'pad', 'cumsum', 'sum', 'rollaxis', 'diag', 'diagonal',
'positive', 'logaddexp']
'positive', 'logaddexp', 'floor_divide']

__all__ += fallback.__all__

@@ -1114,6 +1114,23 @@ def __mul__(self, other):
"""x.__mul__(y) <=> x * y"""
return multiply(self, other)

@wrap_mxnp_np_ufunc
def __floordiv__(self, other):
"""x.__floordiv__(y) <=> x // y"""
return floor_divide(self, other)

@wrap_mxnp_np_ufunc
def __ifloordiv__(self, other):
"""x.__ifloordiv__(y) <=> x //= y"""
if not self.writable:
raise ValueError('trying to divide from a readonly ndarray')
return floor_divide(self, other, out=self)

@wrap_mxnp_np_ufunc
def __rfloordiv__(self, other):
"""x.__rfloordiv__(y) <=> y // x"""
return floor_divide(other, self)
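
The three hooks above route Python's // operator to the new op; a small demonstration (assuming a build with this commit):

    from mxnet import np as mx_np

    x = mx_np.array([7., -7.])
    print(x // 3)   # __floordiv__  -> array([ 2., -3.])
    print(7 // x)   # __rfloordiv__ -> array([ 1., -1.])
    x //= 3         # __ifloordiv__ writes the result back into x
    print(x)        # array([ 2., -3.])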

def __neg__(self):
"""x.__neg__() <=> -x"""
return negative(self)
@@ -3433,6 +3450,50 @@ def true_divide(x1, x2, out=None):
return _mx_nd_np.true_divide(x1, x2, out=out)


@set_module('mxnet.numpy')
@wrap_np_binary_func
def floor_divide(x1, x2, out=None):
"""Return the largest integer smaller or equal to the division of the inputs.
It is equivalent to the Python // operator and pairs with the Python % (remainder),
function so that a = a % b + b * (a // b) up to roundoff.
Parameters
----------
x1 : ndarray or scalar
Dividend array.
x2 : ndarray or scalar
Divisor array.
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
out : ndarray or scalar
This is a scalar if both x1 and x2 are scalars.
.. note::
This operator now supports automatic type promotion. The resulting type will be determined
according to the following rules:
* If both inputs are of floating number types, the output is the more precise type.
* If only one of the inputs is floating number type, the result is that type.
* If both inputs are of integer types (including boolean), the output is the more
precise type
Examples
--------
>>> np.floor_divide(7,3)
2
>>> np.floor_divide([1., 2., 3., 4.], 2.5)
array([ 0., 0., 1., 1.])
"""
return _mx_nd_np.floor_divide(x1, x2, out=out)
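
A short sketch of the ``out=`` parameter described above (hypothetical session, assuming a build with this commit):

    from mxnet import np as mx_np

    x = mx_np.array([1., 2., 3., 4.])
    buf = mx_np.empty((4,))
    mx_np.floor_divide(x, 2.5, out=buf)  # result is written into buf
    print(buf)                           # array([0., 0., 1., 1.])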


@set_module('mxnet.numpy')
@wrap_np_binary_func
def mod(x1, x2, out=None, **kwargs):
1 change: 1 addition & 0 deletions python/mxnet/numpy_dispatch_protocol.py
@@ -254,6 +254,7 @@ def _register_array_function():
'logaddexp',
'subtract',
'multiply',
'floor_divide',
'true_divide',
'negative',
'power',
9 changes: 9 additions & 0 deletions src/api/operator/numpy/np_elemwise_broadcast_op.cc
@@ -61,6 +61,15 @@ MXNET_REGISTER_API("_npi.true_divide")
UFuncHelper(args, ret, op, op_scalar, op_rscalar);
});

MXNET_REGISTER_API("_npi.floor_divide")
.set_body([](runtime::MXNetArgs args, runtime::MXNetRetValue* ret) {
using namespace runtime;
const nnvm::Op* op = Op::Get("_npi_floor_divide");
const nnvm::Op* op_scalar = Op::Get("_npi_floor_divide_scalar");
const nnvm::Op* op_rscalar = Op::Get("_npi_rfloor_divide_scalar");
UFuncHelper(args, ret, op, op_scalar, op_rscalar);
});
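
UFuncHelper selects among the three registered ops according to which argument is a scalar; a rough Python rendering of that dispatch (the real logic is C++, and ``invoke`` is a hypothetical stand-in):

    from numbers import Number

    def floor_divide_dispatch(x1, x2):
        # scalar/scalar pairs never reach here; the Python frontend
        # already handled them via numpy (see _op.py above)
        if isinstance(x2, Number):  # ndarray // scalar
            return invoke("_npi_floor_divide_scalar", x1, scalar=x2)
        if isinstance(x1, Number):  # scalar // ndarray
            return invoke("_npi_rfloor_divide_scalar", x2, scalar=x1)
        return invoke("_npi_floor_divide", x1, x2)  # ndarray // ndarray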

MXNET_REGISTER_API("_npi.mod").set_body([](runtime::MXNetArgs args, runtime::MXNetRetValue* ret) {
using namespace runtime;
const nnvm::Op* op = Op::Get("_npi_mod");
20 changes: 20 additions & 0 deletions src/common/cuda/rtc/forward_functions-inl.h
@@ -259,6 +259,26 @@ rsub(const DType a, const DType2 b) {
return b - a;
}
template <typename DType, typename DType2>
__device__ inline mixed_type<DType, DType2>
floor_divide(const DType a, const DType2 b) {
if (type_util::has_double_or_integral<DType, DType2>::value) {
return ::floor((double)a / (double)b);
} else {
return ::floorf((float)a / (float)b);
}
}
template <typename DType, typename DType2>
__device__ inline mixed_type<DType, DType2>
rfloor_divide(const DType a, const DType2 b) {
if (type_util::has_double_or_integral<DType, DType2>::value) {
return ::floor((double)b / (double)a);
} else {
return ::floorf((float)b / (float)a);
}
}
template <typename DType, typename DType2>
__device__ inline mixed_type<DType, DType2>
mul(const DType a, const DType2 b) {
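
The two RTC helpers above compute in double when either input is double or integral and in float otherwise; a small numpy check of why integral inputs need the double path (float32 rounds large integers before the division is even performed):

    import numpy as np

    a, b = 2**24 + 1, 1
    print(np.floor(np.float64(a) / np.float64(b)))  # 16777217.0 (exact)
    print(np.floor(np.float32(a) / np.float32(b)))  # 16777216.0 (a was rounded)
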
112 changes: 112 additions & 0 deletions src/operator/mshadow_op.h
@@ -231,6 +231,118 @@ struct rtrue_divide : public mxnet_op::tunable {
}
};

/***** floor_divide ******/

struct floor_divide : public mxnet_op::tunable {
template <
typename DType,
typename std::enable_if<!std::is_same<DType, bool>::value && std::is_integral<DType>::value,
int>::type = 0>
MSHADOW_XINLINE static DType Map(DType a, DType b) {
DType c = static_cast<DType>(::floor(a / b));
if ((c * b != a) && ((a < 0) != (b < 0))) {
return DType(c - 1);
} else {
return c;
}
}

MSHADOW_XINLINE static bool Map(bool a, bool b) {
return static_cast<bool>(::floor(a / b));
}

template <
typename DType,
typename std::enable_if<!std::is_integral<DType>::value && !std::is_same<DType, float>::value,
int>::type = 0>
MSHADOW_XINLINE static DType Map(DType a, DType b) {
return ::floor(a / b);
}

MSHADOW_XINLINE static float Map(float a, float b) {
return ::floorf(a / b);
}
};
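
The integral branch above starts from C++'s truncate-toward-zero division and subtracts one when the signs differ and the division is inexact; the same logic in Python, for clarity:

    def floor_div_int(a: int, b: int) -> int:
        c = int(a / b)  # truncates toward zero, like C++ integer division
        if c * b != a and (a < 0) != (b < 0):
            c -= 1      # correct toward negative infinity
        return c

    assert floor_div_int(7, 3) == 2
    assert floor_div_int(-7, 3) == -3
    assert floor_div_int(-6, 3) == -2  # exact division needs no correction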

struct rfloor_divide : public mxnet_op::tunable {
template <
typename DType,
typename std::enable_if<!std::is_same<DType, bool>::value && std::is_integral<DType>::value,
int>::type = 0>
MSHADOW_XINLINE static DType Map(DType a, DType b) {
DType c = static_cast<DType>(::floor(b / a));
if ((c * a != b) && ((a < 0) != (b < 0))) {
return DType(c - 1);
} else {
return c;
}
}

MSHADOW_XINLINE static bool Map(bool a, bool b) {
return static_cast<bool>(::floor(b / a));
}

template <
typename DType,
typename std::enable_if<!std::is_integral<DType>::value && !std::is_same<DType, float>::value,
int>::type = 0>
MSHADOW_XINLINE static DType Map(DType a, DType b) {
return ::floor(b / a);
}

MSHADOW_XINLINE static float Map(float a, float b) {
return ::floorf(b / a);
}
};

struct mixed_floor_divide {
template <typename DType, typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
MSHADOW_XINLINE static mshadow::half::half_t Map(DType a, mshadow::half::half_t b) {
return ::floor(static_cast<mshadow::half::half_t>(a) / b);
}

template <typename DType,
typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value ||
std::is_integral<DType>::value,
int>::type = 0>
MSHADOW_XINLINE static float Map(DType a, float b) {
return ::floorf(static_cast<float>(a) / b);
}

template <typename DType,
typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value ||
std::is_same<DType, float>::value ||
std::is_integral<DType>::value,
int>::type = 0>
MSHADOW_XINLINE static double Map(DType a, double b) {
return ::floor(static_cast<double>(a) / b);
}
};

struct mixed_rfloor_divide {
template <typename DType, typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
MSHADOW_XINLINE static mshadow::half::half_t Map(DType a, mshadow::half::half_t b) {
return ::floor(b / static_cast<mshadow::half::half_t>(a));
}

template <typename DType,
typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value ||
std::is_integral<DType>::value,
int>::type = 0>
MSHADOW_XINLINE static float Map(DType a, float b) {
return ::floorf(b / static_cast<float>(a));
}

template <typename DType,
typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value ||
std::is_same<DType, float>::value ||
std::is_integral<DType>::value,
int>::type = 0>
MSHADOW_XINLINE static double Map(DType a, double b) {
return ::floor(b / static_cast<double>(a));
}
};
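
The ``mixed_*`` functors back the promotion rules stated in the Python docstring (the wider floating type wins; a lone floating input fixes the result type). A hypothetical check against a build with this commit:

    from mxnet import np as mx_np

    i32 = mx_np.array([7], dtype='int32')
    f16 = mx_np.array([3], dtype='float16')
    f64 = mx_np.array([3], dtype='float64')
    print(mx_np.floor_divide(i32, f16).dtype)  # float16: the floating type wins
    print(mx_np.floor_divide(f16, f64).dtype)  # float64: the more precise float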

MXNET_BINARY_MATH_OP_NC(left, a);

MXNET_BINARY_MATH_OP_NC(right, b);
9 changes: 9 additions & 0 deletions src/operator/numpy/np_elemwise_broadcast_op_scalar.cc
@@ -61,5 +61,14 @@ MXNET_OPERATOR_REGISTER_NP_BINARY_SCALAR(_npi_rpower_scalar)
.set_attr<FCompute>("FCompute<cpu>", BinaryScalarOp::Compute<cpu, mshadow_op::rpower>)
.set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseOut{"_backward_rpower_scalar"});

MXNET_OPERATOR_REGISTER_NP_BINARY_SCALAR(_npi_floor_divide_scalar)
.set_attr<FCompute>("FCompute<cpu>", BinaryScalarOp::Compute<cpu, op::mshadow_op::floor_divide>)
.set_attr<nnvm::FGradient>("FGradient", MakeZeroGradNodes);

MXNET_OPERATOR_REGISTER_NP_BINARY_SCALAR(_npi_rfloor_divide_scalar)
.set_attr<FCompute>("FCompute<cpu>",
BinaryScalarOp::Compute<cpu, op::mshadow_op::rfloor_divide>)
.set_attr<nnvm::FGradient>("FGradient", MakeZeroGradNodes);

} // namespace op
} // namespace mxnet
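
Both scalar variants register ``MakeZeroGradNodes``: floor division is piecewise constant, so its gradient is zero almost everywhere. A sketch of what that means under autograd (assuming a build with this commit):

    from mxnet import autograd
    from mxnet import np as mx_np

    x = mx_np.array([7.0])
    x.attach_grad()
    with autograd.record():
        y = mx_np.floor_divide(x, 3.0)
    y.backward()
    print(x.grad)  # array([0.]) -- zero wherever the derivative is defined
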
6 changes: 6 additions & 0 deletions src/operator/numpy/np_elemwise_broadcast_op_scalar.cu
@@ -51,5 +51,11 @@ NNVM_REGISTER_OP(_npi_power_scalar)
NNVM_REGISTER_OP(_npi_rpower_scalar)
.set_attr<FCompute>("FCompute<gpu>", BinaryScalarRTCCompute{"rpow"});

NNVM_REGISTER_OP(_npi_floor_divide_scalar)
.set_attr<FCompute>("FCompute<gpu>", BinaryScalarRTCCompute{"floor_divide"});

NNVM_REGISTER_OP(_npi_rfloor_divide_scalar)
.set_attr<FCompute>("FCompute<gpu>", BinaryScalarRTCCompute{"rfloor_divide"});

} // namespace op
} // namespace mxnet
39 changes: 39 additions & 0 deletions src/operator/numpy/np_floor_divide.cc
@@ -0,0 +1,39 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

/*!
* \file np_floor_divide.cc
* \brief CPU Implementation of floor_divide operator.
*/

#include "./np_elemwise_broadcast_op.h"

namespace mxnet {
namespace op {

MXNET_OPERATOR_REGISTER_NP_BINARY_MIXED_PRECISION(_npi_floor_divide)
.set_attr<FCompute>("FCompute<cpu>",
NumpyBinaryBroadcastComputeWithBool<cpu,
op::mshadow_op::floor_divide,
op::mshadow_op::mixed_floor_divide,
op::mshadow_op::mixed_rfloor_divide>)
.set_attr<nnvm::FGradient>("FGradient", MakeZeroGradNodes);

} // namespace op
} // namespace mxnet
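
The CPU kernel is registered through ``NumpyBinaryBroadcastComputeWithBool``, so shapes broadcast as in numpy; a brief sketch (assuming a build with this commit):

    from mxnet import np as mx_np

    a = mx_np.arange(6).reshape(3, 2)  # shape (3, 2)
    b = mx_np.array([2., 3.])          # shape (2,)
    print(mx_np.floor_divide(a, b))
    # [[0. 0.]
    #  [1. 1.]
    #  [2. 1.]]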