This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Commit

switch to unique_ptr
haojin2 committed Nov 9, 2019
1 parent f254deb commit 2e76582
Showing 4 changed files with 6 additions and 9 deletions.
9 changes: 3 additions & 6 deletions src/operator/operator_tune-inl.h
@@ -116,10 +116,6 @@ class OperatorTune : public OperatorTuneByType<DType> {
     TuneAll();
   }
 
-  ~OperatorTune() {
-    delete[] data_set_;
-  }
-
   /*!
    * \brief Initialize the OperatorTune object
    * \return Whether the OperatorTune object was successfully initialized
@@ -128,7 +124,8 @@ class OperatorTune : public OperatorTuneByType<DType> {
     if (!initialized_) {
       initialized_ = true;
       // Generate some random data for calling the operator kernels
-      data_set_ = reinterpret_cast<DType*>(new char[0x100 * sizeof(DType)]);
+      data_set_ =
+        std::unique_ptr<DType[]>(reinterpret_cast<DType*>(new char[0x100 * sizeof(DType)]));
       std::random_device rd;
       std::mt19937 gen(rd());
       if (!std::is_integral<DType>::value) {
@@ -521,7 +518,7 @@ class OperatorTune : public OperatorTuneByType<DType> {
   /*! \brief Number of passes to obtain an average */
   static constexpr duration_t OUTSIDE_COUNT = (1 << OUTSIDE_COUNT_SHIFT);
   /*! \brief Random data for timing operator calls */
-  static DType* data_set_;
+  static std::unique_ptr<DType[]> data_set_;
   /*! \brief Operators tuned */
   static std::unordered_set<std::string> operator_names_;
   /*! \brief Arbitary object to modify in OMP loop */
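The header change above drops the hand-written ~OperatorTune() destructor because ownership of the random data buffer moves to std::unique_ptr<DType[]>, which releases it automatically. The following is a minimal sketch of that pattern, not the MXNet code itself: TuneBuffer, Init, and kDataSetSize are illustrative names, and it allocates a plain DType array instead of wrapping the new char[...]/reinterpret_cast allocation that the real diff keeps.

// Minimal sketch, not the MXNet implementation: a static buffer owned by
// std::unique_ptr<T[]> is released automatically, so no explicit destructor
// calling delete[] is needed. All names here are illustrative.
#include <cstddef>
#include <memory>
#include <random>

template <typename DType>
class TuneBuffer {
 public:
  static void Init() {
    // unique_ptr<DType[]> calls delete[] on reset and at program exit.
    data_set_ = std::unique_ptr<DType[]>(new DType[kDataSetSize]);
    std::mt19937 gen(std::random_device{}());
    std::uniform_int_distribution<int> dist(1, 100);
    for (std::size_t i = 0; i < kDataSetSize; ++i) {
      data_set_[i] = static_cast<DType>(dist(gen));
    }
  }

 private:
  static constexpr std::size_t kDataSetSize = 0x100;
  static std::unique_ptr<DType[]> data_set_;
};

template <typename DType>
std::unique_ptr<DType[]> TuneBuffer<DType>::data_set_ = nullptr;

int main() {
  TuneBuffer<float>::Init();  // buffer freed automatically at program exit
  return 0;
}

The key point is the out-of-class definition near the bottom: once the static member is a unique_ptr, no manual delete[] is needed anywhere.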
2 changes: 1 addition & 1 deletion src/operator/operator_tune.cc
@@ -39,7 +39,7 @@ double OperatorTuneBase::tuning_weight_scale_ = 0.0;
  */
 #define IMPLEMENT_OPERATOR_TUNE_STATICS_FOR_TYPE(__typ$) \
   template<> bool OperatorTune<__typ$>::initialized_ = false; \
-  template<> __typ$* OperatorTune<__typ$>::data_set_ = nullptr; \
+  template<> std::unique_ptr<__typ$[]> OperatorTune<__typ$>::data_set_ = nullptr; \
   template<> volatile tune::TuningMode OperatorTuneByType<__typ$>::tuning_mode_ = tune::kAuto; \
   template<> volatile int OperatorTune<__typ$>::volatile_int_ = 9; /* arbitrary number */ \
   template<> std::unordered_set<std::string> OperatorTune<__typ$>::operator_names_({}); \
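Because data_set_ is a static member of the OperatorTune class template, each per-type out-of-class definition (generated by the macro above) must be updated to the same std::unique_ptr<__typ$[]> type, or the program would be ill-formed. A condensed sketch of this pattern with made-up names (Tune is not the real class) is:

// Sketch only, assuming simplified names: a static data member declared as
// std::unique_ptr<T[]> needs matching explicit-specialization definitions
// in exactly one translation unit.
#include <memory>

template <typename T>
struct Tune {
  static std::unique_ptr<T[]> data_set_;  // declaration (header)
};

// Previously these would read: template <> float* Tune<float>::data_set_ = nullptr;
template <> std::unique_ptr<float[]> Tune<float>::data_set_ = nullptr;
template <> std::unique_ptr<double[]> Tune<double>::data_set_ = nullptr;

int main() { return Tune<float>::data_set_ ? 1 : 0; }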
2 changes: 1 addition & 1 deletion src/operator/tensor/elemwise_binary_scalar_op.h
@@ -236,7 +236,7 @@ class BinaryScalarOp : public UnaryOp {
     using namespace mshadow::expr;
     Stream<xpu> *s = ctx.get_stream<xpu>();
     const double alpha = nnvm::get<double>(attrs.parsed);
-    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
+    MSHADOW_TYPE_SWITCH_WITH_BOOL(outputs[0].type_flag_, DType, {
       MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
         mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(
           s, inputs[0].Size(), outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), DType(alpha));
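Switching MSHADOW_TYPE_SWITCH to MSHADOW_TYPE_SWITCH_WITH_BOOL lets the scalar kernel also be instantiated when the output tensor holds booleans. The sketch below shows the general shape of such a type-switch macro, mapping a runtime type flag to a compile-time DType; it is a simplified illustration, not the real mshadow macro, and the TypeFlag values are invented.

// Illustrative sketch of a runtime-type -> compile-time-type dispatch macro
// with a bool case added; kFloat32/kInt32/kBool values are hypothetical.
#include <cstdint>
#include <iostream>
#include <stdexcept>

enum TypeFlag { kFloat32 = 0, kInt32 = 4, kBool = 7 };

#define TYPE_SWITCH_WITH_BOOL(flag, DType, ...)                       \
  switch (flag) {                                                     \
    case kFloat32: { typedef float DType;   { __VA_ARGS__ } } break;  \
    case kInt32:   { typedef int32_t DType; { __VA_ARGS__ } } break;  \
    case kBool:    { typedef bool DType;    { __VA_ARGS__ } } break;  \
    default: throw std::runtime_error("unsupported type flag");       \
  }

int main() {
  int flag = kBool;  // runtime type flag, e.g. outputs[0].type_flag_
  TYPE_SWITCH_WITH_BOOL(flag, DType, {
    std::cout << "DType size: " << sizeof(DType) << '\n';
  });
  return 0;
}

Inside the selected case, the block passed as __VA_ARGS__ compiles with DType bound to the concrete type, which is why the kernel launch in the diff can keep calling outputs[0].dptr<DType>() unchanged.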
2 changes: 1 addition & 1 deletion tests/python/unittest/test_numpy_op.py
@@ -1669,7 +1669,7 @@ def hybrid_forward(self, F, a, b, *args, **kwargs):
         mx_test_x1 = mx.numpy.array(np_test_x1, dtype=ltype)
         mx_test_x2 = mx.numpy.array(np_test_x2, dtype=rtype)
         rtol = 1e-2 if ltype is np.float16 or rtype is np.float16 else 1e-3
-        atol = 1e-4 if ltype is np.float16 or rtype is np.float16 else 1e-5
+        atol = 1e-3 if ltype is np.float16 or rtype is np.float16 else 1e-5
         for hybridize in [True, False]:
             if hybridize:
                 mx_func.hybridize()

