[NPU] add NPU topk (#31596)
* add topk op

* add cmake

* update topk npu op

* refactor func

* fix bug where the test did not run the NPU TopKD kernel

* NPUPlace(4) to NPUPlace(0)

* update comment

Co-authored-by: oyjxer <1728722986@qq.com>
OleNet and oyjxer authored Mar 17, 2021
1 parent 743cc9b commit ef15544
Showing 2 changed files with 184 additions and 0 deletions.
89 changes: 89 additions & 0 deletions paddle/fluid/operators/top_k_op_npu.cc
@@ -0,0 +1,89 @@
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <memory>
#include <string>

#include "paddle/fluid/operators/top_k_op.h"
#include "paddle/fluid/operators/npu_op_runner.h"

namespace paddle {
namespace operators {

void gen_assist_seq(framework::Tensor* assit_tensor,
                    int64_t dim, const framework::ExecutionContext& ctx) {
  const int64_t dimx2 = dim;
  std::vector<paddle::platform::float16> assit;
  assit.resize(2 * dimx2);
  for (int64_t i = 0; i < dimx2; i++) {
    // first half, i in [0, dim): the position i stored as float16
    assit[i] = static_cast<paddle::platform::float16>(i);

    // second half, i in [dim, 2 * dim): the rounding gap between i and its
    // float16 representation, which lets TopKD recover exact integer indices
    int64_t idx = static_cast<int64_t>(
        static_cast<paddle::platform::float16>(i));
    int64_t gap = i - idx;
    assit[i + dim] = static_cast<paddle::platform::float16>(gap);
  }
  framework::TensorFromVector(assit, ctx.device_context(), assit_tensor);
}


template <typename DeviceContext, typename T>
class TopkNPUKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    // read input and outputs
    auto* input = ctx.Input<framework::LoDTensor>("X");
    auto* output = ctx.Output<framework::LoDTensor>("Out");
    auto* indices = ctx.Output<framework::LoDTensor>("Indices");

    size_t k = static_cast<int>(ctx.Attr<int>("k"));

    output->mutable_data<T>(ctx.GetPlace());
    indices->mutable_data<int>(ctx.GetPlace());

    // prepare the assist sequence required by the Ascend TopKD op
    auto dim = input->dims().size();
    framework::Tensor assist_seq_tensor;
    assist_seq_tensor.Resize({2 * dim});
    assist_seq_tensor.mutable_data<T>(ctx.GetPlace());
    gen_assist_seq(&assist_seq_tensor, dim, ctx);

    framework::NPUAttributeMap attr_input = {{"sorted", "true"},
                                             {"k", static_cast<int>(k)},
                                             {"dim", -1},
                                             {"largest", true}};

    // run on the Ascend device
    auto runner = NpuOpRunner("TopKD",
                              {*input, assist_seq_tensor},
                              {*output, *indices},
                              attr_input);

    auto stream =
        ctx.template device_context<paddle::platform::NPUDeviceContext>()
            .stream();

    runner.Run(stream);
  }
};

} // namespace operators
} // namespace paddle

namespace ops = paddle::operators;

// The Ascend TopKD op only supports float16 input
REGISTER_OP_NPU_KERNEL(
    top_k,
    ops::TopkNPUKernel<paddle::platform::NPUDeviceContext,
                       paddle::platform::float16>);
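
For reference, here is a minimal NumPy sketch of the assist sequence that gen_assist_seq builds (the standalone function gen_assist_seq_np is illustrative only, not part of this commit): the first dim entries hold the positions 0..dim-1 as float16 values, and the next dim entries hold each position's float16 rounding gap, which TopKD uses to recover exact integer indices despite float16 precision loss.

import numpy as np

def gen_assist_seq_np(dim):
    # First half: positions 0..dim-1 stored as float16 values.
    idx = np.arange(dim, dtype=np.float16)
    # Second half: the gap i - int64(float16(i)); nonzero only once i is
    # large enough that float16 can no longer represent it exactly.
    gap = (np.arange(dim) - idx.astype(np.int64)).astype(np.float16)
    return np.concatenate([idx, gap])

# For small dims both halves are exact, so the gaps are all zero:
print(gen_assist_seq_np(4))  # -> [0. 1. 2. 3. 0. 0. 0. 0.]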
95 changes: 95 additions & 0 deletions python/paddle/fluid/tests/unittests/npu/test_top_k_op_npu.py
@@ -0,0 +1,95 @@
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import numpy as np
import unittest
import sys
sys.path.append("..")
from op_test import OpTest
import paddle
import paddle.fluid as fluid
from paddle.fluid import core

paddle.enable_static()
SEED = 2021


@unittest.skipIf(not paddle.is_compiled_with_npu(),
                 "core is not compiled with NPU")
class TestTopk(OpTest):
    def setUp(self):
        self.set_npu()
        self.place = paddle.NPUPlace(0)
        self.op_type = "top_k"
        self.init_dtype()

        x = np.array([[0.78104149, 0.88745828, 0.32362268],
                      [0.82196718, 0.48763277, 0.42826136],
                      [0.96527182, 0.34851612, 0.12959783]]).astype(self.dtype)

        self.inputs = {'X': x}
        np_out = np.array([[0.88745828], [0.82196718],
                           [0.96527182]]).astype(self.dtype)
        np_indices = np.array([[1], [0], [0]])

        self.attrs = {'k': 1, "axis": -1}
        self.outputs = {'Out': np_out, 'Indices': np_indices}

    def set_npu(self):
        self.__class__.use_npu = True
        self.__class__.no_need_check_grad = True

    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        self.check_output_with_place(self.place, check_dygraph=False)


@unittest.skipIf(not paddle.is_compiled_with_npu(),
                 "core is not compiled with NPU")
class TestTopkV2(OpTest):
    def setUp(self):
        self.set_npu()
        self.place = paddle.NPUPlace(0)
        self.op_type = "top_k"
        self.init_dtype()

        x = np.array([[0.78104149, 0.88745828, 0.32362268],
                      [0.82196718, 0.48763277, 0.42826136],
                      [0.96527182, 0.34851612, 0.12959783]]).astype(self.dtype)

        self.inputs = {'X': x}
        np_out = np.array([[0.88745828, 0.78104149],
                           [0.82196718, 0.48763277],
                           [0.96527182, 0.34851612]]).astype(self.dtype)
        np_indices = np.array([[1, 0], [0, 1], [0, 1]])

        self.attrs = {'k': 2, "axis": -1}
        self.outputs = {'Out': np_out, 'Indices': np_indices}

    def set_npu(self):
        self.__class__.use_npu = True
        self.__class__.no_need_check_grad = True

    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        self.check_output_with_place(self.place, check_dygraph=False)


if __name__ == '__main__':
    unittest.main()
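
As a usage note (not part of this commit): once registered, the kernel is reached through the static-graph top_k layer when the program runs on an NPU place. A minimal sketch, assuming a PaddlePaddle build compiled with NPU support and an Ascend device visible as device 0:

import numpy as np
import paddle
import paddle.fluid as fluid

paddle.enable_static()

main_prog = fluid.Program()
startup_prog = fluid.Program()
with fluid.program_guard(main_prog, startup_prog):
    x = fluid.data(name='x', shape=[3, 3], dtype='float16')
    # Dispatches to the TopkNPUKernel registered above when run on an NPU.
    values, indices = fluid.layers.top_k(x, k=1)

exe = fluid.Executor(paddle.NPUPlace(0))
exe.run(startup_prog)
x_np = np.random.rand(3, 3).astype('float16')
out_v, out_i = exe.run(main_prog,
                       feed={'x': x_np},
                       fetch_list=[values, indices])
print(out_v, out_i)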

