【Paddle Toolkit Development Competition No.2】[Custom OP] Add contiguous api support to paddle::Tensor -part (#68748)

* [Custom OP] Add contiguous api support to paddle::Tensor

* fix

* fix

* CI retrigger

* add null ptr check

* fix null ptr

* fix coverage
BeingGod authored Nov 7, 2024
1 parent 0d542f9 commit f495a6c
Showing 5 changed files with 203 additions and 0 deletions.
18 changes: 18 additions & 0 deletions paddle/phi/api/include/tensor.h
@@ -637,6 +637,24 @@ class PADDLE_API Tensor final {
*/
Tensor to_dense() const;

/* Part 12: Contiguous methods */

/**
 * @brief Determine whether the tensor is contiguous.
 *
 * @return bool
 */
bool is_contiguous() const;

/**
 * @brief Returns a tensor that is contiguous in memory and contains the same
 * data as the current Tensor. If the current Tensor is already contiguous,
 * the current Tensor itself is returned.
 *
 * @return Tensor
 */
Tensor contiguous();

private:
/**
* [ Why use abstract TensorImpl interface here? ]
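The two methods declared above are the whole public surface of this change. As a quick orientation, here is a minimal sketch of the intended call pattern inside a custom operator kernel, assuming only the usual paddle/extension.h environment; the kernel name and body are illustrative, not part of this commit.

#include "paddle/extension.h"

// Illustrative only: normalize layout before reading the buffer linearly.
// The contiguous()/is_contiguous() calls are the API added by this commit.
std::vector<paddle::Tensor> MyKernel(paddle::Tensor& x) {  // NOLINT
  paddle::Tensor cx = x.contiguous();  // returns x itself if already contiguous
  PD_CHECK(cx.is_contiguous(), "expected a contiguous tensor");
  // cx.data<float>() can now be traversed with a plain linear index.
  return {cx};
}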
48 changes: 48 additions & 0 deletions paddle/phi/api/lib/tensor.cc
@@ -22,6 +22,7 @@ limitations under the License. */

#include "paddle/common/ddim.h"
#include "paddle/phi/api/include/context_pool.h"
#include "paddle/phi/api/lib/data_transform.h"
#include "paddle/phi/api/lib/utils/allocator.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_info.h"
@@ -508,4 +509,51 @@ void Tensor::reset_inplace_version(bool set_to_zero) {
}
}

/* Part 7: Contiguous methods */

bool Tensor::is_contiguous() const {
  if (is_dense_tensor() || is_dist_tensor()) {
    phi::DenseTensor *dense_tensor = nullptr;
    if (is_dist_tensor()) {
      dense_tensor = static_cast<phi::distributed::DistTensor *>(impl_.get())
                         ->unsafe_mutable_value();
    } else {
      dense_tensor = static_cast<phi::DenseTensor *>(impl_.get());
    }
    PADDLE_ENFORCE_NOT_NULL(dense_tensor,
                            common::errors::InvalidArgument(
                                "TensorImpl with nullptr is not supported"));
    return dense_tensor->meta().is_contiguous();
  } else {
    PADDLE_THROW(common::errors::Unimplemented(
        "is_contiguous is currently only supported on DenseTensor or "
        "DistTensor."));
  }
}

Tensor Tensor::contiguous() {
  if (is_dense_tensor() || is_dist_tensor()) {
    phi::DenseTensor *dense_tensor = nullptr;
    if (is_dist_tensor()) {
      dense_tensor = static_cast<phi::distributed::DistTensor *>(impl_.get())
                         ->unsafe_mutable_value();
    } else {
      dense_tensor = static_cast<phi::DenseTensor *>(impl_.get());
    }
    PADDLE_ENFORCE_NOT_NULL(dense_tensor,
                            common::errors::InvalidArgument(
                                "TensorImpl with nullptr is not supported"));
    if (!dense_tensor->meta().is_contiguous()) {
      auto new_dense_tensor = std::make_shared<phi::DenseTensor>();
      *new_dense_tensor = paddle::experimental::Trans2Contiguous(*dense_tensor);
      return Tensor(std::shared_ptr<phi::TensorBase>(new_dense_tensor),
                    autograd_meta_,
                    name_);
    } else {
      return *this;
    }
  } else {
    PADDLE_THROW(common::errors::Unimplemented(
        "contiguous is currently only supported on DenseTensor or "
        "DistTensor."));
  }
}

} // namespace paddle
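Worth noting in the implementation above: when the meta already reports contiguous, contiguous() returns *this, so the result shares the same TensorImpl and no data is copied. A hypothetical check of that fast path (an illustration assuming the standard extension headers, not part of the diff):

#include <cassert>

#include "paddle/extension.h"

void CheckContiguousFastPath() {
  // A freshly created dense tensor is already contiguous.
  paddle::Tensor t = paddle::full({2, 3}, 1.0f);
  paddle::Tensor c = t.contiguous();
  // Fast path: the same TensorImpl comes back, no copy happens.
  assert(c.impl().get() == t.impl().get());
  assert(c.is_contiguous());
}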
2 changes: 2 additions & 0 deletions test/custom_op/CMakeLists.txt
@@ -71,6 +71,8 @@ if(WITH_TESTING)
  py_test(test_custom_tanh_double_grad SRCS test_custom_tanh_double_grad.py)
  py_test(test_custom_inplace SRCS test_custom_inplace.py)
  py_test(test_custom_optional SRCS test_custom_optional.py)
  py_test(test_custom_contiguous SRCS test_custom_contiguous.py)
  set_tests_properties(test_custom_contiguous PROPERTIES TIMEOUT 180)

  # other tests
  py_test(test_sysconfig SRCS test_sysconfig.py)
62 changes: 62 additions & 0 deletions test/custom_op/custom_contiguous.cc
@@ -0,0 +1,62 @@
// Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <numeric>
#include <vector>

#include "paddle/extension.h"

static paddle::Tensor Transpose(const paddle::Tensor& t,
                                int64_t dim0,
                                int64_t dim1) {
  int len = t.shape().size();
  dim0 = dim0 >= 0 ? dim0 : len + dim0;
  dim1 = dim1 >= 0 ? dim1 : len + dim1;
  PD_CHECK(dim0 >= 0 && dim0 < len,
           "dim0 out of range, dim0: %d, range: [0, %d)",
           dim0,
           len);
  PD_CHECK(dim1 >= 0 && dim1 < len,
           "dim1 out of range, dim1: %d, range: [0, %d)",
           dim1,
           len);
  std::vector<int> transpose_perm(len);
  std::iota(transpose_perm.begin(), transpose_perm.end(), 0);
  transpose_perm[dim0] = dim1;
  transpose_perm[dim1] = dim0;
  // maybe there is another way to avoid the experimental API
  return paddle::experimental::transpose(t, transpose_perm);
}

std::vector<paddle::Tensor> ContiguousForward(paddle::Tensor& x) {  // NOLINT
  PD_CHECK(x.shape().size() == 2, "x must be a 2-d tensor.");

  x = x.contiguous();
  PD_CHECK(x.is_contiguous(), "x must be contiguous after contiguous().");

  auto non_contiguous_x = Transpose(x, 0, 1);
  PD_CHECK(!non_contiguous_x.is_contiguous(),
           "transposed x must be non-contiguous.");

  auto contiguous_x = non_contiguous_x.contiguous();
  PD_CHECK(contiguous_x.is_contiguous(),
           "contiguous() must return a contiguous tensor.");

  return {contiguous_x};
}

PD_BUILD_OP(custom_contiguous)
    .Inputs({"X"})
    .Outputs({"Out"})
    .SetKernelFn(PD_KERNEL(ContiguousForward));
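The registration above relies on runtime shapes, which is sufficient for the dynamic-graph test below. For static-graph use, a hypothetical variant could add the standard inference hooks; the custom_contiguous_static name and both helper functions here are illustrative assumptions, not part of this commit.

std::vector<std::vector<int64_t>> ContiguousInferShape(
    const std::vector<int64_t>& x_shape) {
  return {x_shape};  // contiguous() preserves the logical shape
}

std::vector<paddle::DataType> ContiguousInferDtype(paddle::DataType x_dtype) {
  return {x_dtype};  // dtype passes through unchanged
}

PD_BUILD_OP(custom_contiguous_static)
    .Inputs({"X"})
    .Outputs({"Out"})
    .SetKernelFn(PD_KERNEL(ContiguousForward))
    .SetInferShapeFn(PD_INFER_SHAPE(ContiguousInferShape))
    .SetInferDtypeFn(PD_INFER_DTYPE(ContiguousInferDtype));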
73 changes: 73 additions & 0 deletions test/custom_op/test_custom_contiguous.py
@@ -0,0 +1,73 @@
# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import unittest

import numpy as np
from utils import (
    extra_cc_args,
    extra_nvcc_args,
    paddle_includes,
    paddle_libraries,
)

import paddle
from paddle.utils.cpp_extension import get_build_directory, load
from paddle.utils.cpp_extension.extension_utils import run_cmd

# Because Windows does not use docker, the shared lib already exists in the
# cache dir; it will not be compiled again unless the shared lib is removed.
file = f'{get_build_directory()}\\custom_contiguous\\custom_contiguous.pyd'
if os.name == 'nt' and os.path.isfile(file):
    cmd = f'del {file}'
    run_cmd(cmd, True)

custom_module = load(
    name='custom_contiguous',
    sources=['custom_contiguous.cc'],
    extra_include_paths=paddle_includes,  # add for Coverage CI
    extra_library_paths=paddle_libraries,
    extra_cxx_cflags=extra_cc_args,  # test for cc flags
    extra_cuda_cflags=extra_nvcc_args,  # test for nvcc flags
    verbose=True,
)


def custom_contiguous_dynamic(device, dtype, np_x):
    paddle.set_device(device)

    x = paddle.to_tensor(np_x, dtype=dtype)
    x.stop_gradient = True

    x = x.transpose((1, 0))

    out = custom_module.custom_contiguous(x)

    assert out.is_contiguous()


class TestCustomContiguousOp(unittest.TestCase):
    def setUp(self):
        self.dtypes = ['float32', 'float64']
        paddle.set_flags({"FLAGS_use_stride_kernel": 1})

    def test_dynamic(self):
        for dtype in self.dtypes:
            x = np.random.uniform(-1, 1, [4, 8]).astype(dtype)
            custom_contiguous_dynamic('cpu', dtype, x)


if __name__ == '__main__':
    unittest.main()
