[Custom OP] Add contiguous api support to paddle::Tensor

BeingGod committed Oct 18, 2024
1 parent 4a071e2 commit 123f625
Showing 5 changed files with 164 additions and 0 deletions.
18 changes: 18 additions & 0 deletions paddle/phi/api/include/tensor.h
@@ -630,6 +630,24 @@ class PADDLE_API Tensor final {
   */
  Tensor to_dense() const;

  /* Part 12: Contiguous methods */

  /**
   * @brief Determine whether the tensor is contiguous.
   *
   * @return bool
   */
  bool is_contiguous() const;

  /**
   * @brief Returns a tensor that is contiguous in memory and contains the
   * same data as the current Tensor. If the current Tensor is already
   * contiguous, it is returned unchanged.
   *
   * @return Tensor
   */
  Tensor& contiguous();

 private:
  /**
   * [ Why use abstract TensorImpl interface here? ]
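The declaration above fixes the call pattern: is_contiguous() is a const query, while contiguous() converts the tensor's storage in place and returns a reference to the same Tensor rather than a new one. A minimal caller-side sketch (the helper name is hypothetical and assumes a Paddle build that includes this commit):

#include <cassert>

#include "paddle/extension.h"

// Hypothetical helper, not part of this commit: ensure a tensor's storage
// is contiguous before handing its raw data to a kernel.
void EnsureContiguous(paddle::Tensor& t) {  // NOLINT
  if (!t.is_contiguous()) {
    // contiguous() rewrites the underlying storage in place and returns a
    // reference to the same Tensor object.
    paddle::Tensor& same = t.contiguous();
    assert(&same == &t);
  }
}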
40 changes: 40 additions & 0 deletions paddle/phi/api/lib/tensor.cc
@@ -22,6 +22,7 @@ limitations under the License. */

#include "paddle/common/ddim.h"
#include "paddle/phi/api/include/context_pool.h"
#include "paddle/phi/api/lib/data_transform.h"
#include "paddle/phi/api/lib/utils/allocator.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_info.h"
@@ -504,4 +505,43 @@ void Tensor::reset_inplace_version(bool set_to_zero) {
  }
}

/* Part 7: Contiguous methods */

bool Tensor::is_contiguous() const {
  if (is_dense_tensor() || is_dist_tensor()) {
    phi::DenseTensor *dense_tensor = nullptr;
    if (is_dist_tensor()) {
      dense_tensor = static_cast<phi::distributed::DistTensor *>(impl_.get())
                         ->unsafe_mutable_value();
    } else {
      dense_tensor = static_cast<phi::DenseTensor *>(impl_.get());
    }
    return dense_tensor->meta().is_contiguous();
  } else {
    PADDLE_THROW(
        common::errors::Unimplemented("Only support is_contiguous operation on "
                                      "DenseTensor or DistTensor now."));
  }
}

Tensor &Tensor::contiguous() {
  if (is_dense_tensor() || is_dist_tensor()) {
    phi::DenseTensor *dense_tensor = nullptr;
    if (is_dist_tensor()) {
      dense_tensor = static_cast<phi::distributed::DistTensor *>(impl_.get())
                         ->unsafe_mutable_value();
    } else {
      dense_tensor = static_cast<phi::DenseTensor *>(impl_.get());
    }

    if (!dense_tensor->meta().is_contiguous()) {
      *dense_tensor = paddle::experimental::Trans2Contiguous(*dense_tensor);
    }
    return *this;
  } else {
    PADDLE_THROW(common::errors::Unimplemented(
        "Only support contiguous operation on DenseTensor or DistTensor now."));
  }
}

}  // namespace paddle
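Both methods share the same unwrap step: for a DistTensor the local DenseTensor is fetched via unsafe_mutable_value(), otherwise impl_ is cast to DenseTensor directly. contiguous() then relies on Trans2Contiguous (from data_transform.h, newly included above) to build a contiguous equivalent and assigns it back into the existing DenseTensor, which is why the method can return *this rather than allocating a fresh Tensor handle.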
2 changes: 2 additions & 0 deletions test/custom_op/CMakeLists.txt
@@ -69,6 +69,8 @@ if(WITH_TESTING)
  py_test(test_custom_tanh_double_grad SRCS test_custom_tanh_double_grad.py)
  py_test(test_custom_inplace SRCS test_custom_inplace.py)
  py_test(test_custom_optional SRCS test_custom_optional.py)
  py_test(test_custom_contiguous SRCS test_custom_contiguous.py)
  set_tests_properties(test_custom_contiguous PROPERTIES TIMEOUT 180)

  # other tests
  py_test(test_sysconfig SRCS test_sysconfig.py)
31 changes: 31 additions & 0 deletions test/custom_op/custom_contiguous.cc
@@ -0,0 +1,31 @@
// Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <iostream>
#include <vector>

#include "paddle/extension.h"

std::vector<paddle::Tensor> ContiguousForward(paddle::Tensor& x) {  // NOLINT
  if (!x.is_contiguous()) {
    x = x.contiguous();
  }

  return {x};
}

PD_BUILD_OP(custom_contiguous)
    .Inputs({"X"})
    .Outputs({"Out"})
    .SetKernelFn(PD_KERNEL(ContiguousForward));
73 changes: 73 additions & 0 deletions test/custom_op/test_custom_contiguous.py
@@ -0,0 +1,73 @@
# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import unittest

import numpy as np
from utils import (
    extra_cc_args,
    extra_nvcc_args,
    paddle_includes,
    paddle_libraries,
)

import paddle
from paddle.utils.cpp_extension import get_build_directory, load
from paddle.utils.cpp_extension.extension_utils import run_cmd

# Because Windows doesn't use Docker, the shared lib may already exist in the
# cache dir; it will not be compiled again unless the shared lib is removed.
file = f'{get_build_directory()}\\custom_contiguous\\custom_contiguous.pyd'
if os.name == 'nt' and os.path.isfile(file):
    cmd = f'del {file}'
    run_cmd(cmd, True)

custom_module = load(
    name='custom_contiguous',
    sources=['custom_contiguous.cc'],
    extra_include_paths=paddle_includes,  # add for Coverage CI
    extra_library_paths=paddle_libraries,
    extra_cxx_cflags=extra_cc_args,  # test for cc flags
    extra_cuda_cflags=extra_nvcc_args,  # test for nvcc flags
    verbose=True,
)


def custom_contiguous_dynamic(device, dtype, np_x):
    paddle.set_device(device)

    x = paddle.to_tensor(np_x, dtype=dtype)
    x.stop_gradient = True

    x = x.transpose((1, 0))

    out = custom_module.custom_contiguous(x)

    assert not x.is_contiguous()
    assert out.is_contiguous()


class TestCustomContiguousOp(unittest.TestCase):
    def setUp(self):
        self.dtypes = ['float32', 'float64']

    def test_dynamic(self):
        for dtype in self.dtypes:
            x = np.random.uniform(-1, 1, [4, 8]).astype(dtype)
            custom_contiguous_dynamic('cpu', dtype, x)


if __name__ == '__main__':
    unittest.main()
