Skip to content

Commit

Permalink
updates pten allocation, test=develop
Browse files Browse the repository at this point in the history
  • Loading branch information
Shixiaowei02 committed Jan 23, 2022
1 parent 563c79f commit f717413
Show file tree
Hide file tree
Showing 5 changed files with 59 additions and 218 deletions.
130 changes: 42 additions & 88 deletions paddle/pten/core/allocator.h
Original file line number Diff line number Diff line change
Expand Up @@ -15,139 +15,93 @@ limitations under the License. */
#pragma once

#include <cstdint>
#include <functional>
#include "paddle/fluid/platform/place.h"
#include "paddle/pten/core/candidate/allocator.h"

namespace pten {
namespace deprecated {

/// \brief Encapsulates strategies for access/addressing, allocation/
/// deallocation and construction/destruction of objects. Pure interface:
/// concrete allocators implement Allocate/Deallocate/place.
class RawAllocator {
 public:
  using Place = paddle::platform::Place;

  /// \brief Default virtual destructor so deleting through the base is safe.
  virtual ~RawAllocator() = default;

  /// \brief Allocates storage suitable for an array object of n bytes
  /// and creates the array, but does not construct array elements.
  /// May throw exceptions.
  /// \param bytes_size The number of bytes to allocate.
  /// \return The first address allocated.
  virtual void* Allocate(size_t bytes_size) = 0;

  /// \brief Deallocates the storage pointed to by ptr, which must be a
  /// value returned by a previous call to Allocate that has not been
  /// invalidated by an intervening call to Deallocate. The bytes_size
  /// must match the value previously passed to Allocate.
  /// \param ptr The first address to deallocate.
  /// \param bytes_size The number of bytes to deallocate.
  virtual void Deallocate(void* ptr, size_t bytes_size) = 0;

  /// \brief Get the place value of the allocator and the allocation.
  /// \return The place value of the allocator and the allocation.
  virtual const Place& place() const = 0;
};

/// \brief Fancy pointer with deleter. The use of this data type
/// is to be compatible with allocators from different frameworks
/// without significant performance loss.
///
/// NOTE(review): the lines below interleave TWO revisions of this class —
/// an old context-based version (members data_/ctx_, `final`, Clear()) and
/// a new pointer+size version (members ptr_/size_, virtual dtor). This span
/// appears to be a unified diff with the +/- markers stripped; it is not
/// compilable as written (duplicate class heads, duplicate member
/// functions, both `private:` and `protected:`). Disentangle the two
/// revisions against the original commit before editing — TODO confirm.
class Allocation final {  // old revision: non-inheritable
class Allocation {        // new revision: inheritable (virtual dtor below)
 public:
  using Place = paddle::platform::Place;
  // Deallocation callback type; receives the Allocation itself.
  using DeleterFnPtr = void (*)(Allocation*);

  Allocation() = default;

  // Doesn't own resources, only provides access (no deleter installed).
  Allocation(void* data, const Place& place) : data_(data), place_(place) {}  // old
  Allocation(void* data, size_t size, const Place& place)                     // new
      : ptr_(data), size_(size), place_(place) {}

  // Owns resources: deleter_ is invoked on destruction.
  Allocation(void* data, void* ctx, DeleterFnPtr deleter, const Place& place)  // old
      : data_(data), ctx_(ctx), deleter_(deleter), place_(place) {}
  Allocation(void* data, size_t size, DeleterFnPtr deleter, const Place& place)  // new
      : ptr_(data), size_(size), deleter_(deleter), place_(place) {}

  // Move operations are implemented via swap, so the moved-from object is
  // left in the default-constructed (empty) state.
  Allocation(Allocation&& other) { swap(*this, other); }           // old
  Allocation& operator=(Allocation&& other) {                      // old
  Allocation(Allocation&& other) noexcept { swap(*this, other); }  // new
  Allocation& operator=(Allocation&& other) noexcept {             // new
    // Exchange them explicitly to avoid a move degenerating into a copy.
    swap(*this, other);
    return *this;
  }
  ~Allocation() { Clear(); }  // old revision: non-virtual dtor delegating to Clear()

  // --- old-revision accessors (data_/ctx_) ---
  void* ptr() const noexcept { return data_; }
  void* operator->() const noexcept { return data_; }
  operator bool() const noexcept { return data_ || ctx_; }
  const Place& place() const noexcept { return place_; }

  void Clear() {           // old revision
  virtual ~Allocation() {  // new revision: virtual dtor replaces Clear()
    if (deleter_) {
      deleter_(this);
    }
    // old-revision cleanup of the released members:
    ctx_ = nullptr;
    deleter_ = nullptr;
    data_ = nullptr;
  }

  // Returns the holding pointer.
  // NOTE: For performance consideration, it is better not to make this method
  // a virtual method. If we want to implement a `defragmentation` later,
  // we might need to make the `ptr_` field a protected field, and add a
  // virtual method like `defragmentation` to change `ptr_`.
  void* ptr() const noexcept { return ptr_; }

  // Returns the size of this memory buffer, i.e., ptr() + size() - 1 is the
  // last valid element.
  //
  // NOTE: Some allocators might allocate more memory than requested, so the
  // size could be larger than the request. For example,
  // the AlignedAllocator will always allocate memory as size + kAlignment.
  // The raw pointer might not be aligned, so an offset might be added to
  // the raw pointer. The size of this allocation will be
  // `size + kAlignment - offset`.
  size_t size() const noexcept { return size_; }

  void* operator->() const noexcept { return ptr_; }
  operator bool() const noexcept { return ptr_; }
  const Place& place() const noexcept { return place_; }
  DeleterFnPtr deleter() const noexcept { return deleter_; }

  // Unchecked cast of the context pointer (old revision only).
  template <typename T>
  T* CastContextWithoutCheck() const noexcept {
    return static_cast<T*>(ctx_);
  }

  /// \brief Statically cast the void pointer of the context object to
  /// the primitive type. Conversion of any pointer to void* and back
  /// to a pointer to the original cv type preserves its original value.
  /// \param T The primitive type name of the context pointer.
  /// \param expected_deleter The destructor passed in to enhance type
  /// safety checking.
  template <typename T>
  T* CastContext(DeleterFnPtr expected_deleter) const {
    PADDLE_ENFORCE_EQ(
        deleter_ == expected_deleter,
        true,
        paddle::platform::errors::InvalidArgument(
            "The deleter of the allocation does not match, so the pointer "
            "cannot be safely removed."));
    return CastContextWithoutCheck<T>();
  }

 private:    // old revision
 protected:  // new revision: members exposed to subclasses
  friend void swap(Allocation& a, Allocation& b) noexcept;
  void* data_{nullptr};  // old revision
  void* ctx_{nullptr};   // old revision
  void* ptr_{nullptr};   // new revision
  size_t size_{};        // new revision
  DeleterFnPtr deleter_{nullptr};
  // TODO(Shixiaowei02): An enum should be used instead to reduce
  // the construction overhead by more than 50%.
  Place place_;
};

// ADL-findable swap used by Allocation's move operations; exchanges every
// member of the two allocations.
// NOTE(review): this body merges the member sets of two revisions of
// Allocation (data_/ctx_ from the old one, ptr_/size_ from the new one) —
// diff residue; only one set exists in either real revision.
inline void swap(Allocation& a, Allocation& b) noexcept {
  ::std::swap(a.data_, b.data_);
  ::std::swap(a.ctx_, b.ctx_);
  ::std::swap(a.ptr_, b.ptr_);
  ::std::swap(a.deleter_, b.deleter_);
  ::std::swap(a.place_, b.place_);
  ::std::swap(a.size_, b.size_);
}

/// \brief Context compatible allocator interface. This allocator is
/// mainly used for general data structures such as Tensor. The raw
/// allocator is more universal and efficient.
///
/// NOTE(review): the lines below interleave two revisions of this class
/// plus an old free helper function — unified-diff residue with the +/-
/// markers stripped; not compilable as written (two `};` closers, two
/// Allocate overrides). Recover each revision from the original commit
/// before editing — TODO confirm.
class Allocator {
  using Place = paddle::platform::Place;

 public:
  // Owning handle types for allocations handed out by Allocate()
  // (new revision only).
  using DeleterType = std::function<void(Allocation*)>;
  using AllocationPtr = std::unique_ptr<Allocation, DeleterType>;

  virtual ~Allocator() = default;
  virtual Allocation Allocate(size_t bytes_size) = 0;  // old revision: returns by value
  virtual const Place& place() = 0;                    // old revision
};  // old revision: class ends here
  virtual AllocationPtr Allocate(size_t bytes_size) = 0;  // new revision: returns owning ptr

// Old-revision free helper: forwards to the allocator after a null check.
inline Allocation Allocate(const std::shared_ptr<Allocator>& a, size_t n) {
  CHECK(a);
  return a->Allocate(n);
}
  virtual bool IsAllocThreadSafe() const { return false; }  // new revision
};  // new revision: class ends here

} // namespace deprecated
} // namespace pten
107 changes: 0 additions & 107 deletions paddle/pten/core/candidate/allocator.h

This file was deleted.

17 changes: 7 additions & 10 deletions paddle/pten/core/dense_tensor.cc
Original file line number Diff line number Diff line change
Expand Up @@ -38,9 +38,8 @@ DenseTensor::DenseTensor(Allocator* a, const DenseTensorMeta& meta)
DenseTensor::DenseTensor(Allocator* a, DenseTensorMeta&& meta)
: meta_(std::move(meta)), holder_(a->Allocate(SizeOf(dtype()) * numel())) {}

DenseTensor::DenseTensor(
const std::shared_ptr<paddle::memory::Allocation>& holder,
const DenseTensorMeta& meta)
DenseTensor::DenseTensor(const std::shared_ptr<pten::Allocation>& holder,
const DenseTensorMeta& meta)
: meta_(meta), holder_(holder) {}

DenseTensor::DenseTensor(const DenseTensor& other) : meta_(other.meta()) {
Expand Down Expand Up @@ -177,7 +176,7 @@ DenseTensor::DenseTensor() {
meta_.offset = 0;
}

DenseTensor::DenseTensor(const paddle::framework::proto::VarType::Type& dtype) {
DenseTensor::DenseTensor(paddle::framework::proto::VarType::Type dtype) {
inplace_version_counter_ = std::make_shared<TensorInplaceVersion>(0);
meta_.dtype = TransToPtenDataType(dtype);
meta_.offset = 0;
Expand Down Expand Up @@ -224,8 +223,7 @@ void DenseTensor::set_layout(const paddle::framework::DataLayout layout) {
meta_.layout = layout;
}

void DenseTensor::ResetHolder(
const std::shared_ptr<paddle::memory::Allocation>& holder) {
void DenseTensor::ResetHolder(const std::shared_ptr<pten::Allocation>& holder) {
PADDLE_ENFORCE_EQ(
meta_.offset,
0,
Expand All @@ -243,14 +241,13 @@ void DenseTensor::ResetHolder(
}

void DenseTensor::ResetHolderWithType(
const std::shared_ptr<paddle::memory::Allocation>& holder,
const paddle::framework::proto::VarType::Type& type) {
const std::shared_ptr<pten::Allocation>& holder,
paddle::framework::proto::VarType::Type type) {
set_type(type);
ResetHolder(holder);
}

void DenseTensor::set_type(
const paddle::framework::proto::VarType::Type& type) {
void DenseTensor::set_type(paddle::framework::proto::VarType::Type type) {
meta_.dtype = TransToPtenDataType(type);
}

Expand Down
21 changes: 9 additions & 12 deletions paddle/pten/core/dense_tensor.h
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,7 @@ class DenseTensor : public TensorBase,
/// \param meta The meta data of dense tensor.
DenseTensor(Allocator* a, DenseTensorMeta&& meta);

DenseTensor(const std::shared_ptr<paddle::memory::Allocation>& holder,
DenseTensor(const std::shared_ptr<pten::Allocation>& holder,
const DenseTensorMeta& meta);

/// \brief Because dense tensor is a kind of container, we give a default
Expand Down Expand Up @@ -175,7 +175,7 @@ class DenseTensor : public TensorBase,

protected:
DenseTensorMeta meta_;
std::shared_ptr<paddle::memory::Allocation> holder_;
std::shared_ptr<pten::Allocation> holder_;

/* --------------------------- */
/* From framework::Tensor */
Expand All @@ -194,7 +194,7 @@ class DenseTensor : public TensorBase,

/* @jim19930609: Remove dependency on protobuf after Tensor Unification.
*/
explicit DenseTensor(const paddle::framework::proto::VarType::Type& dtype);
explicit DenseTensor(paddle::framework::proto::VarType::Type dtype);

/// \brief Use existing storage space to create dense tensor. This interface
/// can be used to deliberately create an uninitialized dense tensor.
Expand Down Expand Up @@ -265,24 +265,21 @@ class DenseTensor : public TensorBase,
return holder_ && holder_ == src.Holder();
}

const std::shared_ptr<paddle::memory::Allocation>& Holder() const {
return holder_;
}
const std::shared_ptr<pten::Allocation>& Holder() const { return holder_; }

void set_offset(size_t offset) { meta_.offset = offset; }
size_t offset() const { return meta_.offset; }

std::shared_ptr<paddle::memory::Allocation> MoveMemoryHolder() {
std::shared_ptr<pten::Allocation> MoveMemoryHolder() {
return std::move(holder_);
}

void ResetHolder(const std::shared_ptr<paddle::memory::Allocation>& holder);
void ResetHolder(const std::shared_ptr<pten::Allocation>& holder);

void ResetHolderWithType(
const std::shared_ptr<paddle::memory::Allocation>& holder,
const paddle::framework::proto::VarType::Type& type);
void ResetHolderWithType(const std::shared_ptr<pten::Allocation>& holder,
paddle::framework::proto::VarType::Type type);

void set_type(const paddle::framework::proto::VarType::Type& type);
void set_type(paddle::framework::proto::VarType::Type type);

TensorInplaceVersion& InplaceVersionCounter() {
return *inplace_version_counter_;
Expand Down
Loading

0 comments on commit f717413

Please sign in to comment.