diff --git a/backends/aoti/slim/core/Storage.h b/backends/aoti/slim/core/Storage.h
new file mode 100644
index 00000000000..f36d4a463c4
--- /dev/null
+++ b/backends/aoti/slim/core/Storage.h
@@ -0,0 +1,245 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+#pragma once
+
+#include <cstdlib>
+#include <cstring>
+
+#include <executorch/backends/aoti/slim/c10/core/Device.h>
+#include <executorch/backends/aoti/slim/c10/core/ScalarType.h>
+#include <executorch/backends/aoti/slim/util/SharedPtr.h>
+#include <executorch/runtime/platform/assert.h>
+
+namespace executorch::backends::aoti::slim {
+
+/// Type alias for deleter function pointer.
+using DeleterFn = void (*)(void*);
+
+namespace detail {
+/// No-op deleter for non-owning storage.
+inline void noop(void*) {}
+} // namespace detail
+
+/// Default CPU device constant.
+const c10::Device CPU_DEVICE = c10::Device(c10::DeviceType::CPU, 0);
+
+/// DeviceTraits template for device-specific operations.
+/// Device-specific implementations provide allocate(), free(), and memcpy().
+template <c10::DeviceType D>
+struct DeviceTraits;
+
+/// CPU specialization of DeviceTraits.
+/// Provides CPU memory allocation and copy operations using malloc/free/memcpy.
+template <>
+struct DeviceTraits<c10::DeviceType::CPU> {
+  /// Allocates CPU memory using malloc.
+  /// @param nbytes Number of bytes to allocate.
+  /// @param device The target device (unused for CPU).
+  /// @return Pointer to allocated memory.
+  static void* allocate(size_t nbytes, const c10::Device& device = CPU_DEVICE) {
+    (void)device;
+    // NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
+    return malloc(nbytes);
+  }
+
+  /// Frees CPU memory using free.
+  /// @param ptr Pointer to memory to free.
+  static void free(void* ptr) {
+    // NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
+    std::free(ptr);
+  }
+
+  /// Copies memory between CPU locations.
+  /// @param dst Destination pointer.
+  /// @param src Source pointer.
+  /// @param nbytes Number of bytes to copy.
+  /// @param dst_device Destination device (unused for CPU-to-CPU).
+  /// @param src_device Source device (unused for CPU-to-CPU).
+  static void memcpy(
+      void* dst,
+      const void* src,
+      size_t nbytes,
+      const c10::Device& dst_device,
+      const c10::Device& src_device) {
+    (void)dst_device;
+    (void)src_device;
+    std::memcpy(dst, src, nbytes);
+  }
+};
+
+/**
+ * MaybeOwningStorage - A storage class that manages tensor data memory.
+ *
+ * This class provides owning memory storage for tensor data on CPU.
+ * Owning storage allocates and manages its own memory, freeing it upon
+ * destruction.
+ *
+ * Current limitations:
+ * - CPU device only
+ * - Owning mode only
+ * The future diffs will add support for non-owning storage and other devices.
+ *
+ * Thread Safety: NOT THREAD-SAFE
+ * - Uses NonAtomicSharedPtr for reference counting
+ * - Must only be used in single-threaded contexts
+ */
+class MaybeOwningStorage {
+ public:
+  /// Constructs owning storage with allocated memory.
+  /// @param device The device for storage (must be CPU).
+  /// @param nbytes Number of bytes to allocate.
+  MaybeOwningStorage(const c10::Device& device, size_t nbytes)
+      : device_(device), capacity_(nbytes), is_owning_(true) {
+    ET_CHECK_MSG(
+        device.is_cpu(),
+        "Only CPU device is currently supported, got: %s",
+        device.str().c_str());
+
+    data_ = DeviceTraits<c10::DeviceType::CPU>::allocate(nbytes, device);
+    deleter_ = DeviceTraits<c10::DeviceType::CPU>::free;
+  }
+
+  /// Default constructor is deleted - storage must have a device.
+  MaybeOwningStorage() = delete;
+
+  /// Copy constructor is deleted - use SharedPtr for shared ownership.
+  MaybeOwningStorage(const MaybeOwningStorage&) = delete;
+
+  /// Copy assignment is deleted - use SharedPtr for shared ownership.
+  MaybeOwningStorage& operator=(const MaybeOwningStorage&) = delete;
+
+  /// Move constructor.
+  MaybeOwningStorage(MaybeOwningStorage&& other) noexcept
+      : device_(other.device_),
+        data_(other.data_),
+        capacity_(other.capacity_),
+        deleter_(other.deleter_),
+        is_owning_(other.is_owning_) {
+    other.data_ = nullptr;
+    other.capacity_ = 0;
+    other.deleter_ = detail::noop;
+    other.is_owning_ = false;
+  }
+
+  /// Move assignment operator.
+  MaybeOwningStorage& operator=(MaybeOwningStorage&& other) noexcept {
+    if (this != &other) {
+      free_data();
+
+      device_ = other.device_;
+      data_ = other.data_;
+      capacity_ = other.capacity_;
+      deleter_ = other.deleter_;
+      is_owning_ = other.is_owning_;
+
+      other.data_ = nullptr;
+      other.capacity_ = 0;
+      other.deleter_ = detail::noop;
+      other.is_owning_ = false;
+    }
+    return *this;
+  }
+
+  /// Destructor - frees owned memory.
+  ~MaybeOwningStorage() {
+    free_data();
+  }
+
+  /// Copies data between storage locations.
+  /// @param dst_data_ptr Destination data pointer.
+  /// @param src_data_ptr Source data pointer.
+  /// @param nbytes Number of bytes to copy.
+  /// @param src_device Source device.
+  void copy_(
+      void* dst_data_ptr,
+      void* src_data_ptr,
+      size_t nbytes,
+      const c10::Device& src_device) {
+    ET_CHECK_MSG(
+        dst_data_ptr, "Storage copy failed: dst_data_ptr cannot be nullptr");
+    ET_CHECK_MSG(
+        src_data_ptr, "Storage copy failed: src_data_ptr cannot be nullptr");
+
+    if (dst_data_ptr == src_data_ptr) {
+      return;
+    }
+
+    ET_CHECK_MSG(
+        device_.is_cpu() && src_device.is_cpu(),
+        "Only CPU-to-CPU copy is currently supported");
+
+    DeviceTraits<c10::DeviceType::CPU>::memcpy(
+        dst_data_ptr, src_data_ptr, nbytes, device_, src_device);
+  }
+
+  /// Creates a clone of this storage on the specified device.
+  /// @param device Target device for the clone (must be CPU).
+  /// @return A new MaybeOwningStorage with copied data.
+  MaybeOwningStorage clone(const c10::Device& device) const {
+    ET_CHECK_MSG(data_, "Storage clone failed: source data cannot be nullptr");
+    ET_CHECK_MSG(
+        device.is_cpu(), "Only CPU device is currently supported for clone");
+
+    MaybeOwningStorage cloned_storage(device, capacity_);
+
+    DeviceTraits<c10::DeviceType::CPU>::memcpy(
+        cloned_storage.data_, data_, capacity_, device, device_);
+
+    return cloned_storage;
+  }
+
+  /// Returns the data pointer, or nullptr for zero-sized storage.
+  void* data() const {
+    if (capacity_ == 0) {
+      return nullptr;
+    }
+    return data_;
+  }
+
+  /// Returns the device this storage is on.
+  const c10::Device& device() const {
+    return device_;
+  }
+
+  /// Returns the capacity in bytes.
+  size_t nbytes() const {
+    return capacity_;
+  }
+
+  /// Returns true if this storage owns its memory.
+  bool is_owning() const {
+    return is_owning_;
+  }
+
+  /// Returns true if the storage can be resized (must be owning).
+  bool is_resizable() const {
+    return is_owning_;
+  }
+
+ private:
+  c10::Device device_ = CPU_DEVICE;
+  void* data_ = nullptr;
+  size_t capacity_ = 0;
+  DeleterFn deleter_ = detail::noop;
+  bool is_owning_ = false;
+
+  /// Frees the data if non-null.
+  void free_data() {
+    if (data_ != nullptr) {
+      deleter_(data_);
+      data_ = nullptr;
+    }
+  }
+};
+
+/// Storage is a shared pointer to MaybeOwningStorage.
+/// Multiple tensors can share the same underlying storage.
+using Storage = SharedPtr<MaybeOwningStorage>;
+
+} // namespace executorch::backends::aoti::slim
diff --git a/backends/aoti/slim/core/TARGETS b/backends/aoti/slim/core/TARGETS
new file mode 100644
index 00000000000..77871de4469
--- /dev/null
+++ b/backends/aoti/slim/core/TARGETS
@@ -0,0 +1,3 @@
+load(":targets.bzl", "define_common_targets")
+
+define_common_targets()
diff --git a/backends/aoti/slim/core/targets.bzl b/backends/aoti/slim/core/targets.bzl
new file mode 100644
index 00000000000..12de67bf8b1
--- /dev/null
+++ b/backends/aoti/slim/core/targets.bzl
@@ -0,0 +1,19 @@
+load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")
+
+def define_common_targets():
+    """Define targets for SlimTensor core module."""
+
+    # Header-only library for Storage
+    runtime.cxx_library(
+        name = "storage",
+        headers = [
+            "Storage.h",
+        ],
+        visibility = ["@EXECUTORCH_CLIENTS"],
+        exported_deps = [
+            "//executorch/backends/aoti/slim/c10/core:device",
+            "//executorch/backends/aoti/slim/c10/core:scalar_type",
+            "//executorch/backends/aoti/slim/util:shared_ptr",
+            "//executorch/runtime/platform:platform",
+        ],
+    )
diff --git a/backends/aoti/slim/core/test/TARGETS b/backends/aoti/slim/core/test/TARGETS
new file mode 100644
index 00000000000..77871de4469
--- /dev/null
+++ b/backends/aoti/slim/core/test/TARGETS
@@ -0,0 +1,3 @@
+load(":targets.bzl", "define_common_targets")
+
+define_common_targets()
diff --git a/backends/aoti/slim/core/test/targets.bzl b/backends/aoti/slim/core/test/targets.bzl
new file mode 100644
index 00000000000..1bc6029bd2d
--- /dev/null
+++ b/backends/aoti/slim/core/test/targets.bzl
@@ -0,0 +1,14 @@
+load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")
+
+def define_common_targets():
+    """Define test targets for SlimTensor core module."""
+
+    runtime.cxx_test(
+        name = "test_storage_cpu",
+        srcs = [
+            "test_storage_cpu.cpp",
+        ],
+        deps = [
+            "//executorch/backends/aoti/slim/core:storage",
+        ],
+    )
diff --git a/backends/aoti/slim/core/test/test_storage_cpu.cpp b/backends/aoti/slim/core/test/test_storage_cpu.cpp
new file mode 100644
index 00000000000..a22c3d92ebe
--- /dev/null
+++ b/backends/aoti/slim/core/test/test_storage_cpu.cpp
@@ -0,0 +1,259 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+#include <executorch/backends/aoti/slim/core/Storage.h>
+
+#include <gtest/gtest.h>
+
+namespace executorch::backends::aoti::slim {
+
+// =============================================================================
+// DeviceTraits Tests
+// =============================================================================
+
+TEST(DeviceTraitsCPUTest, AllocateAndFree) {
+  constexpr size_t kSize = 1024;
+  void* ptr = DeviceTraits<c10::DeviceType::CPU>::allocate(kSize);
+  ASSERT_NE(ptr, nullptr);
+
+  DeviceTraits<c10::DeviceType::CPU>::free(ptr);
+}
+
+TEST(DeviceTraitsCPUTest, AllocateZeroBytes) {
+  void* ptr = DeviceTraits<c10::DeviceType::CPU>::allocate(0);
+  DeviceTraits<c10::DeviceType::CPU>::free(ptr);
+}
+
+TEST(DeviceTraitsCPUTest, MemcpyCPUToCPU) {
+  constexpr size_t kSize = 256;
+  float* src = static_cast<float*>(
+      DeviceTraits<c10::DeviceType::CPU>::allocate(kSize * sizeof(float)));
+  float* dst = static_cast<float*>(
+      DeviceTraits<c10::DeviceType::CPU>::allocate(kSize * sizeof(float)));
+
+  for (size_t i = 0; i < kSize; ++i) {
+    src[i] = static_cast<float>(i) * 1.5f;
+  }
+
+  DeviceTraits<c10::DeviceType::CPU>::memcpy(
+      dst, src, kSize * sizeof(float), CPU_DEVICE, CPU_DEVICE);
+
+  for (size_t i = 0; i < kSize; ++i) {
+    EXPECT_FLOAT_EQ(dst[i], static_cast<float>(i) * 1.5f);
+  }
+
+  DeviceTraits<c10::DeviceType::CPU>::free(src);
+  DeviceTraits<c10::DeviceType::CPU>::free(dst);
+}
+
+// =============================================================================
+// MaybeOwningStorage Tests - Owning Mode
+// =============================================================================
+
+TEST(MaybeOwningStorageTest, ConstructOwning) {
+  constexpr size_t kNbytes = 512;
+  MaybeOwningStorage storage(CPU_DEVICE, kNbytes);
+
+  EXPECT_NE(storage.data(), nullptr);
+  EXPECT_EQ(storage.nbytes(), kNbytes);
+  EXPECT_TRUE(storage.device().is_cpu());
+  EXPECT_TRUE(storage.is_owning());
+  EXPECT_TRUE(storage.is_resizable());
+}
+
+TEST(MaybeOwningStorageTest, ConstructOwningZeroBytes) {
+  MaybeOwningStorage storage(CPU_DEVICE, 0);
+
+  EXPECT_EQ(storage.data(), nullptr);
+  EXPECT_EQ(storage.nbytes(), 0);
+  EXPECT_TRUE(storage.device().is_cpu());
+  EXPECT_TRUE(storage.is_owning());
+}
+
+TEST(MaybeOwningStorageTest, DataPersistence) {
+  constexpr size_t kNumFloats = 64;
+  constexpr size_t kNbytes = kNumFloats * sizeof(float);
+  MaybeOwningStorage storage(CPU_DEVICE, kNbytes);
+
+  float* data = static_cast<float*>(storage.data());
+  for (size_t i = 0; i < kNumFloats; ++i) {
+    data[i] = static_cast<float>(i) * 2.0f;
+  }
+
+  float* read_data = static_cast<float*>(storage.data());
+  for (size_t i = 0; i < kNumFloats; ++i) {
+    EXPECT_FLOAT_EQ(read_data[i], static_cast<float>(i) * 2.0f);
+  }
+}
+
+TEST(MaybeOwningStorageTest, MoveConstruct) {
+  constexpr size_t kNbytes = 256;
+  MaybeOwningStorage original(CPU_DEVICE, kNbytes);
+  void* original_data = original.data();
+
+  MaybeOwningStorage moved(std::move(original));
+
+  EXPECT_EQ(moved.data(), original_data);
+  EXPECT_EQ(moved.nbytes(), kNbytes);
+  EXPECT_TRUE(moved.is_owning());
+
+  EXPECT_EQ(original.data(), nullptr);
+  EXPECT_EQ(original.nbytes(), 0);
+  EXPECT_FALSE(original.is_owning());
+}
+
+TEST(MaybeOwningStorageTest, MoveAssign) {
+  constexpr size_t kNbytes1 = 256;
+  constexpr size_t kNbytes2 = 512;
+  MaybeOwningStorage storage1(CPU_DEVICE, kNbytes1);
+  MaybeOwningStorage storage2(CPU_DEVICE, kNbytes2);
+  void* storage2_data = storage2.data();
+
+  storage1 = std::move(storage2);
+
+  EXPECT_EQ(storage1.data(), storage2_data);
+  EXPECT_EQ(storage1.nbytes(), kNbytes2);
+  EXPECT_TRUE(storage1.is_owning());
+
+  EXPECT_EQ(storage2.data(), nullptr);
+  EXPECT_EQ(storage2.nbytes(), 0);
+  EXPECT_FALSE(storage2.is_owning());
+}
+
+TEST(MaybeOwningStorageTest, Clone) {
+  constexpr size_t kNumFloats = 32;
+  constexpr size_t kNbytes = kNumFloats * sizeof(float);
+  MaybeOwningStorage original(CPU_DEVICE, kNbytes);
+
+  float* data = static_cast<float*>(original.data());
+  for (size_t i = 0; i < kNumFloats; ++i) {
+    data[i] = static_cast<float>(i) * 3.0f;
+  }
+
+  MaybeOwningStorage cloned = original.clone(CPU_DEVICE);
+
+  EXPECT_NE(cloned.data(), original.data());
+  EXPECT_EQ(cloned.nbytes(), original.nbytes());
+  EXPECT_TRUE(cloned.is_owning());
+
+  float* cloned_data = static_cast<float*>(cloned.data());
+  for (size_t i = 0; i < kNumFloats; ++i) {
+    EXPECT_FLOAT_EQ(cloned_data[i], static_cast<float>(i) * 3.0f);
+  }
+
+  data[0] = 999.0f;
+  EXPECT_FLOAT_EQ(cloned_data[0], 0.0f);
+}
+
+TEST(MaybeOwningStorageTest, CopyFunction) {
+  constexpr size_t kNumFloats = 16;
+  constexpr size_t kNbytes = kNumFloats * sizeof(float);
+  MaybeOwningStorage src_storage(CPU_DEVICE, kNbytes);
+  MaybeOwningStorage dst_storage(CPU_DEVICE, kNbytes);
+
+  float* src_data = static_cast<float*>(src_storage.data());
+  for (size_t i = 0; i < kNumFloats; ++i) {
+    src_data[i] = static_cast<float>(i) + 0.5f;
+  }
+
+  dst_storage.copy_(
+      dst_storage.data(), src_storage.data(), kNbytes, CPU_DEVICE);
+
+  float* dst_data = static_cast<float*>(dst_storage.data());
+  for (size_t i = 0; i < kNumFloats; ++i) {
+    EXPECT_FLOAT_EQ(dst_data[i], static_cast<float>(i) + 0.5f);
+  }
+}
+
+// =============================================================================
+// Storage (SharedPtr) Tests
+// =============================================================================
+
+TEST(StorageSharedPtrTest, BasicUsage) {
+  constexpr size_t kNbytes = 128;
+  Storage storage(new MaybeOwningStorage(CPU_DEVICE, kNbytes));
+
+  EXPECT_NE(storage.get(), nullptr);
+  EXPECT_NE(storage->data(), nullptr);
+  EXPECT_EQ(storage->nbytes(), kNbytes);
+  EXPECT_TRUE(storage->device().is_cpu());
+  EXPECT_EQ(storage.use_count(), 1);
+}
+
+TEST(StorageSharedPtrTest, SharedOwnership) {
+  constexpr size_t kNbytes = 128;
+  Storage storage1(new MaybeOwningStorage(CPU_DEVICE, kNbytes));
+  void* data_ptr = storage1->data();
+
+  Storage storage2 = storage1;
+
+  EXPECT_EQ(storage1.use_count(), 2);
+  EXPECT_EQ(storage2.use_count(), 2);
+  EXPECT_EQ(storage1->data(), storage2->data());
+  EXPECT_EQ(storage2->data(), data_ptr);
+}
+
+TEST(StorageSharedPtrTest, SharedOwnershipModification) {
+  constexpr size_t kNumFloats = 8;
+  constexpr size_t kNbytes = kNumFloats * sizeof(float);
+  Storage storage1(new MaybeOwningStorage(CPU_DEVICE, kNbytes));
+
+  float* data = static_cast<float*>(storage1->data());
+  for (size_t i = 0; i < kNumFloats; ++i) {
+    data[i] = 0.0f;
+  }
+
+  Storage storage2 = storage1;
+
+  float* data2 = static_cast<float*>(storage2->data());
+  for (size_t i = 0; i < kNumFloats; ++i) {
+    data2[i] = static_cast<float>(i) * 10.0f;
+  }
+
+  float* data1 = static_cast<float*>(storage1->data());
+  for (size_t i = 0; i < kNumFloats; ++i) {
+    EXPECT_FLOAT_EQ(data1[i], static_cast<float>(i) * 10.0f);
+  }
+}
+
+TEST(StorageSharedPtrTest, ReferenceCountDecrement) {
+  constexpr size_t kNbytes = 64;
+  Storage storage1(new MaybeOwningStorage(CPU_DEVICE, kNbytes));
+  EXPECT_EQ(storage1.use_count(), 1);
+
+  {
+    Storage storage2 = storage1;
+    EXPECT_EQ(storage1.use_count(), 2);
+  }
+
+  EXPECT_EQ(storage1.use_count(), 1);
+}
+
+TEST(StorageSharedPtrTest, MoveSemantics) {
+  constexpr size_t kNbytes = 64;
+  Storage storage1(new MaybeOwningStorage(CPU_DEVICE, kNbytes));
+  void* data_ptr = storage1->data();
+
+  Storage storage2 = std::move(storage1);
+
+  EXPECT_EQ(storage1.get(), nullptr);
+  EXPECT_EQ(storage2->data(), data_ptr);
+  EXPECT_EQ(storage2.use_count(), 1);
+}
+
+TEST(StorageSharedPtrTest, MakeShared) {
+  constexpr size_t kNbytes = 256;
+  Storage storage = make_shared<MaybeOwningStorage>(CPU_DEVICE, kNbytes);
+
+  EXPECT_NE(storage.get(), nullptr);
+  EXPECT_NE(storage->data(), nullptr);
+  EXPECT_EQ(storage->nbytes(), kNbytes);
+  EXPECT_EQ(storage.use_count(), 1);
+}
+
+} // namespace executorch::backends::aoti::slim
diff --git a/backends/aoti/slim/util/SharedPtr.h b/backends/aoti/slim/util/SharedPtr.h
new file mode 100644
index 00000000000..e4e439ee4cb
--- /dev/null
+++ b/backends/aoti/slim/util/SharedPtr.h
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+#pragma once
+
+#include <cstddef>
+#include <utility>
+
+#include <executorch/runtime/platform/assert.h>
+
+namespace executorch::backends::aoti::slim {
+
+/**
+ * SharedPtr - A lightweight shared pointer implementation optimized for
+ * single-threaded execution contexts.
+ *
+ * This class provides shared ownership semantics similar to std::shared_ptr but
+ * without atomic operations, making it faster in single-threaded contexts.
+ * ExecuTorch AOTI-driven backends operate in a single-threaded context, so
+ * this optimization is safe and provides better performance.
+ *
+ * Primary Use Cases:
+ * 1. Intermediate SlimTensor Storage Management:
+ *    - Manages temporary tensors created during model execution
+ *    - Avoids the overhead of atomic reference counting in std::shared_ptr
+ *
+ * 2. Input/Output Tensor References:
+ *    - Provides reference counting for input/output tensors
+ *    - Uses dummy deleters to prevent premature deallocation when needed
+ */
+template <typename T>
+class SharedPtr {
+ private:
+  struct ControlBlock {
+    int count = 1;
+    T* ptr;
+    using Deleter = void (*)(T*);
+    Deleter deleter;
+
+    ControlBlock(T* p, Deleter d) : ptr(p), deleter(d) {}
+    ControlBlock(const ControlBlock&) = delete;
+    ControlBlock& operator=(const ControlBlock&) = delete;
+    ControlBlock(ControlBlock&&) = delete;
+    ControlBlock& operator=(ControlBlock&&) = delete;
+
+    ~ControlBlock() {
+      if (ptr) {
+        deleter(ptr);
+      }
+    }
+  };
+
+  ControlBlock* cb_;
+
+  static void default_deleter(T* p) {
+    delete p;
+  }
+
+  void cleanup() {
+    if (cb_ && --cb_->count == 0) {
+      delete cb_;
+    }
+    cb_ = nullptr;
+  }
+
+ public:
+  /// Default constructor - creates an empty shared pointer.
+  SharedPtr() noexcept : cb_(nullptr) {}
+
+  /// Constructor from raw pointer.
+  explicit SharedPtr(T* p, typename ControlBlock::Deleter d = default_deleter)
+      : cb_(p ? new ControlBlock(p, d) : nullptr) {}
+
+  /// Copy constructor.
+  SharedPtr(const SharedPtr& other) noexcept : cb_(other.cb_) {
+    if (cb_) {
+      ++cb_->count;
+    }
+  }
+
+  /// Move constructor.
+  SharedPtr(SharedPtr&& other) noexcept : cb_(other.cb_) {
+    other.cb_ = nullptr;
+  }
+
+  /// Destructor.
+  ~SharedPtr() {
+    cleanup();
+  }
+
+  /// Copy assignment.
+  SharedPtr& operator=(const SharedPtr& other) noexcept {
+    if (this != &other) {
+      cleanup();
+      cb_ = other.cb_;
+      if (cb_) {
+        ++cb_->count;
+      }
+    }
+    return *this;
+  }
+
+  /// Move assignment.
+  SharedPtr& operator=(SharedPtr&& other) noexcept {
+    if (this != &other) {
+      cleanup();
+      cb_ = other.cb_;
+      other.cb_ = nullptr;
+    }
+    return *this;
+  }
+
+  /// Resets the shared pointer to manage a new object.
+  void reset(
+      T* p = nullptr,
+      typename ControlBlock::Deleter d = default_deleter) {
+    *this = SharedPtr(p, d);
+  }
+
+  /// Swaps the contents with another shared pointer.
+  void swap(SharedPtr& other) noexcept {
+    std::swap(cb_, other.cb_);
+  }
+
+  /// Returns the managed pointer.
+  T* get() const noexcept {
+    return cb_ ? cb_->ptr : nullptr;
+  }
+
+  /// Dereferences the managed pointer.
+  T& operator*() const {
+    ET_CHECK_MSG(cb_, "Dereferencing null SharedPtr");
+    return *cb_->ptr;
+  }
+
+  /// Accesses members of the managed object.
+  T* operator->() const {
+    ET_CHECK_MSG(cb_, "Accessing member of null SharedPtr");
+    return cb_->ptr;
+  }
+
+  /// Returns the reference count.
+  long use_count() const noexcept {
+    return cb_ ? cb_->count : 0;
+  }
+
+  /// Returns true if the shared pointer is not null.
+  explicit operator bool() const noexcept {
+    return cb_ != nullptr;
+  }
+
+  friend void swap(SharedPtr& a, SharedPtr& b) noexcept {
+    a.swap(b);
+  }
+
+  friend bool operator==(const SharedPtr& lhs, const SharedPtr& rhs) noexcept {
+    return lhs.get() == rhs.get();
+  }
+
+  friend bool operator!=(const SharedPtr& lhs, const SharedPtr& rhs) noexcept {
+    return !(lhs == rhs);
+  }
+
+  friend bool operator==(const SharedPtr& lhs, std::nullptr_t) noexcept {
+    return lhs.get() == nullptr;
+  }
+
+  friend bool operator!=(const SharedPtr& lhs, std::nullptr_t) noexcept {
+    return lhs.get() != nullptr;
+  }
+
+  friend bool operator==(std::nullptr_t, const SharedPtr& rhs) noexcept {
+    return rhs.get() == nullptr;
+  }
+
+  friend bool operator!=(std::nullptr_t, const SharedPtr& rhs) noexcept {
+    return rhs.get() != nullptr;
+  }
+};
+
+/// Creates a SharedPtr managing a new object constructed with the given args.
+template <typename T, typename... Args>
+SharedPtr<T> make_shared(Args&&... args) {
+  return SharedPtr<T>(new T(std::forward<Args>(args)...));
+}
+
+} // namespace executorch::backends::aoti::slim
diff --git a/backends/aoti/slim/util/TARGETS b/backends/aoti/slim/util/TARGETS
new file mode 100644
index 00000000000..77871de4469
--- /dev/null
+++ b/backends/aoti/slim/util/TARGETS
@@ -0,0 +1,3 @@
+load(":targets.bzl", "define_common_targets")
+
+define_common_targets()
diff --git a/backends/aoti/slim/util/targets.bzl b/backends/aoti/slim/util/targets.bzl
new file mode 100644
index 00000000000..13f49168a0f
--- /dev/null
+++ b/backends/aoti/slim/util/targets.bzl
@@ -0,0 +1,16 @@
+load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")
+
+def define_common_targets():
+    """Define targets for SlimTensor util module."""
+
+    # Header-only library for SharedPtr
+    runtime.cxx_library(
+        name = "shared_ptr",
+        headers = [
+            "SharedPtr.h",
+        ],
+        visibility = ["@EXECUTORCH_CLIENTS"],
+        exported_deps = [
+            "//executorch/runtime/platform:platform",
+        ],
+    )