Skip to content

Commit e6e5e8f

Browse files
nlutsenko authored and
meta-codesync[bot] committed
clang-format | Format fbsource with clang-format 21.
Reviewed By: ChristianK275

Differential Revision: D85317706

fbshipit-source-id: b399c5c4b75252999442b7d7d2778e7a241b0025
1 parent d50069c commit e6e5e8f

File tree

13 files changed

+133
-101
lines changed

13 files changed

+133
-101
lines changed

contrib/dynamic_embedding/src/tde/bind.cpp

Lines changed: 8 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -38,13 +38,14 @@ TORCH_LIBRARY(tde, m) {
3838
m.class_<FetchHandle>("FetchHandle").def("wait", &FetchHandle::Wait);
3939

4040
m.class_<PS>("PS")
41-
.def(torch::init<
42-
std::string,
43-
c10::intrusive_ptr<LocalShardList>,
44-
int64_t,
45-
int64_t,
46-
std::string,
47-
int64_t>())
41+
.def(
42+
torch::init<
43+
std::string,
44+
c10::intrusive_ptr<LocalShardList>,
45+
int64_t,
46+
int64_t,
47+
std::string,
48+
int64_t>())
4849
.def("fetch", &PS::Fetch)
4950
.def("evict", &PS::Evict);
5051
}

contrib/dynamic_embedding/src/tde/details/cacheline_id_transformer_impl.h

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -40,9 +40,10 @@ inline CachelineIDTransformer<
4040
: num_groups_(
4141
((capacity == 0 ? 2 * num_embedding : capacity) + group_size_ - 1) /
4242
group_size_) /*capacity by default is 2 * num_embedding */,
43-
cache_values_(reinterpret_cast<CacheValue*>(alignMalloc(
44-
CachelineSize,
45-
sizeof(CacheValue) * num_groups_ * group_size_))),
43+
cache_values_(
44+
reinterpret_cast<CacheValue*>(alignMalloc(
45+
CachelineSize,
46+
sizeof(CacheValue) * num_groups_ * group_size_))),
4647
bitmap_(num_embedding) {
4748
memset(
4849
cache_values_.get(), 0, sizeof(CacheValue) * num_groups_ * group_size_);

contrib/dynamic_embedding/src/tde/details/io_redis_test.cpp

Lines changed: 12 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -43,12 +43,18 @@ TEST(TDE, IO_redis) {
4343
torch::kF32,
4444
[&](std::vector<torch::Tensor> val) {
4545
ASSERT_EQ(val.size(), 3);
46-
ASSERT_TRUE(val[0].allclose(
47-
torch::tensor({1, 2}, torch::TensorOptions().dtype(torch::kF32))));
48-
ASSERT_TRUE(val[1].allclose(
49-
torch::tensor({3, 4}, torch::TensorOptions().dtype(torch::kF32))));
50-
ASSERT_TRUE(val[2].allclose(
51-
torch::tensor({5, 9}, torch::TensorOptions().dtype(torch::kF32))));
46+
ASSERT_TRUE(
47+
val[0].allclose(
48+
torch::tensor(
49+
{1, 2}, torch::TensorOptions().dtype(torch::kF32))));
50+
ASSERT_TRUE(
51+
val[1].allclose(
52+
torch::tensor(
53+
{3, 4}, torch::TensorOptions().dtype(torch::kF32))));
54+
ASSERT_TRUE(
55+
val[2].allclose(
56+
torch::tensor(
57+
{5, 9}, torch::TensorOptions().dtype(torch::kF32))));
5258

5359
notification.Done();
5460
});

contrib/dynamic_embedding/src/tde/details/redis_io_v1.cpp

Lines changed: 20 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -92,9 +92,10 @@ struct DB {
9292

9393
struct Prefix {
9494
constexpr static auto rule = LEXY_LIT("prefix=") >>
95-
dsl::capture(dsl::token(dsl::identifier(
96-
dsl::ascii::alpha_underscore,
97-
dsl::ascii::alpha_digit_underscore)));
95+
dsl::capture(dsl::token(
96+
dsl::identifier(
97+
dsl::ascii::alpha_underscore,
98+
dsl::ascii::alpha_digit_underscore)));
9899
constexpr static auto value =
99100
lexy::callback<PrefixOpt>([](auto&& str) -> PrefixOpt {
100101
return PrefixOpt{std::string(str.data(), str.size())};
@@ -263,8 +264,9 @@ void RedisV1::StartThread() {
263264
void RedisV1::HeartBeat(redis::ContextPtr& connection) {
264265
for (uint32_t retry = 0; retry < opt_.retry_limit_; ++retry) {
265266
try {
266-
auto reply = redis::ReplyPtr(reinterpret_cast<redisReply*>(
267-
redisCommand(connection.get(), "PING")));
267+
auto reply = redis::ReplyPtr(
268+
reinterpret_cast<redisReply*>(
269+
redisCommand(connection.get(), "PING")));
268270
TORCH_CHECK(
269271
reply && reply->type == REDIS_REPLY_STRING,
270272
"Ping should return string");
@@ -283,7 +285,7 @@ redis::ContextPtr RedisV1::Connect() const {
283285
connection =
284286
redis::ContextPtr(redisConnect(opt_.host_.c_str(), opt_.port_));
285287
} else {
286-
struct timeval interval {};
288+
struct timeval interval{};
287289
interval.tv_sec = opt_.timeout_ms_ / 1000;
288290
interval.tv_usec = opt_.timeout_ms_ % 1000 * 1000;
289291
connection = redis::ContextPtr(
@@ -299,21 +301,24 @@ redis::ContextPtr RedisV1::Connect() const {
299301
if (!opt_.password_.empty()) {
300302
redis::ReplyPtr reply;
301303
if (opt_.username_.empty()) {
302-
reply = redis::ReplyPtr(reinterpret_cast<redisReply*>(
303-
redisCommand(connection.get(), "AUTH %s", opt_.password_.c_str())));
304+
reply = redis::ReplyPtr(
305+
reinterpret_cast<redisReply*>(redisCommand(
306+
connection.get(), "AUTH %s", opt_.password_.c_str())));
304307
} else {
305-
reply = redis::ReplyPtr(reinterpret_cast<redisReply*>(redisCommand(
306-
connection.get(),
307-
"AUTH %s %s",
308-
opt_.username_.c_str(),
309-
opt_.password_.c_str())));
308+
reply = redis::ReplyPtr(
309+
reinterpret_cast<redisReply*>(redisCommand(
310+
connection.get(),
311+
"AUTH %s %s",
312+
opt_.username_.c_str(),
313+
opt_.password_.c_str())));
310314
}
311315
CheckStatus("auth error", connection, reply);
312316
}
313317

314318
if (opt_.db_ != 0) {
315-
auto reply = redis::ReplyPtr(reinterpret_cast<redisReply*>(
316-
redisCommand(connection.get(), "SELECT %d", opt_.db_)));
319+
auto reply = redis::ReplyPtr(
320+
reinterpret_cast<redisReply*>(
321+
redisCommand(connection.get(), "SELECT %d", opt_.db_)));
317322
CheckStatus("select db error", connection, reply);
318323
}
319324

contrib/dynamic_embedding/src/tde/ps.h

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -50,10 +50,11 @@ class LocalShardList : public torch::CustomClassHolder {
5050
int64_t col_size,
5151
c10::intrusive_ptr<TensorList> tensors) {
5252
// col_start/col_size not used for now.
53-
shards_.emplace_back(LocalShard{
54-
.row_start_ = row_start,
55-
.row_size_ = row_size,
56-
.tensors_ = std::move(tensors)});
53+
shards_.emplace_back(
54+
LocalShard{
55+
.row_start_ = row_start,
56+
.row_size_ = row_size,
57+
.tensors_ = std::move(tensors)});
5758
}
5859

5960
Container::const_iterator begin() const {

torchrec/csrc/dynamic_embedding/bind.cpp

Lines changed: 8 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -35,13 +35,14 @@ TORCH_LIBRARY(tde, m) {
3535
m.class_<FetchHandle>("FetchHandle").def("wait", &FetchHandle::wait);
3636

3737
m.class_<PS>("PS")
38-
.def(torch::init<
39-
std::string,
40-
c10::intrusive_ptr<LocalShardList>,
41-
int64_t,
42-
int64_t,
43-
std::string,
44-
int64_t>())
38+
.def(
39+
torch::init<
40+
std::string,
41+
c10::intrusive_ptr<LocalShardList>,
42+
int64_t,
43+
int64_t,
44+
std::string,
45+
int64_t>())
4546
.def("fetch", &PS::fetch)
4647
.def("evict", &PS::evict);
4748
}

torchrec/csrc/dynamic_embedding/details/redis/redis_io.cpp

Lines changed: 16 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -142,8 +142,9 @@ void Redis::start_thread() {
142142
void Redis::heartbeat(helper::ContextPtr& connection) {
143143
for (uint32_t retry = 0; retry < opt_.retry_limit; ++retry) {
144144
try {
145-
auto reply = helper::ReplyPtr(reinterpret_cast<redisReply*>(
146-
redisCommand(connection.get(), "PING")));
145+
auto reply = helper::ReplyPtr(
146+
reinterpret_cast<redisReply*>(
147+
redisCommand(connection.get(), "PING")));
147148
TORCH_CHECK(
148149
reply && reply->type == REDIS_REPLY_STRING,
149150
"Ping should return string");
@@ -161,7 +162,7 @@ helper::ContextPtr Redis::connect() const {
161162
if (opt_.timeout_ms == 0) {
162163
connection = helper::ContextPtr(redisConnect(opt_.host.c_str(), opt_.port));
163164
} else {
164-
struct timeval interval {};
165+
struct timeval interval{};
165166
interval.tv_sec = opt_.timeout_ms / 1000;
166167
interval.tv_usec = opt_.timeout_ms % 1000 * 1000;
167168
connection = helper::ContextPtr(
@@ -177,21 +178,24 @@ helper::ContextPtr Redis::connect() const {
177178
if (!opt_.password.empty()) {
178179
helper::ReplyPtr reply;
179180
if (opt_.username.empty()) {
180-
reply = helper::ReplyPtr(reinterpret_cast<redisReply*>(
181-
redisCommand(connection.get(), "AUTH %s", opt_.password.c_str())));
181+
reply = helper::ReplyPtr(
182+
reinterpret_cast<redisReply*>(redisCommand(
183+
connection.get(), "AUTH %s", opt_.password.c_str())));
182184
} else {
183-
reply = helper::ReplyPtr(reinterpret_cast<redisReply*>(redisCommand(
184-
connection.get(),
185-
"AUTH %s %s",
186-
opt_.username.c_str(),
187-
opt_.password.c_str())));
185+
reply = helper::ReplyPtr(
186+
reinterpret_cast<redisReply*>(redisCommand(
187+
connection.get(),
188+
"AUTH %s %s",
189+
opt_.username.c_str(),
190+
opt_.password.c_str())));
188191
}
189192
check_status("auth error", connection, reply);
190193
}
191194

192195
if (opt_.db != 0) {
193-
auto reply = helper::ReplyPtr(reinterpret_cast<redisReply*>(
194-
redisCommand(connection.get(), "SELECT %d", opt_.db)));
196+
auto reply = helper::ReplyPtr(
197+
reinterpret_cast<redisReply*>(
198+
redisCommand(connection.get(), "SELECT %d", opt_.db)));
195199
check_status("select db error", connection, reply);
196200
}
197201

torchrec/csrc/dynamic_embedding/ps.h

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -65,10 +65,11 @@ class LocalShardList : public torch::CustomClassHolder {
6565
int64_t col_size,
6666
std::vector<torch::Tensor> tensors) {
6767
// col_start/col_size not supported now.
68-
shards_.emplace_back(LocalShard{
69-
.row_start = row_start,
70-
.row_size = row_size,
71-
.tensors = std::move(tensors)});
68+
shards_.emplace_back(
69+
LocalShard{
70+
.row_start = row_start,
71+
.row_size = row_size,
72+
.tensors = std::move(tensors)});
7273
}
7374

7475
Container::const_iterator begin() const {

torchrec/inference/inference_legacy/src/BatchingQueue.cpp

Lines changed: 25 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -97,13 +97,14 @@ void BatchingQueue::add(
9797
promise = std::move(promise),
9898
addedTime = addedTime](auto& queue) mutable {
9999
const auto batchSize = request->batch_size;
100-
queue.push(QueryQueueEntry{
101-
std::move(request),
102-
RequestContext{
103-
batchSize,
104-
std::move(promise),
105-
folly::RequestContext::saveContext()},
106-
addedTime});
100+
queue.push(
101+
QueryQueueEntry{
102+
std::move(request),
103+
RequestContext{
104+
batchSize,
105+
std::move(promise),
106+
folly::RequestContext::saveContext()},
107+
addedTime});
107108
});
108109
}
109110

@@ -165,10 +166,11 @@ void BatchingQueue::createBatch() {
165166
config_.batchingInterval))) {
166167
const auto requestsCount = requests.size();
167168

168-
batchingQueues_[roundRobinIdx++]->blockingWrite(BatchingQueueEntry{
169-
.requests = std::move(requests),
170-
.contexts = std::move(contexts),
171-
.addedTime = *startTime});
169+
batchingQueues_[roundRobinIdx++]->blockingWrite(
170+
BatchingQueueEntry{
171+
.requests = std::move(requests),
172+
.contexts = std::move(contexts),
173+
.addedTime = *startTime});
172174

173175
observer_->addRequestsCount(requestsCount);
174176
observer_->recordBatchCreationLatency(
@@ -192,8 +194,9 @@ void BatchingQueue::createBatch() {
192194

193195
void BatchingQueue::pinMemory(int gpuIdx) {
194196
at::cuda::CUDAGuard deviceGuard(gpuIdx);
195-
at::cuda::CUDAStreamGuard streamGuard(at::cuda::getStreamFromPool(
196-
/* isHighPriority */ FLAGS_batching_queue_use_high_pri_stream));
197+
at::cuda::CUDAStreamGuard streamGuard(
198+
at::cuda::getStreamFromPool(
199+
/* isHighPriority */ FLAGS_batching_queue_use_high_pri_stream));
197200
if (config_.warmupFn) {
198201
config_.warmupFn();
199202
}
@@ -295,14 +298,15 @@ void BatchingQueue::pinMemory(int gpuIdx) {
295298

296299
for (auto& [featureName, metadata] : config_.batchingMetadata) {
297300
const auto batchingFuncStart = std::chrono::steady_clock::now();
298-
combineForwardArgs(batchingFuncs_[metadata.type]->batch(
299-
featureName,
300-
requests,
301-
combinedBatchSize,
302-
batchOffsetsLazy,
303-
metadata.device == "cpu" ? c10::Device(c10::kCPU)
304-
: c10::Device(c10::kCUDA, gpuIdx),
305-
batchItemsLazy));
301+
combineForwardArgs(
302+
batchingFuncs_[metadata.type]->batch(
303+
featureName,
304+
requests,
305+
combinedBatchSize,
306+
batchOffsetsLazy,
307+
metadata.device == "cpu" ? c10::Device(c10::kCPU)
308+
: c10::Device(c10::kCUDA, gpuIdx),
309+
batchItemsLazy));
306310
observer_->recordBatchingFuncLatency(
307311
getTimeElapsedMS(batchingFuncStart).count(), metadata.type);
308312
}

torchrec/inference/inference_legacy/src/GPUExecutor.cpp

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -166,11 +166,12 @@ void GPUExecutor::process(int idx) {
166166
c10::InferenceMode inferenceModeGuard;
167167
std::vector<c10::cuda::CUDAStream> streams;
168168
for (size_t i = 0; i < worldSize_; ++i) {
169-
streams.push_back(at::cuda::getStreamFromPool(
170-
/* isHighPriority */ i == rank_
171-
? FLAGS_gpu_executor_use_high_pri_stream_main_device
172-
: FLAGS_gpu_executor_use_high_pri_stream_peer_device,
173-
i));
169+
streams.push_back(
170+
at::cuda::getStreamFromPool(
171+
/* isHighPriority */ i == rank_
172+
? FLAGS_gpu_executor_use_high_pri_stream_main_device
173+
: FLAGS_gpu_executor_use_high_pri_stream_peer_device,
174+
i));
174175
}
175176
at::cuda::CUDAMultiStreamGuard streamGuard(streams);
176177
at::cuda::CUDAGuard deviceGuard(rank_);

0 commit comments

Comments (0)