Skip to content

Commit fe2bf76

Browse files
committed
clang-tidy
1 parent 166daaa commit fe2bf76

File tree

3 files changed

+19
-12
lines changed

3 files changed

+19
-12
lines changed

torch/csrc/jit/codegen/cuda/fusion.cpp

Lines changed: 10 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -280,9 +280,11 @@ void Fusion::swapInputsTo(const std::vector<Val*>& inputs, bool update_tv_mem) {
280280
inputs_ = inputs;
281281
for (auto in : inputs_) {
282282
in->setIsFusionInput(true);
283-
if (update_tv_mem && in->getValType().value() == ValType::TensorView) {
284-
auto tv = in->as<TensorView>();
285-
tv->setMemoryType(MemoryType::Global);
283+
if (update_tv_mem) {
284+
if (in->getValType().value() == ValType::TensorView) {
285+
auto tv = in->as<TensorView>();
286+
tv->setMemoryType(MemoryType::Global);
287+
}
286288
}
287289
}
288290
all_tv_uses_valid_ = false;
@@ -297,9 +299,11 @@ void Fusion::swapOutputsTo(
297299
outputs_ = outputs;
298300
for (auto out : outputs_) {
299301
out->setIsFusionOutput(true);
300-
if (update_tv_mem && out->getValType().value() == ValType::TensorView) {
301-
auto tv = out->as<TensorView>();
302-
tv->setMemoryType(MemoryType::Global);
302+
if (update_tv_mem) {
303+
if (out->getValType().value() == ValType::TensorView) {
304+
auto tv = out->as<TensorView>();
305+
tv->setMemoryType(MemoryType::Global);
306+
}
303307
}
304308
}
305309
all_tv_uses_valid_ = false;

torch/csrc/jit/codegen/cuda/fusion_segmenter.h

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -232,8 +232,12 @@ class TORCH_CUDA_CU_API FusionSegmentGuard : public NonCopyable {
232232
fusion_->swapOutputsTo(new_outputs_, update_tv_mem_);
233233
}
234234

235+
// TODO: clang-tidy doesn't like the possibility of exceptions
236+
// from use of c10::optional and TORCH_INTERNAL_ASSERT, even though
237+
// there shouldn't be any way this throws. Would
238+
// need an assertion free path for this in another cleanup.
239+
// NOLINTNEXTLINE(bugprone-exception-escape)
235240
~FusionSegmentGuard() {
236-
FUSER_PERF_SCOPE("~Segmenter::FusionSegmentGuard");
237241
if (fusion_ == nullptr) {
238242
return;
239243
}

torch/csrc/jit/codegen/cuda/kernel_cache.cpp

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -345,7 +345,7 @@ FusionKernelRuntime* FusionExecutorCache::getKernelRuntimeFor(
345345
return kernel_runtime->getMaybeHeuristicsFor(inputs);
346346
});
347347

348-
FusionKernelRuntime* kernel_runtime;
348+
FusionKernelRuntime* kernel_runtime = nullptr;
349349
if (reuse_it != kernel_runtimes.end()) {
350350
kernel_runtime = reuse_it->get();
351351
} else {
@@ -520,13 +520,13 @@ void FusionKernelRuntime::prepareRuntimeOrder() {
520520
std::unordered_set<Val*> available_input;
521521

522522
// setup the order tensor dimensions are bound
523-
for (size_t i = 0; i < segmented_fusion_->inputs().size(); i++) {
523+
for (size_t i : c10::irange(segmented_fusion_->inputs().size())) {
524524
auto input_val = segmented_fusion_->inputs()[i];
525525
available_input.insert(input_val);
526526

527527
if (auto input_tv = dynamic_cast<TensorView*>(input_val)) {
528528
auto root_dom = TensorDomain::noReductions(input_tv->getRootDomain());
529-
for (size_t dim = 0; dim < root_dom.size(); dim++) {
529+
for (size_t dim : c10::irange(root_dom.size())) {
530530
const auto extent = root_dom[dim]->extent();
531531
available_input.insert(extent);
532532
runtime_workspace_.group_extent_binding_order.push_back(extent);
@@ -559,8 +559,7 @@ void FusionKernelRuntime::prepareRuntimeOrder() {
559559
const auto& group_outputs = group->outputs();
560560

561561
// Insert graph segment output to tensor map
562-
for (size_t group_out_i = 0; group_out_i < group_outputs.size();
563-
group_out_i++) {
562+
for (size_t group_out_i : c10::irange(group_outputs.size())) {
564563
available_input.insert(group_outputs[group_out_i]);
565564
}
566565
group_ran[group_i] = true;

0 commit comments

Comments (0)