Skip to content

Commit 6384c2d

Browse files
Meghan Lele and facebook-github-bot
Meghan Lele
authored and committed
[JIT] clang-format JIT code (pytorch#35115)
Summary: Pull Request resolved: pytorch#35115 This commit runs the newly added tools/clang_format.py on the JIT codebase and includes all of the formatting changes thus produced. Testing: Ran the script, CI. Test Plan: Imported from OSS Reviewed By: eellison Differential Revision: D20568523 Pulled By: SplitInfinity fbshipit-source-id: e09bdb982ccf090eecfb7c7b461b8d0681eef82b
1 parent 1422d2c commit 6384c2d

File tree

225 files changed

+3571
-3211
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

225 files changed

+3571
-3211
lines changed

Diff for: test/cpp/jit/test_alias_analysis.cpp

+60-65
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,9 @@
11
#include <torch/csrc/autograd/generated/variable_factories.h>
22
#include <torch/csrc/jit/ir/irparser.h>
33
#include "test/cpp/jit/test_base.h"
4-
#include "torch/csrc/jit/runtime/custom_operator.h"
5-
#include "torch/csrc/jit/ir/alias_analysis.h"
64
#include "torch/csrc/jit/frontend/ir_emitter.h"
5+
#include "torch/csrc/jit/ir/alias_analysis.h"
6+
#include "torch/csrc/jit/runtime/custom_operator.h"
77
#include "torch/csrc/utils/memory.h"
88

99
namespace torch {
@@ -391,32 +391,30 @@ void testAliasAnalysis() {
391391
}
392392

393393
// test none value does not have writers
394-
{
395-
{
396-
auto graph = std::make_shared<Graph>();
397-
std::unordered_map<std::string, Value*> vmap;
398-
parseIR(
399-
R"IR(
394+
{{auto graph = std::make_shared<Graph>();
395+
std::unordered_map<std::string, Value*> vmap;
396+
parseIR(
397+
R"IR(
400398
graph():
401399
%opt : Tensor? = prim::Constant()
402400
%out : Tensor = prim::unchecked_unwrap_optional(%opt)
403401
%ret.2 : Tensor = aten::div(%out, %out, %out)
404402
return (%opt, %out, %ret.2)
405403
)IR",
406-
&*graph,
407-
vmap);
404+
&*graph,
405+
vmap);
408406

409-
AliasDb aliasDb(graph);
410-
AT_ASSERT(!aliasDb.hasWriters(vmap["opt"]->node()));
411-
}
412-
}
407+
AliasDb aliasDb(graph);
408+
AT_ASSERT(!aliasDb.hasWriters(vmap["opt"]->node()));
409+
}
410+
} // namespace jit
413411

414-
// test safeToIntroduceAliasingRelationship
415-
{
416-
auto graph = std::make_shared<Graph>();
417-
std::unordered_map<std::string, Value*> vmap;
418-
parseIR(
419-
R"IR(
412+
// test safeToIntroduceAliasingRelationship
413+
{
414+
auto graph = std::make_shared<Graph>();
415+
std::unordered_map<std::string, Value*> vmap;
416+
parseIR(
417+
R"IR(
420418
graph(%x : Tensor):
421419
%3 : int = prim::Constant[value=1]()
422420
%2 : int = prim::Constant[value=0]()
@@ -428,31 +426,33 @@ void testAliasAnalysis() {
428426
%14 : (Tensor, Tensor) = prim::TupleConstruct(%b, %c)
429427
return (%14)
430428
)IR",
431-
&*graph,
432-
vmap);
433-
434-
AliasDb aliasDb(graph);
435-
// x, b, c escape scope, so we can't introduce an aliasing relationship
436-
TORCH_INTERNAL_ASSERT(!aliasDb.safeToChangeAliasingRelationship(vmap["x"], vmap["b"]));
437-
TORCH_INTERNAL_ASSERT(
438-
!aliasDb.safeToChangeAliasingRelationship(vmap["b"], vmap["x"]));
439-
TORCH_INTERNAL_ASSERT(
440-
!aliasDb.safeToChangeAliasingRelationship(vmap["b"], vmap["c"]));
441-
TORCH_INTERNAL_ASSERT(
442-
!aliasDb.safeToChangeAliasingRelationship(vmap["c"], vmap["b"]));
443-
444-
// e aliases the wildcard set because it's contained in a list
445-
TORCH_INTERNAL_ASSERT(
446-
!aliasDb.safeToChangeAliasingRelationship(vmap["e"], vmap["x"]));
447-
TORCH_INTERNAL_ASSERT(
448-
!aliasDb.safeToChangeAliasingRelationship(vmap["x"], vmap["e"]));
449-
450-
// d is a temporary with no writers, safe to change aliasing relationship here
451-
TORCH_INTERNAL_ASSERT(aliasDb.safeToChangeAliasingRelationship(vmap["c"], vmap["d"]));
452-
TORCH_INTERNAL_ASSERT(
453-
aliasDb.safeToChangeAliasingRelationship(vmap["d"], vmap["c"]));
454-
}
429+
&*graph,
430+
vmap);
431+
432+
AliasDb aliasDb(graph);
433+
// x, b, c escape scope, so we can't introduce an aliasing relationship
434+
TORCH_INTERNAL_ASSERT(
435+
!aliasDb.safeToChangeAliasingRelationship(vmap["x"], vmap["b"]));
436+
TORCH_INTERNAL_ASSERT(
437+
!aliasDb.safeToChangeAliasingRelationship(vmap["b"], vmap["x"]));
438+
TORCH_INTERNAL_ASSERT(
439+
!aliasDb.safeToChangeAliasingRelationship(vmap["b"], vmap["c"]));
440+
TORCH_INTERNAL_ASSERT(
441+
!aliasDb.safeToChangeAliasingRelationship(vmap["c"], vmap["b"]));
442+
443+
// e aliases the wildcard set because it's contained in a list
444+
TORCH_INTERNAL_ASSERT(
445+
!aliasDb.safeToChangeAliasingRelationship(vmap["e"], vmap["x"]));
446+
TORCH_INTERNAL_ASSERT(
447+
!aliasDb.safeToChangeAliasingRelationship(vmap["x"], vmap["e"]));
448+
449+
// d is a temporary with no writers, safe to change aliasing relationship here
450+
TORCH_INTERNAL_ASSERT(
451+
aliasDb.safeToChangeAliasingRelationship(vmap["c"], vmap["d"]));
452+
TORCH_INTERNAL_ASSERT(
453+
aliasDb.safeToChangeAliasingRelationship(vmap["d"], vmap["c"]));
455454
}
455+
} // namespace torch
456456

457457
void testWriteTracking() {
458458
RegisterOperators reg({Operator(
@@ -720,7 +720,8 @@ graph():
720720
%d : Tensor[] = prim::ListConstruct(%y)
721721
return (%c, %d)
722722
)IR",
723-
&*graph, vmap);
723+
&*graph,
724+
vmap);
724725

725726
AliasDb aliasDb(graph);
726727
auto x = vmap["x"];
@@ -1179,11 +1180,10 @@ void testAliasRegistration() {
11791180
auto a = graph->addInput();
11801181
graph->insert(rand_op, {a});
11811182

1182-
// Registration time is okay, but throw exception when fetch from registration.
1183+
// Registration time is okay, but throw exception when fetch from
1184+
// registration.
11831185
expectThrows<c10::Error>(
1184-
[&graph] {
1185-
AliasDb aliasDb(graph);
1186-
},
1186+
[&graph] { AliasDb aliasDb(graph); },
11871187
"Tried to register operator foo::rand3(Tensor(a) arg1) -> (Tensor(b)) with aliasing information in the schema but without AliasAnalysisKind::FROM_SCHEMA");
11881188
}
11891189
{
@@ -1199,11 +1199,10 @@ void testAliasRegistration() {
11991199
auto a = graph->addInput();
12001200
graph->insert(rand_op, {a});
12011201

1202-
// Registration time is okay, but throw exception when fetch from registration.
1202+
// Registration time is okay, but throw exception when fetch from
1203+
// registration.
12031204
expectThrows<c10::Error>(
1204-
[&graph] {
1205-
AliasDb aliasDb(graph);
1206-
},
1205+
[&graph] { AliasDb aliasDb(graph); },
12071206
"Tried to register operator foo::rand4(Tensor(a) arg1) -> (Tensor(a)) with aliasing information in the schema but without AliasAnalysisKind::FROM_SCHEMA");
12081207
}
12091208
{
@@ -1301,38 +1300,34 @@ void testAliasRegistration() {
13011300
auto registry = torch::RegisterOperators().op(
13021301
"foo::rand11(Tensor(a) arg1) -> Tensor(a)",
13031302
torch::RegisterOperators::options()
1304-
.catchAllKernel(
1305-
[](at::Tensor t) -> at::Tensor { return t * 2; })
1303+
.catchAllKernel([](at::Tensor t) -> at::Tensor { return t * 2; })
13061304
.aliasAnalysis(AliasAnalysisKind::PURE_FUNCTION));
13071305
const auto rand_op = Symbol::fromQualString("foo::rand11");
13081306
auto graph = std::make_shared<Graph>();
13091307
auto a = graph->addInput();
13101308
graph->insert(rand_op, {a});
13111309

1312-
// Registration time is okay, but throw exception when fetch from registration.
1310+
// Registration time is okay, but throw exception when fetch from
1311+
// registration.
13131312
expectThrows<c10::Error>(
1314-
[&graph] {
1315-
AliasDb aliasDb(graph);
1316-
},
1313+
[&graph] { AliasDb aliasDb(graph); },
13171314
"Tried to register operator foo::rand11(Tensor(a) arg1) -> (Tensor(a)) with aliasing information in the schema but without AliasAnalysisKind::FROM_SCHEMA");
13181315
}
13191316
{
13201317
auto registry = torch::RegisterOperators().op(
13211318
"foo::rand12(Tensor(a) arg1) -> Tensor(b)",
13221319
torch::RegisterOperators::options()
1323-
.catchAllKernel(
1324-
[](at::Tensor t) -> at::Tensor { return t * 2; })
1320+
.catchAllKernel([](at::Tensor t) -> at::Tensor { return t * 2; })
13251321
.aliasAnalysis(AliasAnalysisKind::PURE_FUNCTION));
13261322
const auto rand_op = Symbol::fromQualString("foo::rand12");
13271323
auto graph = std::make_shared<Graph>();
13281324
auto a = graph->addInput();
13291325
graph->insert(rand_op, {a});
13301326

1331-
// Registration time is okay, but throw exception when fetch from registration.
1327+
// Registration time is okay, but throw exception when fetch from
1328+
// registration.
13321329
expectThrows<c10::Error>(
1333-
[&graph] {
1334-
AliasDb aliasDb(graph);
1335-
},
1330+
[&graph] { AliasDb aliasDb(graph); },
13361331
"Tried to register operator foo::rand12(Tensor(a) arg1) -> (Tensor(b)) with aliasing information in the schema but without AliasAnalysisKind::FROM_SCHEMA");
13371332
}
13381333
}

Diff for: test/cpp/jit/test_argument_spec.cpp

+4-2
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,10 @@ bool isEqual(const ArgumentInfo& ti, const autograd::Variable& v) {
2929
ti.type() == v.scalar_type() && ti.dim() == v.dim();
3030
}
3131

32-
autograd::Variable var(at::TensorOptions t, at::IntArrayRef sizes, bool requires_grad) {
32+
autograd::Variable var(
33+
at::TensorOptions t,
34+
at::IntArrayRef sizes,
35+
bool requires_grad) {
3336
return autograd::make_variable(at::rand(sizes, t), requires_grad);
3437
}
3538
autograd::Variable undef() {
@@ -157,7 +160,6 @@ void testArgumentSpec() {
157160
undef()});
158161
list2[1].toTensor().transpose_(0, 1);
159162

160-
161163
ArgumentSpec a = arg_spec_creator.create(true, list);
162164
ArgumentSpec b = arg_spec_creator.create(true, list);
163165
ASSERT_EQ(a.hashCode(), b.hashCode());

Diff for: test/cpp/jit/test_autodiff.cpp

+37-22
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,6 @@
11
#include "test/cpp/jit/test_base.h"
22
#include "test/cpp/jit/test_utils.h"
3-
#include "torch/csrc/jit/runtime/argument_spec.h"
4-
#include "torch/csrc/jit/runtime/autodiff.h"
3+
#include "torch/csrc/jit/frontend/tracer.h"
54
#include "torch/csrc/jit/passes/common_subexpression_elimination.h"
65
#include "torch/csrc/jit/passes/constant_propagation.h"
76
#include "torch/csrc/jit/passes/create_autodiff_subgraphs.h"
@@ -11,7 +10,8 @@
1110
#include "torch/csrc/jit/passes/requires_grad_analysis.h"
1211
#include "torch/csrc/jit/passes/shape_analysis.h"
1312
#include "torch/csrc/jit/passes/utils/subgraph_utils.h"
14-
#include "torch/csrc/jit/frontend/tracer.h"
13+
#include "torch/csrc/jit/runtime/argument_spec.h"
14+
#include "torch/csrc/jit/runtime/autodiff.h"
1515

1616
#include <ATen/ATen.h>
1717
#include "torch/csrc/autograd/engine.h"
@@ -28,8 +28,15 @@ using var_meta_list = std::vector<var_meta_type>;
2828
using test_fn_type = std::function<variable_list(const variable_list&)>;
2929

3030
struct ADTestSpec {
31-
ADTestSpec(const char* name, var_meta_list input_meta, test_fn_type test_fn, float clampMax = -1.0f)
32-
: name(name), input_meta(input_meta), test_fn(test_fn), clampMax(clampMax) {}
31+
ADTestSpec(
32+
const char* name,
33+
var_meta_list input_meta,
34+
test_fn_type test_fn,
35+
float clampMax = -1.0f)
36+
: name(name),
37+
input_meta(input_meta),
38+
test_fn(test_fn),
39+
clampMax(clampMax) {}
3340

3441
variable_list operator()(const variable_list& inputs) const {
3542
return test_fn(inputs);
@@ -39,7 +46,8 @@ struct ADTestSpec {
3946
std::vector<Variable> out;
4047
for (const auto& m : input_meta) {
4148
if (clampMax > 0.0f) {
42-
out.push_back(torch::randn(m, at::requires_grad(true)).clamp(-clampMax, clampMax));
49+
out.push_back(torch::randn(m, at::requires_grad(true))
50+
.clamp(-clampMax, clampMax));
4351
continue;
4452
}
4553
out.push_back(torch::randn(m, at::requires_grad(true)));
@@ -63,7 +71,9 @@ variable_list grad(
6371
const variable_list& outputs,
6472
const variable_list& inputs,
6573
const variable_list& grad_outputs) {
66-
const auto get_edge = [](const Variable& v) { return torch::autograd::impl::gradient_edge(v); };
74+
const auto get_edge = [](const Variable& v) {
75+
return torch::autograd::impl::gradient_edge(v);
76+
};
6777
auto& engine = torch::autograd::Engine::get_default_engine();
6878
return engine.execute(
6979
fmap(outputs, get_edge),
@@ -74,7 +84,9 @@ variable_list grad(
7484
}
7585

7686
void testADFormulas() {
77-
const auto cast = [](const Variable& v) { return static_cast<at::Tensor>(v); };
87+
const auto cast = [](const Variable& v) {
88+
return static_cast<at::Tensor>(v);
89+
};
7890

7991
using VL = variable_list;
8092
const var_meta_list binary_pointwise = {{2, 3, 4, 5}, {2, 3, 4, 5}};
@@ -97,7 +109,8 @@ void testADFormulas() {
97109
// to set a minimum on gradient absolute values
98110
{"tanh",
99111
unary_pointwise,
100-
[](const VL& v) -> VL { return {v[0].tanh()}; }, 3.0f},
112+
[](const VL& v) -> VL { return {v[0].tanh()}; },
113+
3.0f},
101114
{"t", unary_pointwise_2d, [](const VL& v) -> VL { return {v[0].t()}; }},
102115
{"view",
103116
unary_pointwise_2d,
@@ -133,15 +146,15 @@ void testADFormulas() {
133146

134147
// Trace and differentiate the op
135148
auto graph = tracer::trace(
136-
fmap<IValue>(vars_in),
137-
[&test](Stack in) -> Stack {
138-
auto ivalue_inps = fmap(in, [](const IValue& v){
139-
return Variable(v.toTensor());
140-
});
141-
return fmap<IValue>(test(ivalue_inps));
142-
},
143-
[](const Variable& var) { return "";}
144-
).first->graph;
149+
fmap<IValue>(vars_in),
150+
[&test](Stack in) -> Stack {
151+
auto ivalue_inps = fmap(in, [](const IValue& v) {
152+
return Variable(v.toTensor());
153+
});
154+
return fmap<IValue>(test(ivalue_inps));
155+
},
156+
[](const Variable& var) { return ""; })
157+
.first->graph;
145158
EliminateDeadCode(graph); // Tracing of some ops depends on the DCE trick
146159
ConstantPropagation(graph);
147160
auto grad_spec = differentiate(graph);
@@ -164,22 +177,24 @@ void testADFormulas() {
164177
void testDifferentiate() {
165178
// Note: can't use IRParser for this test due to issue #23989
166179
auto graph = std::make_shared<Graph>();
167-
const auto type = TensorType::create(at::ScalarType::Float, at::kCPU, {2, 3, 4}, {12, 4, 1});
180+
const auto type = TensorType::create(
181+
at::ScalarType::Float, at::kCPU, {2, 3, 4}, {12, 4, 1});
168182

169183
// Builds graph a * b * a + b
170184
auto* a = graph->addInput()->setType(type);
171185
auto* b = graph->addInput()->setType(type);
172186
auto* cOne = graph->insertConstant(1);
173187

174-
auto* ab = graph->insertNode(graph->create(aten::mul, /*num_outputs =*/ 1));
188+
auto* ab = graph->insertNode(graph->create(aten::mul, /*num_outputs =*/1));
175189
ab->addInput(a);
176190
ab->addInput(b);
177191

178-
auto* aba = graph->insertNode(graph->create(aten::mul, /*num_outputs =*/ 1));
192+
auto* aba = graph->insertNode(graph->create(aten::mul, /*num_outputs =*/1));
179193
aba->addInput(ab->output());
180194
aba->addInput(a);
181195

182-
auto* abaplusb = graph->insertNode(graph->create(aten::add, /*num_outputs =*/ 1));
196+
auto* abaplusb =
197+
graph->insertNode(graph->create(aten::add, /*num_outputs =*/1));
183198
abaplusb->addInput(aba->output());
184199
abaplusb->addInput(b);
185200
abaplusb->addInput(cOne);

Diff for: test/cpp/jit/test_class_import.cpp

+2-4
Original file line numberDiff line numberDiff line change
@@ -2,8 +2,8 @@
22
#include <test/cpp/jit/test_utils.h>
33

44
#include <ATen/core/qualified_name.h>
5-
#include <torch/csrc/jit/serialization/import_source.h>
65
#include <torch/csrc/jit/frontend/resolver.h>
6+
#include <torch/csrc/jit/serialization/import_source.h>
77
#include <torch/torch.h>
88

99
namespace torch {
@@ -40,9 +40,7 @@ static void import_libs(
4040
SourceImporter si(
4141
cu,
4242
&tensor_table,
43-
[&](const std::string& name) -> std::shared_ptr<Source> {
44-
return src;
45-
},
43+
[&](const std::string& name) -> std::shared_ptr<Source> { return src; },
4644
/*version=*/2);
4745
si.loadNamedType(QualifiedName(class_name));
4846
}

Diff for: test/cpp/jit/test_custom_class.cpp

-1
Original file line numberDiff line numberDiff line change
@@ -138,7 +138,6 @@ torch::RegisterOperators& register_take_instance() {
138138

139139
static auto& ensure_take_instance_registered = register_take_instance();
140140

141-
142141
} // namespace
143142

144143
void testTorchbindIValueAPI() {

0 commit comments

Comments
 (0)