Skip to content

Commit 9a99f8f

Browse files
committed
Add comments for public functions and classes
ghstack-source-id: d94aecc Pull Request resolved: #222
1 parent 8b5b7ca commit 9a99f8f

File tree

11 files changed

+140
-15
lines changed

11 files changed

+140
-15
lines changed

.gitignore

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -2,11 +2,7 @@
22
.git/**
33
**/__pycache__/**
44
**.coverage
5-
.coverage
6-
multipy/runtime/interpreter/cpython
7-
multipy/runtime/interpreter/cpython/**
85
**/build/**
96
**/CMakeFiles/**
10-
multipy/runtime/interpreter/frozen/**
117
multipy/runtime/example/generated/
128
*.egg-info

README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -153,7 +153,7 @@ cmake --build . --config Release
153153

154154
### Running unit tests for `multipy::runtime`
155155

156-
We first need to generate the neccessary examples. First make sure your python enviroment has [torch](https://pytorch.org). Afterwards, once `multipy::runtime` is built, run the following (executed automatically for `docker` and `pip` above):
156+
We first need to generate the necessary examples. First make sure your python environment has [torch](https://pytorch.org). Afterwards, once `multipy::runtime` is built, run the following (executed automatically for `docker` and `pip` above):
157157

158158
```
159159
cd multipy/multipy/runtime

multipy/runtime/deploy.h

Lines changed: 80 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,8 @@ struct TORCH_API InterpreterSession {
4646
// NOLINTNEXTLINE(bugprone-exception-escape)
4747
~InterpreterSession();
4848

49-
// global imports a python object from the specified module.
49+
// `global` imports a python object from the specified module.
50+
// Specifically, `global` is analogous to "from `module` import `name`" in Python.
5051
Obj global(const char* module, const char* name) {
5152
return impl_->global(module, name);
5253
}
@@ -57,6 +58,8 @@ struct TORCH_API InterpreterSession {
5758
// InterpreterSession* I)' instead. We will have no backwards compatibility
5859
// guarantees for this function.
5960
ReplicatedObj createMovable(Obj obj);
61+
62+
// Converts a `ReplicatedObj` to an `Obj` on this InterpreterSession.
6063
Obj fromMovable(const ReplicatedObj& obj);
6164

6265
protected:
@@ -73,6 +76,9 @@ struct TORCH_API InterpreterSession {
7376
std::function<void()> deconstruction_callback_ = nullptr;
7477
};
7578

79+
// An `Interpreter` represents an individual subinterpreter created by
80+
// `torch::deploy`. It allows for the creation of `InterpreterSession` objects
81+
// which allow users to interact with python objects.
7682
class TORCH_API Interpreter {
7783
private:
7884
void* handle_;
@@ -84,17 +90,24 @@ class TORCH_API Interpreter {
8490
multipy::optional<EmbeddedFile> torchPluginFile_;
8591

8692
public:
93+
// Creates an Interpreter which is managed by `manager` and using the
94+
// environment `env`
8795
Interpreter(InterpreterManager* manager, std::shared_ptr<Environment> env);
96+
97+
// Creates an Interpreter using environment `env` which is not tied to
98+
// an InterpreterManager.
8899
explicit Interpreter(std::shared_ptr<Environment> env)
89100
: Interpreter(nullptr, env) {}
90101

102+
// Gets a new `InterpreterSession` from this Interpreter.
91103
InterpreterSession acquireSession() const {
92104
if (manager_) {
93105
return InterpreterSession(pImpl_->acquireSession(), manager_);
94106
} else {
95107
return InterpreterSession(pImpl_->acquireSession());
96108
}
97109
}
110+
98111
~Interpreter();
99112
Interpreter(Interpreter&& rhs) noexcept
100113
: handle_(rhs.handle_),
@@ -113,17 +126,28 @@ class TORCH_API Interpreter {
113126

114127
struct Package;
115128

129+
// The default LoadBalancer for torch::deploy which handles allocating and
130+
// freeing subinterpreters.
116131
struct TORCH_API LoadBalancer {
132+
// Creates a LoadBalancer which handles `n` interpreters.
117133
explicit LoadBalancer(size_t n)
118134
: uses_(new uint64_t[8 * n]), allocated_(n), n_(n) {
119135
// 8*... to avoid false sharing of atomics on the same cache line
120136
memset(uses_.get(), 0, 8 * n_ * sizeof(uint64_t));
121137
}
138+
139+
// Changes the number of subinterpreters which are handled by the load
140+
// balancer.
122141
void setResourceLimit(size_t n) {
123142
MULTIPY_INTERNAL_ASSERT(n <= allocated_);
124143
n_ = n;
125144
}
145+
146+
// Allocates a subinterpreter and returns its ID, which is used to free it.
126147
int acquire();
148+
149+
// Frees the subinterpreter with ID `where`. This ID is returned by
150+
// `LoadBalancer::acquire()`
127151
void free(int where);
128152

129153
private:
@@ -134,13 +158,19 @@ struct TORCH_API LoadBalancer {
134158
size_t n_;
135159
};
136160

161+
// An `InterpreterManager` handles the interaction of multiple subinterpreters
162+
// such as allocating subinterpreters, or load balancing the subinterpreters.
137163
struct TORCH_API InterpreterManager {
164+
// constructor for `InterpreterManager` which takes the number of interpreters
165+
// (usually correlates to number of cores on your cpu), and a pointer to an
166+
// `Environment`. The default uses the local python env.
138167
explicit InterpreterManager(
139168
size_t nInterp = 2,
140169
std::shared_ptr<Environment> env = std::make_shared<NoopEnvironment>());
141170

142-
// get a free model, guarenteed that no other user of acquireOne has the same
143-
// model. It _is_ possible that other users will be using the interpreter.
171+
// Returns a free interpreter or an arbitrary interpreter if there are none free.
172+
// To ensure data safety it's best to match the number of calling threads to the size of the interpreter
173+
// pool to avoid sharing an interpreter.
144174
InterpreterSession acquireOne() {
145175
int where = resources_.acquire();
146176
InterpreterSession I = instances_[where].acquireSession();
@@ -154,11 +184,19 @@ struct TORCH_API InterpreterManager {
154184
at::ArrayRef<Interpreter> allInstances() {
155185
return instances_;
156186
}
187+
188+
// debugging tool to control the size of the loadBalancer
189+
// and change the number of interpreters on the fly
157190
void debugLimitInterpreters(size_t N) {
158191
AT_ASSERT(N <= instances_.size());
159192
resources_.setResourceLimit(N);
160193
}
194+
195+
// loads a package from a file with name `uri`
161196
Package loadPackage(const std::string& uri);
197+
198+
// loads a package from a `PyTorchStreamReader` or any class other which uses
199+
// `ReadAdapterInterface`
162200
Package loadPackage(
163201
std::shared_ptr<caffe2::serialize::ReadAdapterInterface> reader);
164202

@@ -171,10 +209,12 @@ struct TORCH_API InterpreterManager {
171209
registeredModuleSource_[std::move(name)] = std::move(src);
172210
}
173211

174-
// Util function for debugging.
212+
// Util function for debugging which outputs the number of registered modules.
175213
size_t countRegisteredModuleSources() {
176214
return registeredModuleSource_.size();
177215
}
216+
217+
// Converts `obj` from an `InterpreterSession` `I` into a `ReplicatedObj`.
178218
ReplicatedObj createMovable(Obj obj, InterpreterSession* I);
179219
InterpreterManager(const InterpreterManager&) = delete;
180220
InterpreterManager& operator=(const InterpreterManager&) = delete;
@@ -204,33 +244,55 @@ struct TORCH_API ReplicatedObjImpl {
204244
InterpreterManager* manager_;
205245
};
206246

247+
// ReplicatedObj represents a python object that can be used on multiple interpreters. Calling
248+
// methods on this will pick an arbitrary interpreter to run on, transfer the object there if not already present,
249+
// and run the method. A replicated object can be converted to an interpreter specific `Obj` using
250+
// `InterpreterSession::fromMovable(ReplicatedObj)`
207251
struct TORCH_API ReplicatedObj {
252+
// Default constructor for `ReplicatedObj`
208253
ReplicatedObj() : pImpl_(nullptr) {}
254+
255+
// Creates a new InterpreterSession on onThisInterpreter if specified else
256+
// uses an arbitrary one from the InterpreterManager.
209257
InterpreterSession acquireSession(
210258
const Interpreter* onThisInterpreter = nullptr) const;
211259
at::IValue operator()(at::ArrayRef<at::IValue> args) const {
212260
auto I = acquireSession();
213261
return I.self(args).toIValue();
214262
}
215263

264+
// Invokes the Python function or class on an arbitrary interpreter with arguments
265+
// given by the tuple args and named arguments given by the dictionary kwargs
266+
// (equivalent to python's `__call__`).
216267
[[nodiscard]] at::IValue callKwargs(
217268
std::vector<at::IValue> args,
218269
std::unordered_map<std::string, c10::IValue> kwargs) const {
219270
auto I = acquireSession();
220271
return I.self.callKwargs(std::move(args), std::move(kwargs)).toIValue();
221272
}
222273

274+
// Invokes the Python function or class on an arbitrary interpreter with named arguments given by the
275+
// dictionary kwargs (equivalent to python's `__call__`).
223276
[[nodiscard]] at::IValue callKwargs(
224277
std::unordered_map<std::string, c10::IValue> kwargs) const {
225278
auto I = acquireSession();
226279
return I.self.callKwargs(std::move(kwargs)).toIValue();
227280
}
228281

229-
[[nodiscard]] bool hasattr(const char* name) const {
282+
// Returns true if `ReplicatedObj` has attribute with name `attr` and false
283+
// otherwise. This is done on an arbitrary `InterpreterSession` which belongs
284+
// to the `ReplicatedObj`'s manager.
285+
[[nodiscard]] bool hasattr(const char* attr) const {
230286
auto I = acquireSession();
231-
return I.self.hasattr(name);
287+
return I.self.hasattr(attr);
232288
}
289+
290+
// Deletes `ReplicatedObj` from onThisInterpreter. If onThisInterpreter is
291+
// `nullptr`, unload is called on all interpreters belonging to the
292+
// ReplicatedObject's InterpreterManager
233293
void unload(const Interpreter* onThisInterpreter = nullptr);
294+
295+
// Converts `ReplicatedObj` to `Obj` on `InterpreterSession` `I`
234296
Obj toObj(InterpreterSession* I);
235297

236298
private:
@@ -242,21 +304,24 @@ struct TORCH_API ReplicatedObj {
242304
friend struct InterpreterManager;
243305
};
244306

307+
// PythonMethodWrapper is a more specific instance of a
308+
// ReplicatedObj which represents a python method, and
309+
// is therefore callable and has argument names accessible.
245310
class PythonMethodWrapper : public torch::IMethod {
246-
// PythonMethodWrapper is a more specific instance of a
247-
// ReplicatedObj which represents a python method, and
248-
// is therefore callable and has argument names accessible.
249311
public:
250312
// TODO(whc) make bound method pickleable, then directly construct from that
313+
251314
PythonMethodWrapper(
252315
torch::deploy::ReplicatedObj model,
253316
std::string methodName)
254317
: model_(std::move(model)), methodName_(std::move(methodName)) {}
255318

319+
// Returns the name of the python method.
256320
const std::string& name() const override {
257321
return methodName_;
258322
}
259323

324+
// Overrides the `()` operator to call the underlying python method.
260325
c10::IValue operator()(
261326
std::vector<c10::IValue> args,
262327
const IValueMap& kwargs = IValueMap()) const override {
@@ -274,6 +339,8 @@ class PythonMethodWrapper : public torch::IMethod {
274339
std::string methodName_;
275340
};
276341

342+
// Package is a wrapper around `torch.package` which allows loading a
343+
// PyTorch model and its dependencies from a package.
277344
struct TORCH_API Package {
278345
// shorthand for getting the object as a pickle resource in the package
279346
ReplicatedObj loadPickle(const std::string& module, const std::string& file) {
@@ -308,12 +375,16 @@ struct TORCH_API Package {
308375
}
309376
#endif
310377

378+
// Allocates an `InterpreterSession` and loads the appropriate torch.package
379+
// with it.
311380
InterpreterSession acquireSession() {
312381
auto I = manager_->acquireOne();
313382
I.self =
314383
I.impl_->createOrGetPackageImporterFromContainerFile(containerFile_);
315384
return I;
316385
}
386+
387+
// Converts an `Obj` from `InterpreterSession` `I` into a `ReplicatedObj`.
317388
ReplicatedObj createMovable(Obj obj, InterpreterSession* I) {
318389
return manager_->createMovable(obj, I);
319390
}

multipy/runtime/elf_file.h

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,7 @@
1616
namespace torch {
1717
namespace deploy {
1818

19+
// A representation of a section of an ElfFile.
1920
struct Section {
2021
Section() {}
2122
explicit Section(
@@ -35,13 +36,18 @@ struct Section {
3536
}
3637
};
3738

39+
// TODO: consolidate other ELF file related functions in loader.cpp to this file
40+
3841
/*
3942
* This class provides utilities to handle ELF files. Only 64-bit ELF files are supported.
4043
*/
41-
// TODO: consolidate other ELF file related functions in loader.cpp to this file
4244
class ElfFile {
4345
public:
46+
// Constructs an `ElfFile` from the file with the corresponding `filename`.
4447
explicit ElfFile(const char* filename);
48+
49+
// Finds and returns a `Section` with the corresponding `name`. If nothing is
50+
// found, then a `multipy::nullopt` is returned.
4551
multipy::optional<Section> findSection(const char* name) const;
4652

4753
private:

multipy/runtime/embedded_file.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,17 +11,20 @@
1111
namespace torch {
1212
namespace deploy {
1313

14+
// Specifies which ELF section to load the interpreter from and the associated config.
1415
struct ExeSection {
1516
const char* sectionName;
1617
bool customLoader;
1718
};
1819

20+
// Specifies which ELF symbols to load the interpreter from and the associated config.
1921
struct InterpreterSymbol {
2022
const char* startSym;
2123
const char* endSym;
2224
bool customLoader;
2325
};
2426

27+
// EmbeddedFile makes it easier to load a custom interpreter embedded within the binary.
2528
struct EmbeddedFile {
2629
std::string libraryName{""};
2730
bool customLoader{false};

multipy/runtime/environment.h

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -42,6 +42,7 @@ class Environment {
4242
fclose(zippedFile);
4343
return zipArchive;
4444
}
45+
4546
void setupZippedPythonModules(const std::string& pythonAppDir) {
4647
#ifdef FBCODE_CAFFE2
4748
extraPythonPaths_.push_back(getZippedArchive(
@@ -56,14 +57,19 @@ class Environment {
5657
}
5758

5859
public:
60+
// Environment constructor which creates a random temporary directory as
61+
// a directory for the zipped python modules.
5962
explicit Environment() {
6063
char tempDirName[] = "/tmp/torch_deploy_zipXXXXXX";
6164
char* tempDirectory = mkdtemp(tempDirName);
6265
setupZippedPythonModules(tempDirectory);
6366
}
67+
// Environment constructor which takes a file name for the
68+
// directory for the zipped python modules.
6469
explicit Environment(const std::string& pythonAppDir) {
6570
setupZippedPythonModules(pythonAppDir);
6671
}
72+
// Destructor for Environment.
6773
virtual ~Environment() {
6874
auto rmCmd = "rm -rf " + extraPythonLibrariesDir_;
6975
(void)system(rmCmd.c_str());

0 commit comments

Comments
 (0)