From d43942dff881c9fbd7de36e6cb710f33567bca42 Mon Sep 17 00:00:00 2001 From: Sahan Paliskara Date: Thu, 20 Oct 2022 16:55:27 -0700 Subject: [PATCH 1/5] Add comments for public functions and classes [ghstack-poisoned] --- multipy/runtime/deploy.h | 65 +++++++++++++++++-- multipy/runtime/elf_file.h | 3 +- .../runtime/interpreter/interpreter_impl.h | 16 +++++ multipy/runtime/interpreter/plugin_registry.h | 15 +++++ multipy/runtime/mem_file.h | 4 ++ multipy/runtime/noop_environment.h | 1 + multipy/runtime/path_environment.h | 1 + 7 files changed, 98 insertions(+), 7 deletions(-) diff --git a/multipy/runtime/deploy.h b/multipy/runtime/deploy.h index 0e5ff225..4fa67edc 100644 --- a/multipy/runtime/deploy.h +++ b/multipy/runtime/deploy.h @@ -57,6 +57,8 @@ struct TORCH_API InterpreterSession { // InterpreterSession* I)' instead. We will have no backwards compatibility // guarentees for this function. ReplicatedObj createMovable(Obj obj); + + // Converts a `ReplicatedObj` to an `Obj` on this InterpreterSession. Obj fromMovable(const ReplicatedObj& obj); protected: @@ -73,6 +75,9 @@ struct TORCH_API InterpreterSession { std::function deconstruction_callback_ = nullptr; }; +// An `Interpreter` represents an invidual subinterpreter created by `torch::deploy`. +// It allows for the creation of `InterpreterSession` objects which allow users to interact with +// python objects. class TORCH_API Interpreter { private: void* handle_; @@ -84,10 +89,14 @@ class TORCH_API Interpreter { multipy::optional torchPluginFile_; public: + // Create an Interpreter which is managed by `manager` and using the enviroment `env` Interpreter(InterpreterManager* manager, std::shared_ptr env); + + // Create an Interpreter manager using enviroment `env` which is not tied to an Interpreter Manager. explicit Interpreter(std::shared_ptr env) : Interpreter(nullptr, env) {} + // Get a new `InterpreterSession` from this Interpreter. InterpreterSession acquireSession() const { if (manager_) { return InterpreterSession(pImpl_->acquireSession(), manager_); @@ -95,6 +104,7 @@ class TORCH_API Interpreter { return InterpreterSession(pImpl_->acquireSession()); } } + ~Interpreter(); Interpreter(Interpreter&& rhs) noexcept : handle_(rhs.handle_), @@ -113,17 +123,26 @@ class TORCH_API Interpreter { struct Package; +// The default LoadBalancer for torch::deploy which handles allocating and freeing subinterpreters. struct TORCH_API LoadBalancer { + + // create a Loadbalancer which handles `n` interpreters. explicit LoadBalancer(size_t n) : uses_(new uint64_t[8 * n]), allocated_(n), n_(n) { // 8*... to avoid false sharing of atomics on the same cache line memset(uses_.get(), 0, 8 * n_ * sizeof(uint64_t)); } + + // change the amount of subinterpreters which is handled by the load balancer. void setResourceLimit(size_t n) { MULTIPY_INTERNAL_ASSERT(n <= allocated_); n_ = n; } + + // allocate an subinterpreter, and return its ID which is used to free it. int acquire(); + + // free the subinterpreter with ID `where`. This ID is returned by `LoadBalancer::acquire()` void free(int where); private: @@ -134,13 +153,18 @@ struct TORCH_API LoadBalancer { size_t n_; }; +// An `InterpreterManager` handles the interaction of multiple subinterpreters such as allocating +// subinterpreters, or load balancing the subinterpreters. struct TORCH_API InterpreterManager { + + // constructor for `InterpreterManager` which takes the number of interpreters + // (usually correlates to number of cores on your cpu), and a pointer to an `Enviroment`. 
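+  // A minimal construction sketch (editorial illustration, not from the original
+  // diff; the site-packages path below is hypothetical):
+  //   InterpreterManager manager(4); // four subinterpreters, default environment
+  //   InterpreterManager managerWithEnv(
+  //       4, std::make_shared<PathEnvironment>("/path/to/site-packages"));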
explicit InterpreterManager( size_t nInterp = 2, std::shared_ptr env = std::make_shared()); - // get a free model, guarenteed that no other user of acquireOne has the same - // model. It _is_ possible that other users will be using the interpreter. + // get a free InterpreterSession, guarenteed that no other user of acquireOne has the same + // InterpreterSession. It _is_ possible that other users will be using the interpreter. InterpreterSession acquireOne() { int where = resources_.acquire(); InterpreterSession I = instances_[where].acquireSession(); @@ -154,11 +178,18 @@ struct TORCH_API InterpreterManager { at::ArrayRef allInstances() { return instances_; } + + // debugging tool to control the size of the loadBalancer + // and change the number of interpreters on the fly void debugLimitInterpreters(size_t N) { AT_ASSERT(N <= instances_.size()); resources_.setResourceLimit(N); } + + // loads a package from a file with name `uri` Package loadPackage(const std::string& uri); + + // loads a package from a `PyTorchStreamReader` or any class other which uses `ReadAdapterInterface` Package loadPackage( std::shared_ptr reader); @@ -171,10 +202,12 @@ struct TORCH_API InterpreterManager { registeredModuleSource_[std::move(name)] = std::move(src); } - // Util function for debugging. + // Util function for debugging which outputs the number of registered modules. size_t countRegisteredModuleSources() { return registeredModuleSource_.size(); } + + // Converts `obj` from on `InterpreterSession` I into a `ReplicatedObj`. ReplicatedObj createMovable(Obj obj, InterpreterSession* I); InterpreterManager(const InterpreterManager&) = delete; InterpreterManager& operator=(const InterpreterManager&) = delete; @@ -189,6 +222,7 @@ struct TORCH_API InterpreterManager { std::unordered_map registeredModuleSource_; }; +// Underlying implementation for ReplicatedObj. struct TORCH_API ReplicatedObjImpl { ReplicatedObjImpl( size_t object_id, @@ -204,8 +238,15 @@ struct TORCH_API ReplicatedObjImpl { InterpreterManager* manager_; }; +// A python object which is Replicated from an `Obj` such that it is able to move around to different `InterpreterSessions` +// by using `InterpreterSession::fromMovable(ReplicatedObj)` struct TORCH_API ReplicatedObj { + + // default constructor for `ReplicatedObj` ReplicatedObj() : pImpl_(nullptr) {} + + // Create an `InterpreterSession` using `onThisInterpreter`. If `onThisInterpreter` is + // a `nullptr', then the associated `InterpreterManager` allocates it. InterpreterSession acquireSession( const Interpreter* onThisInterpreter = nullptr) const; at::IValue operator()(at::ArrayRef args) const { @@ -213,6 +254,7 @@ struct TORCH_API ReplicatedObj { return I.self(args).toIValue(); } + // invokes `callKwargs` using the underlying python object, and returns the output as an `IValue`. [[nodiscard]] at::IValue callKwargs( std::vector args, std::unordered_map kwargs) const { @@ -220,16 +262,20 @@ struct TORCH_API ReplicatedObj { return I.self.callKwargs(std::move(args), std::move(kwargs)).toIValue(); } + // invokes `callKwargs` using the underlying python object, and returns the output as an `IValue`. [[nodiscard]] at::IValue callKwargs( std::unordered_map kwargs) const { auto I = acquireSession(); return I.self.callKwargs(std::move(kwargs)).toIValue(); } + // invokes `hasattr` using the underlying python object, and returns the output as an `IValue`. 
[[nodiscard]] bool hasattr(const char* name) const { auto I = acquireSession(); return I.self.hasattr(name); } + + void unload(const Interpreter* onThisInterpreter = nullptr); Obj toObj(InterpreterSession* I); @@ -242,21 +288,24 @@ struct TORCH_API ReplicatedObj { friend struct InterpreterManager; }; +// PythonMethodWrapper is a more specific instance of a +// ReplicatedObj which represents a python method, and +// is therefore callable and has argument names accessible. class PythonMethodWrapper : public torch::IMethod { - // PythonMethodWrapper is a more specific instance of a - // ReplicatedObj which represents a python method, and - // is therefore callable and has argument names accessible. public: // TODO(whc) make bound method pickleable, then directly construct from that + PythonMethodWrapper( torch::deploy::ReplicatedObj model, std::string methodName) : model_(std::move(model)), methodName_(std::move(methodName)) {} + // return the name of the python method. const std::string& name() const override { return methodName_; } + // overrides the `()` operater to call the underlying python method. c10::IValue operator()( std::vector args, const IValueMap& kwargs = IValueMap()) const override { @@ -274,6 +323,7 @@ class PythonMethodWrapper : public torch::IMethod { std::string methodName_; }; +// An object to encapsulate a `torch::package` which can act as part (or entire) enviroment for subinterpreters. struct TORCH_API Package { // shorthand for getting the object as a pickle resource in the package ReplicatedObj loadPickle(const std::string& module, const std::string& file) { @@ -308,12 +358,15 @@ struct TORCH_API Package { } #endif + // allocate an `InterpreterSession` and load the appropriate torch.package with it. InterpreterSession acquireSession() { auto I = manager_->acquireOne(); I.self = I.impl_->createOrGetPackageImporterFromContainerFile(containerFile_); return I; } + + // Converts an `Obj` from `InterpreterSession` `I` into a `ReplicatedObj`. ReplicatedObj createMovable(Obj obj, InterpreterSession* I) { return manager_->createMovable(obj, I); } diff --git a/multipy/runtime/elf_file.h b/multipy/runtime/elf_file.h index fdfd550c..3e92d69f 100644 --- a/multipy/runtime/elf_file.h +++ b/multipy/runtime/elf_file.h @@ -35,10 +35,11 @@ struct Section { } }; +// TODO: consolidate other ELF file related functions in loader.cpp to this file + /* * This class provie utilities to handle ELF file. Only support 64bit ELF file. */ -// TODO: consolidate other ELF file related functions in loader.cpp to this file class ElfFile { public: explicit ElfFile(const char* filename); diff --git a/multipy/runtime/interpreter/interpreter_impl.h b/multipy/runtime/interpreter/interpreter_impl.h index 43cf70e2..abbfd20d 100644 --- a/multipy/runtime/interpreter/interpreter_impl.h +++ b/multipy/runtime/interpreter/interpreter_impl.h @@ -19,6 +19,7 @@ namespace deploy { struct InterpreterSessionImpl; struct Obj; +// Representation a Pickled Object struct PickledObject { std::string data_; std::vector storages_; @@ -28,6 +29,7 @@ struct PickledObject { std::shared_ptr containerFile_; }; +// The underlying implementation of `Obj` which holds the underlying `py::object`. struct InterpreterObj { friend struct Obj; friend struct ReplicatedObjImpl; @@ -72,14 +74,26 @@ struct Obj { : isDefault_(false), baseObj_(baseObj) {} Obj() : isDefault_(true), baseObj_(nullptr) {} + // convert underlying `py::object` into an `IValue`. 
at::IValue toIValue() const; + + // overwrite `()` to use `call` of the underlying `py::object` with `Obj`s as args. + // The output is represented as an `Obj`. Obj operator()(at::ArrayRef args); + + // overwrite `()` to use `call` of the underlying `py::object` with `IValue`s as args. + // The output is represented as an `Obj`. Obj operator()(at::ArrayRef args); + + // calls callKwargs from the underlying `py::object` Obj callKwargs( std::vector args, std::unordered_map kwargs); + // calls callKwargs from the underlying `py::object`. The output is represented as an `Obj`. Obj callKwargs(std::unordered_map kwargs); + // calls hasattr from the underlying `py::object`. The output is represented as an `Obj`. bool hasattr(const char* attr); + // calls attr from the underlying `py::object`. The output is represented as an `Obj`. Obj attr(const char* attr); private: @@ -87,6 +101,7 @@ struct Obj { std::shared_ptr baseObj_; }; +// The underlying implementation of `InterpreterSession` struct InterpreterSessionImpl { friend struct Package; friend struct ReplicatedObj; @@ -132,6 +147,7 @@ struct InterpreterSessionImpl { } }; +// The underlying implementation of `Interpreter` struct InterpreterImpl { virtual InterpreterSessionImpl* acquireSession() = 0; virtual void setFindModule( diff --git a/multipy/runtime/interpreter/plugin_registry.h b/multipy/runtime/interpreter/plugin_registry.h index 593a5cfc..eda8c3c6 100644 --- a/multipy/runtime/interpreter/plugin_registry.h +++ b/multipy/runtime/interpreter/plugin_registry.h @@ -11,21 +11,36 @@ namespace py = pybind11; namespace multipy { +// A `Converter` is used in order to convert `PyObject`s/`py::object` into +// an `IValue` or some other representation usch as storage. class Converter { public: virtual ~Converter() = default; + // convert a `py::handle` to an `IValue` virtual multipy::optional toTypeInferredIValue( py::handle input) = 0; + + // convert an `IValue` into a `py::object` virtual multipy::optional toPyObject(at::IValue ivalue) = 0; + + // convert an `PyObject` into a `Storage` virtual multipy::optional createStorage(PyObject* obj) = 0; + + // create a `PyObject` from `storage` virtual multipy::optional createPyObject( const at::Storage& storage) = 0; + + // return the `THPDtype` of `scalarType` virtual multipy::optional getTHPDtype( at::ScalarType scalarType) = 0; }; +// register a converter to be used by torch::deploy / multipy. +// The order of the registration of the converters is dictated by the order of compilation. void registerConverter(Converter*); +// deregister a converter from torch::deploy / multipy +// The order of the deregistration of the converters is dictated by the order of compilation. void deregisterConverter(Converter*); at::IValue toTypeInferredIValue(py::handle input); diff --git a/multipy/runtime/mem_file.h b/multipy/runtime/mem_file.h index 404a3fbb..a68ff020 100644 --- a/multipy/runtime/mem_file.h +++ b/multipy/runtime/mem_file.h @@ -51,6 +51,8 @@ struct MemFile { [[nodiscard]] const char* data() const { return (const char*)mem_; } + + // return the file descriptor of the underlying file. 
int valid() { return fcntl(fd_, F_GETFD) != -1 || errno != EBADF; } @@ -62,6 +64,8 @@ struct MemFile { close(fd_); } } + + // return the size of the underlying file defined by the `MemFile` size_t size() { return n_bytes_; } diff --git a/multipy/runtime/noop_environment.h b/multipy/runtime/noop_environment.h index 4856f502..16f3a173 100644 --- a/multipy/runtime/noop_environment.h +++ b/multipy/runtime/noop_environment.h @@ -11,6 +11,7 @@ namespace torch { namespace deploy { +// An empty Enviroment class NoopEnvironment : public Environment { public: void configureInterpreter(Interpreter* /* interp */) override {} diff --git a/multipy/runtime/path_environment.h b/multipy/runtime/path_environment.h index 268f8dc6..7fef73ff 100644 --- a/multipy/runtime/path_environment.h +++ b/multipy/runtime/path_environment.h @@ -12,6 +12,7 @@ namespace torch { namespace deploy { +// An Enviroment which is defined by a specific path to python code (ie. condas sitepackages) class PathEnvironment : public Environment { public: explicit PathEnvironment(std::string path) : path_(std::move(path)) {} From c564a4999c978eec12b2e601a0ff4dc6eb728f27 Mon Sep 17 00:00:00 2001 From: Sahan Paliskara Date: Fri, 21 Oct 2022 14:47:19 -0700 Subject: [PATCH 2/5] Update on "Add comments for public functions and classes" Adds comments to public functions and public classes such that they show up in documentation in order to guide users. [ghstack-poisoned] --- README.md | 2 +- multipy/runtime/deploy.h | 48 +++++++++++-------- multipy/runtime/elf_file.h | 5 ++ multipy/runtime/embedded_file.h | 3 ++ multipy/runtime/environment.h | 6 +++ .../runtime/interpreter/interpreter_impl.h | 16 +++---- multipy/runtime/noop_environment.h | 2 +- multipy/runtime/path_environment.h | 2 +- 8 files changed, 52 insertions(+), 32 deletions(-) diff --git a/README.md b/README.md index 10355beb..c562b2aa 100644 --- a/README.md +++ b/README.md @@ -163,7 +163,7 @@ cmake --build . --config Release ### Running unit tests for `multipy::runtime` -We first need to generate the neccessary examples. First make sure your python enviroment has [torch](https://pytorch.org). Afterwards, once `multipy::runtime` is built, run the following (executed automatically for `docker` and `pip` above): +We first need to generate the neccessary examples. First make sure your python environment has [torch](https://pytorch.org). Afterwards, once `multipy::runtime` is built, run the following (executed automatically for `docker` and `pip` above): ``` cd multipy/multipy/runtime diff --git a/multipy/runtime/deploy.h b/multipy/runtime/deploy.h index 4fa67edc..08c36bcf 100644 --- a/multipy/runtime/deploy.h +++ b/multipy/runtime/deploy.h @@ -89,14 +89,14 @@ class TORCH_API Interpreter { multipy::optional torchPluginFile_; public: - // Create an Interpreter which is managed by `manager` and using the enviroment `env` + // Creates an Interpreter which is managed by `manager` and using the environment `env` Interpreter(InterpreterManager* manager, std::shared_ptr env); - // Create an Interpreter manager using enviroment `env` which is not tied to an Interpreter Manager. + // Creates an Interpreter manager using environment `env` which is not tied to an Interpreter Manager. explicit Interpreter(std::shared_ptr env) : Interpreter(nullptr, env) {} - // Get a new `InterpreterSession` from this Interpreter. + // Gets a new `InterpreterSession` from this Interpreter. 
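+  // Usage sketch (editorial illustration, not from the original diff; the path
+  // passed to PathEnvironment is hypothetical):
+  //   Interpreter interp(std::make_shared<PathEnvironment>("/path/to/site-packages"));
+  //   InterpreterSession session = interp.acquireSession();
+  //   at::IValue version = session.global("sys", "version").toIValue();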
InterpreterSession acquireSession() const { if (manager_) { return InterpreterSession(pImpl_->acquireSession(), manager_); @@ -126,23 +126,23 @@ struct Package; // The default LoadBalancer for torch::deploy which handles allocating and freeing subinterpreters. struct TORCH_API LoadBalancer { - // create a Loadbalancer which handles `n` interpreters. + // Creates a Loadbalancer which handles `n` interpreters. explicit LoadBalancer(size_t n) : uses_(new uint64_t[8 * n]), allocated_(n), n_(n) { // 8*... to avoid false sharing of atomics on the same cache line memset(uses_.get(), 0, 8 * n_ * sizeof(uint64_t)); } - // change the amount of subinterpreters which is handled by the load balancer. + // Changes the amount of subinterpreters which is handled by the load balancer. void setResourceLimit(size_t n) { MULTIPY_INTERNAL_ASSERT(n <= allocated_); n_ = n; } - // allocate an subinterpreter, and return its ID which is used to free it. + // Allocates an subinterpreter, and return its ID which is used to free it. int acquire(); - // free the subinterpreter with ID `where`. This ID is returned by `LoadBalancer::acquire()` + // Frees the subinterpreter with ID `where`. This ID is returned by `LoadBalancer::acquire()` void free(int where); private: @@ -158,13 +158,16 @@ struct TORCH_API LoadBalancer { struct TORCH_API InterpreterManager { // constructor for `InterpreterManager` which takes the number of interpreters - // (usually correlates to number of cores on your cpu), and a pointer to an `Enviroment`. + // (usually correlates to number of cores on your cpu), and a pointer to an `Environment`. + // The defualt uses the local python env. explicit InterpreterManager( size_t nInterp = 2, std::shared_ptr env = std::make_shared()); // get a free InterpreterSession, guarenteed that no other user of acquireOne has the same - // InterpreterSession. It _is_ possible that other users will be using the interpreter. + // InterpreterSession. It _is_ possible that other users will be using the interpreter if there are + // no free InterpreterSessions. Unless you are very careful to only use free interpreters, then do not assume + // that the `Obj`s are isolated from each other. InterpreterSession acquireOne() { int where = resources_.acquire(); InterpreterSession I = instances_[where].acquireSession(); @@ -222,7 +225,6 @@ struct TORCH_API InterpreterManager { std::unordered_map registeredModuleSource_; }; -// Underlying implementation for ReplicatedObj. struct TORCH_API ReplicatedObjImpl { ReplicatedObjImpl( size_t object_id, @@ -242,10 +244,10 @@ struct TORCH_API ReplicatedObjImpl { // by using `InterpreterSession::fromMovable(ReplicatedObj)` struct TORCH_API ReplicatedObj { - // default constructor for `ReplicatedObj` + // Default constructor for `ReplicatedObj` ReplicatedObj() : pImpl_(nullptr) {} - // Create an `InterpreterSession` using `onThisInterpreter`. If `onThisInterpreter` is + // Creates an `InterpreterSession` using `onThisInterpreter`. If `onThisInterpreter` is // a `nullptr', then the associated `InterpreterManager` allocates it. InterpreterSession acquireSession( const Interpreter* onThisInterpreter = nullptr) const; @@ -254,7 +256,8 @@ struct TORCH_API ReplicatedObj { return I.self(args).toIValue(); } - // invokes `callKwargs` using the underlying python object, and returns the output as an `IValue`. + // Calls an `ReplicatedObj` callable, with arguments given by the tuple args and named arguments given by the dictionary + // kwargs. 
This is done on an arbitrary `InterpreterSession` which belongs to the `ReplicatedObj`'s manager. [[nodiscard]] at::IValue callKwargs( std::vector args, std::unordered_map kwargs) const { @@ -262,21 +265,26 @@ struct TORCH_API ReplicatedObj { return I.self.callKwargs(std::move(args), std::move(kwargs)).toIValue(); } - // invokes `callKwargs` using the underlying python object, and returns the output as an `IValue`. + // Calls an `ReplicatedObj` callable, with named arguments given by the dictionary kwargs. + // This is done on an arbitrary `InterpreterSession` which belongs to the `ReplicatedObj`'s manager. [[nodiscard]] at::IValue callKwargs( std::unordered_map kwargs) const { auto I = acquireSession(); return I.self.callKwargs(std::move(kwargs)).toIValue(); } - // invokes `hasattr` using the underlying python object, and returns the output as an `IValue`. - [[nodiscard]] bool hasattr(const char* name) const { + // Returns true if `ReplicatedObj` has attribute with name `attr` and false otherwise. + // This is done on an arbitrary `InterpreterSession` which belongs to the `ReplicatedObj`'s manager. + [[nodiscard]] bool hasattr(const char* attr) const { auto I = acquireSession(); - return I.self.hasattr(name); + return I.self.hasattr(attr); } - + // Deletes `ReplicatedObj` from onThisInterpreter, if onThisInterpreter is `nullptr`, + // unload is called on all interpreters belonging to the ReplicatedObject's InterpreterManager void unload(const Interpreter* onThisInterpreter = nullptr); + + // Converts `ReplicatedObj` to `Obj` on `InterpreterSession` `I` Obj toObj(InterpreterSession* I); private: @@ -323,7 +331,7 @@ class PythonMethodWrapper : public torch::IMethod { std::string methodName_; }; -// An object to encapsulate a `torch::package` which can act as part (or entire) enviroment for subinterpreters. +// An object to encapsulate a `torch::package` which can act as part (or entire) environment for subinterpreters. struct TORCH_API Package { // shorthand for getting the object as a pickle resource in the package ReplicatedObj loadPickle(const std::string& module, const std::string& file) { @@ -358,7 +366,7 @@ struct TORCH_API Package { } #endif - // allocate an `InterpreterSession` and load the appropriate torch.package with it. + // Allocates an `InterpreterSession` and load the appropriate torch.package with it. InterpreterSession acquireSession() { auto I = manager_->acquireOne(); I.self = diff --git a/multipy/runtime/elf_file.h b/multipy/runtime/elf_file.h index 3e92d69f..033e5d24 100644 --- a/multipy/runtime/elf_file.h +++ b/multipy/runtime/elf_file.h @@ -16,6 +16,7 @@ namespace torch { namespace deploy { +// A representation of a section of an ElfFile. struct Section { Section() {} explicit Section( @@ -42,7 +43,11 @@ struct Section { */ class ElfFile { public: + + // Constructs an Elffile with the corresponding `filename` explicit ElfFile(const char* filename); + + // Finds and return a `Section` with the corresponding `name`. If nothing is found, then a `multipy::nullopt` is returned. multipy::optional
findSection(const char* name) const; private: diff --git a/multipy/runtime/embedded_file.h b/multipy/runtime/embedded_file.h index 21b361b4..ff5f9912 100644 --- a/multipy/runtime/embedded_file.h +++ b/multipy/runtime/embedded_file.h @@ -11,17 +11,20 @@ namespace torch { namespace deploy { +// Represents an ExeSection of an EmbeddedFile. struct ExeSection { const char* sectionName; bool customLoader; }; +// These are symbols used by the subinterpreters. struct InterpreterSymbol { const char* startSym; const char* endSym; bool customLoader; }; +// Represents an EmbeddedFile which is a file which contains a binary for a subinterprerter. struct EmbeddedFile { std::string libraryName{""}; bool customLoader{false}; diff --git a/multipy/runtime/environment.h b/multipy/runtime/environment.h index 3320616b..c2783c80 100644 --- a/multipy/runtime/environment.h +++ b/multipy/runtime/environment.h @@ -42,6 +42,7 @@ class Environment { fclose(zippedFile); return zipArchive; } + void setupZippedPythonModules(const std::string& pythonAppDir) { #ifdef FBCODE_CAFFE2 extraPythonPaths_.push_back(getZippedArchive( @@ -56,14 +57,19 @@ class Environment { } public: + // Environment constructor which creates a random temporary directory as + // a directory for the zipped python modules. explicit Environment() { char tempDirName[] = "/tmp/torch_deploy_zipXXXXXX"; char* tempDirectory = mkdtemp(tempDirName); setupZippedPythonModules(tempDirectory); } + // Environment constructor which takes a file name for the + // directory for the zipped python modules. explicit Environment(const std::string& pythonAppDir) { setupZippedPythonModules(pythonAppDir); } + // Deconstructor for Environment. virtual ~Environment() { auto rmCmd = "rm -rf " + extraPythonLibrariesDir_; (void)system(rmCmd.c_str()); diff --git a/multipy/runtime/interpreter/interpreter_impl.h b/multipy/runtime/interpreter/interpreter_impl.h index abbfd20d..24783a5a 100644 --- a/multipy/runtime/interpreter/interpreter_impl.h +++ b/multipy/runtime/interpreter/interpreter_impl.h @@ -74,26 +74,24 @@ struct Obj { : isDefault_(false), baseObj_(baseObj) {} Obj() : isDefault_(true), baseObj_(nullptr) {} - // convert underlying `py::object` into an `IValue`. + // return `IValue` representation. at::IValue toIValue() const; - // overwrite `()` to use `call` of the underlying `py::object` with `Obj`s as args. - // The output is represented as an `Obj`. + // Call an `Obj` callable, with arguments given by the tuple args. Obj operator()(at::ArrayRef args); - // overwrite `()` to use `call` of the underlying `py::object` with `IValue`s as args. - // The output is represented as an `Obj`. + // Call an `Obj` callable, with arguments given by the tuple args. Obj operator()(at::ArrayRef args); - // calls callKwargs from the underlying `py::object` + // Call an `Obj` callable, with arguments given by the tuple args, and named arguments given by the dictionary kwargs. Obj callKwargs( std::vector args, std::unordered_map kwargs); - // calls callKwargs from the underlying `py::object`. The output is represented as an `Obj`. + // Call an `Obj` callable, with named arguments given by the dictionary kwargs. Obj callKwargs(std::unordered_map kwargs); - // calls hasattr from the underlying `py::object`. The output is represented as an `Obj`. + // Returns true if `Obj` has attribute with name `attr` and false otherwise. bool hasattr(const char* attr); - // calls attr from the underlying `py::object`. The output is represented as an `Obj`. + // Returns attribute `attr` from `Obj`. 
This is equivalent to calling `getattr(Obj, attr)` in python. Obj attr(const char* attr); private: diff --git a/multipy/runtime/noop_environment.h b/multipy/runtime/noop_environment.h index 16f3a173..2891e29a 100644 --- a/multipy/runtime/noop_environment.h +++ b/multipy/runtime/noop_environment.h @@ -11,7 +11,7 @@ namespace torch { namespace deploy { -// An empty Enviroment +// The local python Environment class NoopEnvironment : public Environment { public: void configureInterpreter(Interpreter* /* interp */) override {} diff --git a/multipy/runtime/path_environment.h b/multipy/runtime/path_environment.h index 7fef73ff..620f6f61 100644 --- a/multipy/runtime/path_environment.h +++ b/multipy/runtime/path_environment.h @@ -12,7 +12,7 @@ namespace torch { namespace deploy { -// An Enviroment which is defined by a specific path to python code (ie. condas sitepackages) +// An Environment which is defined by a specific path to python code (ie. condas sitepackages) class PathEnvironment : public Environment { public: explicit PathEnvironment(std::string path) : path_(std::move(path)) {} From a59a83f8ca8a4e0c2742d56ed032781d44f9b26a Mon Sep 17 00:00:00 2001 From: Sahan Paliskara Date: Fri, 21 Oct 2022 15:10:17 -0700 Subject: [PATCH 3/5] Update on "Add comments for public functions and classes" Adds comments to public functions and public classes such that they show up in documentation in order to guide users. Differential Revision: [D40606131](https://our.internmc.facebook.com/intern/diff/D40606131) [ghstack-poisoned] --- .gitignore | 4 - multipy/runtime/deploy.h | 229 +++++++++--------- multipy/runtime/elf_file.h | 43 ++-- multipy/runtime/embedded_file.h | 18 +- multipy/runtime/environment.h | 29 ++- .../runtime/interpreter/interpreter_impl.h | 119 +++++---- multipy/runtime/interpreter/plugin_registry.h | 33 +-- multipy/runtime/mem_file.h | 46 ++-- multipy/runtime/noop_environment.h | 4 +- multipy/runtime/path_environment.h | 9 +- 10 files changed, 256 insertions(+), 278 deletions(-) diff --git a/.gitignore b/.gitignore index f0494a34..7dd3dc60 100644 --- a/.gitignore +++ b/.gitignore @@ -2,11 +2,7 @@ .git/** **/__pycache__/** **.coverage -.coverage -multipy/runtime/interpreter/cpython -multipy/runtime/interpreter/cpython/** **/build/** **/CMakeFiles/** -multipy/runtime/interpreter/frozen/** multipy/runtime/example/generated/ *.egg-info diff --git a/multipy/runtime/deploy.h b/multipy/runtime/deploy.h index 08c36bcf..4db0da8f 100644 --- a/multipy/runtime/deploy.h +++ b/multipy/runtime/deploy.h @@ -12,10 +12,10 @@ #include #include -#include #include #include #include +#include #include #include #include @@ -30,24 +30,21 @@ struct LoadBalancer; struct TORCH_API InterpreterSession { friend struct LoadBalancer; - explicit InterpreterSession(InterpreterSessionImpl* impl) noexcept + explicit InterpreterSession(InterpreterSessionImpl *impl) noexcept : impl_(impl), manager_(nullptr) {} - InterpreterSession( - InterpreterSessionImpl* impl, - InterpreterManager* manager) noexcept + InterpreterSession(InterpreterSessionImpl *impl, + InterpreterManager *manager) noexcept : impl_(impl), manager_(manager) {} PickledObject pickleObj(Obj obj); - bool isOwner(Obj obj) { - return impl_->isOwner(obj); - } + bool isOwner(Obj obj) { return impl_->isOwner(obj); } // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) Obj self; // when retrieved from a PythonMovable this will be set. 
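+  // Sketch of moving an object between interpreters via createMovable/fromMovable
+  // (editorial illustration, not from the original diff):
+  //   InterpreterManager manager(2);
+  //   InterpreterSession a = manager.acquireOne();
+  //   Obj pi = a.global("math", "pi");
+  //   ReplicatedObj moved = manager.createMovable(pi, &a);
+  //   InterpreterSession b = manager.acquireOne();
+  //   Obj piAgain = b.fromMovable(moved);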
- InterpreterSession(InterpreterSession&&) noexcept = default; + InterpreterSession(InterpreterSession &&) noexcept = default; // NOLINTNEXTLINE(bugprone-exception-escape) ~InterpreterSession(); // global imports a python object from the specified module. - Obj global(const char* module, const char* name) { + Obj global(const char *module, const char *name) { return impl_->global(module, name); } Obj fromIValue(at::IValue ivalue) { @@ -59,40 +56,42 @@ struct TORCH_API InterpreterSession { ReplicatedObj createMovable(Obj obj); // Converts a `ReplicatedObj` to an `Obj` on this InterpreterSession. - Obj fromMovable(const ReplicatedObj& obj); + Obj fromMovable(const ReplicatedObj &obj); - protected: +protected: bool attachDeconstructorCallback(std::function func); - private: +private: friend struct ReplicatedObj; friend struct Package; friend struct InterpreterManager; friend struct ReplicatedObjImpl; inline static size_t nextObjectId_ = 0; std::unique_ptr impl_; - InterpreterManager* manager_; // if created from one + InterpreterManager *manager_; // if created from one std::function deconstruction_callback_ = nullptr; }; -// An `Interpreter` represents an invidual subinterpreter created by `torch::deploy`. -// It allows for the creation of `InterpreterSession` objects which allow users to interact with -// python objects. +// An `Interpreter` represents an invidual subinterpreter created by +// `torch::deploy`. It allows for the creation of `InterpreterSession` objects +// which allow users to interact with python objects. class TORCH_API Interpreter { - private: - void* handle_; +private: + void *handle_; std::unique_ptr pImpl_; - InterpreterManager* manager_; // optional if managed by one + InterpreterManager *manager_; // optional if managed by one std::shared_ptr env_; EmbeddedFile interpreterFile_; multipy::optional torchPluginFile_; - public: - // Creates an Interpreter which is managed by `manager` and using the environment `env` - Interpreter(InterpreterManager* manager, std::shared_ptr env); +public: + // Creates an Interpreter which is managed by `manager` and using the + // environment `env` + Interpreter(InterpreterManager *manager, std::shared_ptr env); - // Creates an Interpreter manager using environment `env` which is not tied to an Interpreter Manager. + // Creates an Interpreter manager using environment `env` which is not tied to + // an Interpreter Manager. explicit Interpreter(std::shared_ptr env) : Interpreter(nullptr, env) {} @@ -106,24 +105,24 @@ class TORCH_API Interpreter { } ~Interpreter(); - Interpreter(Interpreter&& rhs) noexcept - : handle_(rhs.handle_), - pImpl_(std::move(rhs.pImpl_)), + Interpreter(Interpreter &&rhs) noexcept + : handle_(rhs.handle_), pImpl_(std::move(rhs.pImpl_)), manager_(rhs.manager_), interpreterFile_(std::move(rhs.interpreterFile_)), torchPluginFile_(std::move(rhs.torchPluginFile_)) { rhs.handle_ = nullptr; } - Interpreter(const Interpreter&) = delete; - Interpreter& operator=(const Interpreter&) = delete; - Interpreter& operator=(Interpreter&&) = delete; + Interpreter(const Interpreter &) = delete; + Interpreter &operator=(const Interpreter &) = delete; + Interpreter &operator=(Interpreter &&) = delete; friend struct InterpreterManager; }; struct Package; -// The default LoadBalancer for torch::deploy which handles allocating and freeing subinterpreters. +// The default LoadBalancer for torch::deploy which handles allocating and +// freeing subinterpreters. 
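+// Roughly the pattern `InterpreterManager::acquireOne()` follows internally
+// (editorial sketch, not from the original diff; `nInterpreters` is assumed):
+//   LoadBalancer balancer(nInterpreters);
+//   int where = balancer.acquire();   // index of a free or least-used interpreter
+//   // ... use the interpreter at index `where` ...
+//   balancer.free(where);             // release it when done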
struct TORCH_API LoadBalancer { // Creates a Loadbalancer which handles `n` interpreters. @@ -133,7 +132,8 @@ struct TORCH_API LoadBalancer { memset(uses_.get(), 0, 8 * n_ * sizeof(uint64_t)); } - // Changes the amount of subinterpreters which is handled by the load balancer. + // Changes the amount of subinterpreters which is handled by the load + // balancer. void setResourceLimit(size_t n) { MULTIPY_INTERNAL_ASSERT(n <= allocated_); n_ = n; @@ -142,10 +142,11 @@ struct TORCH_API LoadBalancer { // Allocates an subinterpreter, and return its ID which is used to free it. int acquire(); - // Frees the subinterpreter with ID `where`. This ID is returned by `LoadBalancer::acquire()` + // Frees the subinterpreter with ID `where`. This ID is returned by + // `LoadBalancer::acquire()` void free(int where); - private: +private: // NOLINTNEXTLINE(modernize-avoid-c-arrays,cppcoreguidelines-avoid-c-arrays) std::unique_ptr uses_; // the approximate count of the number of users of interpreter @@ -153,21 +154,22 @@ struct TORCH_API LoadBalancer { size_t n_; }; -// An `InterpreterManager` handles the interaction of multiple subinterpreters such as allocating -// subinterpreters, or load balancing the subinterpreters. +// An `InterpreterManager` handles the interaction of multiple subinterpreters +// such as allocating subinterpreters, or load balancing the subinterpreters. struct TORCH_API InterpreterManager { // constructor for `InterpreterManager` which takes the number of interpreters - // (usually correlates to number of cores on your cpu), and a pointer to an `Environment`. - // The defualt uses the local python env. + // (usually correlates to number of cores on your cpu), and a pointer to an + // `Environment`. The defualt uses the local python env. explicit InterpreterManager( size_t nInterp = 2, std::shared_ptr env = std::make_shared()); - // get a free InterpreterSession, guarenteed that no other user of acquireOne has the same - // InterpreterSession. It _is_ possible that other users will be using the interpreter if there are - // no free InterpreterSessions. Unless you are very careful to only use free interpreters, then do not assume - // that the `Obj`s are isolated from each other. + // get a free InterpreterSession, guarenteed that no other user of acquireOne + // has the same InterpreterSession. It _is_ possible that other users will be + // using the interpreter if there are no free InterpreterSessions. Unless you + // are very careful to only use free interpreters, then do not assume that the + // `Obj`s are isolated from each other. 
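+  // Example (editorial sketch, not from the original diff):
+  //   InterpreterManager manager(2);
+  //   InterpreterSession I = manager.acquireOne();
+  //   Obj sysPath = I.global("sys", "path");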
InterpreterSession acquireOne() { int where = resources_.acquire(); InterpreterSession I = instances_[where].acquireSession(); @@ -178,9 +180,7 @@ struct TORCH_API InterpreterManager { // use to make sure something gets run on all interpreters, such as loading or // unloading a model eagerly - at::ArrayRef allInstances() { - return instances_; - } + at::ArrayRef allInstances() { return instances_; } // debugging tool to control the size of the loadBalancer // and change the number of interpreters on the fly @@ -190,11 +190,12 @@ struct TORCH_API InterpreterManager { } // loads a package from a file with name `uri` - Package loadPackage(const std::string& uri); + Package loadPackage(const std::string &uri); - // loads a package from a `PyTorchStreamReader` or any class other which uses `ReadAdapterInterface` - Package loadPackage( - std::shared_ptr reader); + // loads a package from a `PyTorchStreamReader` or any class other which uses + // `ReadAdapterInterface` + Package + loadPackage(std::shared_ptr reader); // convience function for loading some python source code as a module across // all interpreters. this can be used for writing tests of deploy that need to @@ -211,12 +212,12 @@ struct TORCH_API InterpreterManager { } // Converts `obj` from on `InterpreterSession` I into a `ReplicatedObj`. - ReplicatedObj createMovable(Obj obj, InterpreterSession* I); - InterpreterManager(const InterpreterManager&) = delete; - InterpreterManager& operator=(const InterpreterManager&) = delete; - InterpreterManager& operator=(InterpreterManager&&) = delete; + ReplicatedObj createMovable(Obj obj, InterpreterSession *I); + InterpreterManager(const InterpreterManager &) = delete; + InterpreterManager &operator=(const InterpreterManager &) = delete; + InterpreterManager &operator=(InterpreterManager &&) = delete; - private: +private: friend struct Package; friend struct InterpreterSession; friend struct InterpreterSessionImpl; @@ -226,68 +227,73 @@ struct TORCH_API InterpreterManager { }; struct TORCH_API ReplicatedObjImpl { - ReplicatedObjImpl( - size_t object_id, - // NOLINTNEXTLINE(modernize-pass-by-value) - PickledObject data, - InterpreterManager* manager) + ReplicatedObjImpl(size_t object_id, + // NOLINTNEXTLINE(modernize-pass-by-value) + PickledObject data, InterpreterManager *manager) : objectId_(object_id), data_(data), manager_(manager) {} // NOLINTNEXTLINE(bugprone-exception-escape) ~ReplicatedObjImpl(); - void unload(const Interpreter* onThisInterpreter); + void unload(const Interpreter *onThisInterpreter); int64_t objectId_; PickledObject data_; - InterpreterManager* manager_; + InterpreterManager *manager_; }; -// A python object which is Replicated from an `Obj` such that it is able to move around to different `InterpreterSessions` -// by using `InterpreterSession::fromMovable(ReplicatedObj)` +// A python object which is Replicated from an `Obj` such that it is able to +// move around to different `InterpreterSessions` by using +// `InterpreterSession::fromMovable(ReplicatedObj)` struct TORCH_API ReplicatedObj { // Default constructor for `ReplicatedObj` ReplicatedObj() : pImpl_(nullptr) {} - // Creates an `InterpreterSession` using `onThisInterpreter`. If `onThisInterpreter` is - // a `nullptr', then the associated `InterpreterManager` allocates it. - InterpreterSession acquireSession( - const Interpreter* onThisInterpreter = nullptr) const; + // Creates an `InterpreterSession` using `onThisInterpreter`. 
If + // `onThisInterpreter` is a `nullptr', then the associated + // `InterpreterManager` allocates it. + InterpreterSession + acquireSession(const Interpreter *onThisInterpreter = nullptr) const; at::IValue operator()(at::ArrayRef args) const { auto I = acquireSession(); return I.self(args).toIValue(); } - // Calls an `ReplicatedObj` callable, with arguments given by the tuple args and named arguments given by the dictionary - // kwargs. This is done on an arbitrary `InterpreterSession` which belongs to the `ReplicatedObj`'s manager. - [[nodiscard]] at::IValue callKwargs( - std::vector args, - std::unordered_map kwargs) const { + // Calls an `ReplicatedObj` callable, with arguments given by the tuple args + // and named arguments given by the dictionary kwargs. This is done on an + // arbitrary `InterpreterSession` which belongs to the `ReplicatedObj`'s + // manager. + [[nodiscard]] at::IValue + callKwargs(std::vector args, + std::unordered_map kwargs) const { auto I = acquireSession(); return I.self.callKwargs(std::move(args), std::move(kwargs)).toIValue(); } - // Calls an `ReplicatedObj` callable, with named arguments given by the dictionary kwargs. - // This is done on an arbitrary `InterpreterSession` which belongs to the `ReplicatedObj`'s manager. - [[nodiscard]] at::IValue callKwargs( - std::unordered_map kwargs) const { + // Calls an `ReplicatedObj` callable, with named arguments given by the + // dictionary kwargs. This is done on an arbitrary `InterpreterSession` which + // belongs to the `ReplicatedObj`'s manager. + [[nodiscard]] at::IValue + callKwargs(std::unordered_map kwargs) const { auto I = acquireSession(); return I.self.callKwargs(std::move(kwargs)).toIValue(); } - // Returns true if `ReplicatedObj` has attribute with name `attr` and false otherwise. - // This is done on an arbitrary `InterpreterSession` which belongs to the `ReplicatedObj`'s manager. - [[nodiscard]] bool hasattr(const char* attr) const { + // Returns true if `ReplicatedObj` has attribute with name `attr` and false + // otherwise. This is done on an arbitrary `InterpreterSession` which belongs + // to the `ReplicatedObj`'s manager. + [[nodiscard]] bool hasattr(const char *attr) const { auto I = acquireSession(); return I.self.hasattr(attr); } - // Deletes `ReplicatedObj` from onThisInterpreter, if onThisInterpreter is `nullptr`, - // unload is called on all interpreters belonging to the ReplicatedObject's InterpreterManager - void unload(const Interpreter* onThisInterpreter = nullptr); + // Deletes `ReplicatedObj` from onThisInterpreter, if onThisInterpreter is + // `nullptr`, unload is called on all interpreters belonging to the + // ReplicatedObject's InterpreterManager + void unload(const Interpreter *onThisInterpreter = nullptr); // Converts `ReplicatedObj` to `Obj` on `InterpreterSession` `I` - Obj toObj(InterpreterSession* I); + Obj toObj(InterpreterSession *I); - private: +private: ReplicatedObj(std::shared_ptr pImpl) : pImpl_(std::move(pImpl)) {} std::shared_ptr pImpl_; @@ -300,23 +306,19 @@ struct TORCH_API ReplicatedObj { // ReplicatedObj which represents a python method, and // is therefore callable and has argument names accessible. 
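+// Usage sketch (editorial illustration, not from the original diff;
+// "my_package.pt", "model", "model.pkl" and "forward" are hypothetical names):
+//   Package package = manager.loadPackage("my_package.pt");
+//   ReplicatedObj model = package.loadPickle("model", "model.pkl");
+//   PythonMethodWrapper forward(model, "forward");
+//   c10::IValue out = forward({torch::ones({2, 2})});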
class PythonMethodWrapper : public torch::IMethod { - public: +public: // TODO(whc) make bound method pickleable, then directly construct from that - PythonMethodWrapper( - torch::deploy::ReplicatedObj model, - std::string methodName) + PythonMethodWrapper(torch::deploy::ReplicatedObj model, + std::string methodName) : model_(std::move(model)), methodName_(std::move(methodName)) {} // return the name of the python method. - const std::string& name() const override { - return methodName_; - } + const std::string &name() const override { return methodName_; } // overrides the `()` operater to call the underlying python method. - c10::IValue operator()( - std::vector args, - const IValueMap& kwargs = IValueMap()) const override { + c10::IValue operator()(std::vector args, + const IValueMap &kwargs = IValueMap()) const override { // TODO(whc) ideally, pickle the method itself as replicatedobj, to skip // this lookup each time auto modelSession = model_.acquireSession(); @@ -324,24 +326,25 @@ class PythonMethodWrapper : public torch::IMethod { return method.callKwargs(args, kwargs).toIValue(); } - private: - void setArgumentNames(std::vector&) const override; +private: + void setArgumentNames(std::vector &) const override; torch::deploy::ReplicatedObj model_; std::string methodName_; }; -// An object to encapsulate a `torch::package` which can act as part (or entire) environment for subinterpreters. +// An object to encapsulate a `torch::package` which can act as part (or entire) +// environment for subinterpreters. struct TORCH_API Package { // shorthand for getting the object as a pickle resource in the package - ReplicatedObj loadPickle(const std::string& module, const std::string& file) { + ReplicatedObj loadPickle(const std::string &module, const std::string &file) { auto I = acquireSession(); auto loaded = I.self.attr("load_pickle")({module, file}); return createMovable(loaded, &I); } #ifdef FBCODE_CAFFE2 - std::string loadText(const std::string& packageName, const std::string& key) { + std::string loadText(const std::string &packageName, const std::string &key) { auto I = acquireSession(); return I.self.attr("load_text")({packageName, key}) .toIValue() @@ -356,9 +359,8 @@ struct TORCH_API Package { // std::string decodedBinary = package->loadBinary("extra_files", // "greeting").toStringRef(); // std::cout << decodedBinary; --> outputs "hello" - std::string loadBinary( - const std::string& packageName, - const std::string& key) { + std::string loadBinary(const std::string &packageName, + const std::string &key) { auto I = acquireSession(); return I.self.attr("load_binary")({packageName, key}) .toIValue() @@ -366,7 +368,8 @@ struct TORCH_API Package { } #endif - // Allocates an `InterpreterSession` and load the appropriate torch.package with it. + // Allocates an `InterpreterSession` and load the appropriate torch.package + // with it. InterpreterSession acquireSession() { auto I = manager_->acquireOne(); I.self = @@ -375,28 +378,26 @@ struct TORCH_API Package { } // Converts an `Obj` from `InterpreterSession` `I` into a `ReplicatedObj`. 
- ReplicatedObj createMovable(Obj obj, InterpreterSession* I) { + ReplicatedObj createMovable(Obj obj, InterpreterSession *I) { return manager_->createMovable(obj, I); } - private: - Package( - const std::string& uri, - InterpreterManager* - pm) // or really any of the constructors to our zip file format +private: + Package(const std::string &uri, + InterpreterManager + *pm) // or really any of the constructors to our zip file format : manager_(pm), containerFile_( std::make_shared(uri)) {} - Package( - std::shared_ptr reader, - InterpreterManager* - pm) // or really any of the constructors to our zip file format + Package(std::shared_ptr reader, + InterpreterManager + *pm) // or really any of the constructors to our zip file format : manager_(pm), containerFile_( std::make_shared(reader)) {} friend struct ReplicatedObj; friend struct InterpreterManager; - InterpreterManager* manager_; + InterpreterManager *manager_; std::shared_ptr containerFile_; }; diff --git a/multipy/runtime/elf_file.h b/multipy/runtime/elf_file.h index 033e5d24..b15a2389 100644 --- a/multipy/runtime/elf_file.h +++ b/multipy/runtime/elf_file.h @@ -7,10 +7,10 @@ #pragma once #include +#include #include #include #include -#include #include namespace torch { @@ -19,21 +19,16 @@ namespace deploy { // A representation of a section of an ElfFile. struct Section { Section() {} - explicit Section( - std::shared_ptr _memfile, - const char* _name, - const char* _start, - size_t _len = 0) + explicit Section(std::shared_ptr _memfile, const char *_name, + const char *_start, size_t _len = 0) : memfile(_memfile), name(_name), start(_start), len(_len) {} std::shared_ptr memfile; - const char* name{nullptr}; - const char* start{nullptr}; + const char *name{nullptr}; + const char *start{nullptr}; size_t len{0}; - operator bool() const { - return start != nullptr; - } + operator bool() const { return start != nullptr; } }; // TODO: consolidate other ELF file related functions in loader.cpp to this file @@ -42,44 +37,44 @@ struct Section { * This class provie utilities to handle ELF file. Only support 64bit ELF file. */ class ElfFile { - public: - +public: // Constructs an Elffile with the corresponding `filename` - explicit ElfFile(const char* filename); + explicit ElfFile(const char *filename); - // Finds and return a `Section` with the corresponding `name`. If nothing is found, then a `multipy::nullopt` is returned. - multipy::optional
findSection(const char* name) const;
+  // Finds and returns a `Section` with the corresponding `name`. If nothing is
+  // found, then a `multipy::nullopt` is returned.
+  multipy::optional<Section>
findSection(const char *name) const; - private: - Section toSection(Elf64_Shdr* shdr) { +private: + Section toSection(Elf64_Shdr *shdr) { auto nameOff = shdr->sh_name; auto shOff = shdr->sh_offset; auto len = shdr->sh_size; - const char* name = ""; + const char *name = ""; if (strtabSection_) { MULTIPY_CHECK(nameOff >= 0 && nameOff < strtabSection_.len); name = strtabSection_.start + nameOff; } - const char* start = memFile_->data() + shOff; + const char *start = memFile_->data() + shOff; return Section{memFile_, name, start, len}; } - [[nodiscard]] const char* str(size_t off) const { + [[nodiscard]] const char *str(size_t off) const { MULTIPY_CHECK(off < strtabSection_.len, "String table index out of range"); return strtabSection_.start + off; } void checkFormat() const; std::shared_ptr memFile_; - Elf64_Ehdr* ehdr_; - Elf64_Shdr* shdrList_; + Elf64_Ehdr *ehdr_; + Elf64_Shdr *shdrList_; size_t numSections_; Section strtabSection_; std::vector
sections_;
 };

-multipy::optional<Section>
searchForSection(const char* name);
+multipy::optional<Section>
searchForSection(const char *name); } // namespace deploy } // namespace torch diff --git a/multipy/runtime/embedded_file.h b/multipy/runtime/embedded_file.h index ff5f9912..6adbfdbc 100644 --- a/multipy/runtime/embedded_file.h +++ b/multipy/runtime/embedded_file.h @@ -13,30 +13,30 @@ namespace deploy { // Represents an ExeSection of an EmbeddedFile. struct ExeSection { - const char* sectionName; + const char *sectionName; bool customLoader; }; // These are symbols used by the subinterpreters. struct InterpreterSymbol { - const char* startSym; - const char* endSym; + const char *startSym; + const char *endSym; bool customLoader; }; -// Represents an EmbeddedFile which is a file which contains a binary for a subinterprerter. +// Represents an EmbeddedFile which is a file which contains a binary for a +// subinterprerter. struct EmbeddedFile { std::string libraryName{""}; bool customLoader{false}; - EmbeddedFile( - std::string name, - const std::initializer_list& sections, - const std::initializer_list symbols); + EmbeddedFile(std::string name, + const std::initializer_list §ions, + const std::initializer_list symbols); ~EmbeddedFile(); - EmbeddedFile& operator=(const EmbeddedFile&) = delete; + EmbeddedFile &operator=(const EmbeddedFile &) = delete; }; } // namespace deploy diff --git a/multipy/runtime/environment.h b/multipy/runtime/environment.h index c2783c80..22047cba 100644 --- a/multipy/runtime/environment.h +++ b/multipy/runtime/environment.h @@ -24,26 +24,25 @@ class Environment { // all zipped python libraries will be written // under this directory std::string extraPythonLibrariesDir_; - std::string getZippedArchive( - const char* zipped_torch_name, - const std::string& pythonAppDir) { + std::string getZippedArchive(const char *zipped_torch_name, + const std::string &pythonAppDir) { // load the zipped torch modules auto zippedTorchSection = searchForSection(zipped_torch_name); - MULTIPY_CHECK( - zippedTorchSection.has_value(), "Missing the zipped torch section"); - const char* zippedTorchStart = zippedTorchSection->start; + MULTIPY_CHECK(zippedTorchSection.has_value(), + "Missing the zipped torch section"); + const char *zippedTorchStart = zippedTorchSection->start; auto zippedTorchSize = zippedTorchSection->len; std::string zipArchive = pythonAppDir; auto zippedFile = fopen(zipArchive.c_str(), "wb"); - MULTIPY_CHECK( - zippedFile != nullptr, "Fail to create file: ", strerror(errno)); + MULTIPY_CHECK(zippedFile != nullptr, + "Fail to create file: ", strerror(errno)); fwrite(zippedTorchStart, 1, zippedTorchSize, zippedFile); fclose(zippedFile); return zipArchive; } - void setupZippedPythonModules(const std::string& pythonAppDir) { + void setupZippedPythonModules(const std::string &pythonAppDir) { #ifdef FBCODE_CAFFE2 extraPythonPaths_.push_back(getZippedArchive( ".torch_python_modules", @@ -56,17 +55,17 @@ class Environment { extraPythonLibrariesDir_ = pythonAppDir; } - public: +public: // Environment constructor which creates a random temporary directory as // a directory for the zipped python modules. explicit Environment() { char tempDirName[] = "/tmp/torch_deploy_zipXXXXXX"; - char* tempDirectory = mkdtemp(tempDirName); + char *tempDirectory = mkdtemp(tempDirName); setupZippedPythonModules(tempDirectory); } - // Environment constructor which takes a file name for the + // Environment constructor which takes a file name for the // directory for the zipped python modules. 
- explicit Environment(const std::string& pythonAppDir) { + explicit Environment(const std::string &pythonAppDir) { setupZippedPythonModules(pythonAppDir); } // Deconstructor for Environment. @@ -74,8 +73,8 @@ class Environment { auto rmCmd = "rm -rf " + extraPythonLibrariesDir_; (void)system(rmCmd.c_str()); } - virtual void configureInterpreter(Interpreter* interp) = 0; - virtual const std::vector& getExtraPythonPaths() { + virtual void configureInterpreter(Interpreter *interp) = 0; + virtual const std::vector &getExtraPythonPaths() { return extraPythonPaths_; } }; diff --git a/multipy/runtime/interpreter/interpreter_impl.h b/multipy/runtime/interpreter/interpreter_impl.h index 24783a5a..88b3dd89 100644 --- a/multipy/runtime/interpreter/interpreter_impl.h +++ b/multipy/runtime/interpreter/interpreter_impl.h @@ -29,36 +29,37 @@ struct PickledObject { std::shared_ptr containerFile_; }; -// The underlying implementation of `Obj` which holds the underlying `py::object`. +// The underlying implementation of `Obj` which holds the underlying +// `py::object`. struct InterpreterObj { friend struct Obj; friend struct ReplicatedObjImpl; friend struct InterpreterSessionImpl; - protected: - InterpreterSessionImpl* owningSession_; +protected: + InterpreterSessionImpl *owningSession_; - public: +public: InterpreterObj() : owningSession_(nullptr) {} - explicit InterpreterObj(InterpreterSessionImpl* owningSession) + explicit InterpreterObj(InterpreterSessionImpl *owningSession) : owningSession_(owningSession) {} - InterpreterObj(const InterpreterObj& obj) = delete; - InterpreterObj& operator=(const InterpreterObj& obj) = delete; - InterpreterObj(InterpreterObj&& obj) = default; - InterpreterObj& operator=(InterpreterObj&& obj) = default; + InterpreterObj(const InterpreterObj &obj) = delete; + InterpreterObj &operator=(const InterpreterObj &obj) = delete; + InterpreterObj(InterpreterObj &&obj) = default; + InterpreterObj &operator=(InterpreterObj &&obj) = default; virtual ~InterpreterObj() = default; - private: +private: virtual at::IValue toIValue() const = 0; virtual Obj call(at::ArrayRef> args) = 0; virtual Obj call(at::ArrayRef args) = 0; - virtual Obj callKwargs( - std::vector args, - std::unordered_map kwargs) = 0; - virtual Obj callKwargs( - std::unordered_map kwargs) = 0; - virtual bool hasattr(const char* attr) = 0; - virtual Obj attr(const char* attr) = 0; + virtual Obj + callKwargs(std::vector args, + std::unordered_map kwargs) = 0; + virtual Obj + callKwargs(std::unordered_map kwargs) = 0; + virtual bool hasattr(const char *attr) = 0; + virtual Obj attr(const char *attr) = 0; }; // this is a wrapper class that refers to a PyObject* instance in a particular @@ -83,18 +84,20 @@ struct Obj { // Call an `Obj` callable, with arguments given by the tuple args. Obj operator()(at::ArrayRef args); - // Call an `Obj` callable, with arguments given by the tuple args, and named arguments given by the dictionary kwargs. - Obj callKwargs( - std::vector args, - std::unordered_map kwargs); - // Call an `Obj` callable, with named arguments given by the dictionary kwargs. + // Call an `Obj` callable, with arguments given by the tuple args, and named + // arguments given by the dictionary kwargs. + Obj callKwargs(std::vector args, + std::unordered_map kwargs); + // Call an `Obj` callable, with named arguments given by the dictionary + // kwargs. Obj callKwargs(std::unordered_map kwargs); // Returns true if `Obj` has attribute with name `attr` and false otherwise. 
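+  // Usage sketch (editorial illustration, not from the original diff; assumes
+  // an active InterpreterSession `I`):
+  //   Obj dumps = I.global("json", "dumps");
+  //   bool isCallable = dumps.hasattr("__call__");
+  //   at::IValue doc = dumps.attr("__doc__").toIValue();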
- bool hasattr(const char* attr); - // Returns attribute `attr` from `Obj`. This is equivalent to calling `getattr(Obj, attr)` in python. - Obj attr(const char* attr); + bool hasattr(const char *attr); + // Returns attribute `attr` from `Obj`. This is equivalent to calling + // `getattr(Obj, attr)` in python. + Obj attr(const char *attr); - private: +private: bool isDefault_; std::shared_ptr baseObj_; }; @@ -109,47 +112,41 @@ struct InterpreterSessionImpl { virtual ~InterpreterSessionImpl() = default; - private: - virtual Obj global(const char* module, const char* name) = 0; +private: + virtual Obj global(const char *module, const char *name) = 0; virtual Obj fromIValue(at::IValue value) = 0; virtual Obj createOrGetPackageImporterFromContainerFile( - const std::shared_ptr& - containerFile_) = 0; + const std::shared_ptr + &containerFile_) = 0; virtual PickledObject pickle(Obj container, Obj obj) = 0; - virtual Obj unpickleOrGet(int64_t id, const PickledObject& obj) = 0; + virtual Obj unpickleOrGet(int64_t id, const PickledObject &obj) = 0; virtual void unload(int64_t id) = 0; virtual at::IValue toIValue(Obj obj) const = 0; virtual Obj call(Obj obj, at::ArrayRef args) = 0; virtual Obj call(Obj obj, at::ArrayRef args) = 0; - virtual Obj callKwargs( - Obj obj, - std::vector args, - std::unordered_map kwargs) = 0; - virtual Obj callKwargs( - Obj obj, - std::unordered_map kwargs) = 0; - virtual Obj attr(Obj obj, const char* attr) = 0; - virtual bool hasattr(Obj obj, const char* attr) = 0; - - protected: - int64_t isDefault(Obj obj) const { - return obj.isDefault_; - } + virtual Obj + callKwargs(Obj obj, std::vector args, + std::unordered_map kwargs) = 0; + virtual Obj + callKwargs(Obj obj, std::unordered_map kwargs) = 0; + virtual Obj attr(Obj obj, const char *attr) = 0; + virtual bool hasattr(Obj obj, const char *attr) = 0; + +protected: + int64_t isDefault(Obj obj) const { return obj.isDefault_; } std::shared_ptr getBaseObj(Obj obj) const { return obj.baseObj_; } - bool isOwner(Obj obj) const { - return this == obj.baseObj_->owningSession_; - } + bool isOwner(Obj obj) const { return this == obj.baseObj_->owningSession_; } }; // The underlying implementation of `Interpreter` struct InterpreterImpl { - virtual InterpreterSessionImpl* acquireSession() = 0; + virtual InterpreterSessionImpl *acquireSession() = 0; virtual void setFindModule( - std::function(const std::string&)> + std::function(const std::string &)> find_module) = 0; virtual ~InterpreterImpl() = default; // this will uninitialize python }; @@ -157,9 +154,7 @@ struct InterpreterImpl { // inline definitions for Objs are necessary to avoid introducing a // source file that would need to exist it both the libinterpreter.so and then // the libtorchpy library. 
-inline at::IValue Obj::toIValue() const { - return baseObj_->toIValue(); -} +inline at::IValue Obj::toIValue() const { return baseObj_->toIValue(); } inline Obj Obj::operator()(at::ArrayRef args) { std::vector> copy; @@ -173,22 +168,18 @@ inline Obj Obj::operator()(at::ArrayRef args) { return baseObj_->call(args); } -inline Obj Obj::callKwargs( - std::vector args, - std::unordered_map kwargs) { +inline Obj +Obj::callKwargs(std::vector args, + std::unordered_map kwargs) { return baseObj_->callKwargs(std::move(args), std::move(kwargs)); } -inline Obj Obj::callKwargs( - std::unordered_map kwargs) { +inline Obj +Obj::callKwargs(std::unordered_map kwargs) { return baseObj_->callKwargs(std::move(kwargs)); } -inline bool Obj::hasattr(const char* attr) { - return baseObj_->hasattr(attr); -} +inline bool Obj::hasattr(const char *attr) { return baseObj_->hasattr(attr); } -inline Obj Obj::attr(const char* attr) { - return baseObj_->attr(attr); -} +inline Obj Obj::attr(const char *attr) { return baseObj_->attr(attr); } } // namespace deploy } // namespace torch diff --git a/multipy/runtime/interpreter/plugin_registry.h b/multipy/runtime/interpreter/plugin_registry.h index eda8c3c6..1efd1034 100644 --- a/multipy/runtime/interpreter/plugin_registry.h +++ b/multipy/runtime/interpreter/plugin_registry.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include #include #include @@ -14,38 +15,40 @@ namespace multipy { // A `Converter` is used in order to convert `PyObject`s/`py::object` into // an `IValue` or some other representation usch as storage. class Converter { - public: +public: virtual ~Converter() = default; // convert a `py::handle` to an `IValue` - virtual multipy::optional toTypeInferredIValue( - py::handle input) = 0; + virtual multipy::optional + toTypeInferredIValue(py::handle input) = 0; // convert an `IValue` into a `py::object` virtual multipy::optional toPyObject(at::IValue ivalue) = 0; // convert an `PyObject` into a `Storage` - virtual multipy::optional createStorage(PyObject* obj) = 0; + virtual multipy::optional createStorage(PyObject *obj) = 0; // create a `PyObject` from `storage` - virtual multipy::optional createPyObject( - const at::Storage& storage) = 0; + virtual multipy::optional + createPyObject(const at::Storage &storage) = 0; // return the `THPDtype` of `scalarType` - virtual multipy::optional getTHPDtype( - at::ScalarType scalarType) = 0; + virtual multipy::optional + getTHPDtype(at::ScalarType scalarType) = 0; }; // register a converter to be used by torch::deploy / multipy. -// The order of the registration of the converters is dictated by the order of compilation. -void registerConverter(Converter*); +// The order of the registration of the converters is dictated by the order of +// compilation. +void registerConverter(Converter *); // deregister a converter from torch::deploy / multipy -// The order of the deregistration of the converters is dictated by the order of compilation. -void deregisterConverter(Converter*); +// The order of the deregistration of the converters is dictated by the order of +// compilation. 
+void deregisterConverter(Converter *); at::IValue toTypeInferredIValue(py::handle input); py::object toPyObject(at::IValue ivalue); -at::Storage createStorage(PyObject* obj); -PyObject* createPyObject(const at::Storage& storage); -THPDtype* getTHPDtype(at::ScalarType scalarType); +at::Storage createStorage(PyObject *obj); +PyObject *createPyObject(const at::Storage &storage); +THPDtype *getTHPDtype(at::ScalarType scalarType); } // namespace multipy diff --git a/multipy/runtime/mem_file.h b/multipy/runtime/mem_file.h index a68ff020..32f88698 100644 --- a/multipy/runtime/mem_file.h +++ b/multipy/runtime/mem_file.h @@ -6,15 +6,15 @@ #pragma once +#include +#include #include +#include #include #include #include #include #include -#include -#include -#include namespace torch { namespace deploy { @@ -26,39 +26,35 @@ namespace deploy { // // 2. Used in unity to load the elf file. struct MemFile { - explicit MemFile(const char* filename_) + explicit MemFile(const char *filename_) : fd_(0), mem_(nullptr), n_bytes_(0), name_(filename_) { fd_ = open(filename_, O_RDONLY); - MULTIPY_CHECK( - fd_ != -1, "failed to open {}: {}" + filename_ + strerror(errno)); + MULTIPY_CHECK(fd_ != -1, + "failed to open {}: {}" + filename_ + strerror(errno)); // NOLINTNEXTLINE struct stat s; if (-1 == fstat(fd_, &s)) { close(fd_); // destructors don't run during exceptions - MULTIPY_CHECK( - false, "failed to stat {}: {}" + filename_ + strerror(errno)); + MULTIPY_CHECK(false, + "failed to stat {}: {}" + filename_ + strerror(errno)); } n_bytes_ = s.st_size; mem_ = mmap(nullptr, n_bytes_, PROT_READ, MAP_SHARED, fd_, 0); if (MAP_FAILED == mem_) { close(fd_); - MULTIPY_CHECK( - false, "failed to mmap {}: {}" + filename_ + strerror(errno)); + MULTIPY_CHECK(false, + "failed to mmap {}: {}" + filename_ + strerror(errno)); } } - MemFile(const MemFile&) = delete; - MemFile& operator=(const MemFile&) = delete; - [[nodiscard]] const char* data() const { - return (const char*)mem_; - } + MemFile(const MemFile &) = delete; + MemFile &operator=(const MemFile &) = delete; + [[nodiscard]] const char *data() const { return (const char *)mem_; } // return the file descriptor of the underlying file. 
- int valid() { - return fcntl(fd_, F_GETFD) != -1 || errno != EBADF; - } + int valid() { return fcntl(fd_, F_GETFD) != -1 || errno != EBADF; } ~MemFile() { if (mem_) { - munmap((void*)mem_, n_bytes_); + munmap((void *)mem_, n_bytes_); } if (fd_) { close(fd_); @@ -66,16 +62,12 @@ struct MemFile { } // return the size of the underlying file defined by the `MemFile` - size_t size() { - return n_bytes_; - } - [[nodiscard]] int fd() const { - return fd_; - } + size_t size() { return n_bytes_; } + [[nodiscard]] int fd() const { return fd_; } - private: +private: int fd_; - void* mem_; + void *mem_; size_t n_bytes_; std::string name_; }; diff --git a/multipy/runtime/noop_environment.h b/multipy/runtime/noop_environment.h index 2891e29a..d9a3aa2c 100644 --- a/multipy/runtime/noop_environment.h +++ b/multipy/runtime/noop_environment.h @@ -13,8 +13,8 @@ namespace deploy { // The local python Environment class NoopEnvironment : public Environment { - public: - void configureInterpreter(Interpreter* /* interp */) override {} +public: + void configureInterpreter(Interpreter * /* interp */) override {} }; } // namespace deploy diff --git a/multipy/runtime/path_environment.h b/multipy/runtime/path_environment.h index 620f6f61..e2e0aa99 100644 --- a/multipy/runtime/path_environment.h +++ b/multipy/runtime/path_environment.h @@ -12,13 +12,14 @@ namespace torch { namespace deploy { -// An Environment which is defined by a specific path to python code (ie. condas sitepackages) +// An Environment which is defined by a specific path to python code (ie. condas +// sitepackages) class PathEnvironment : public Environment { - public: +public: explicit PathEnvironment(std::string path) : path_(std::move(path)) {} - void configureInterpreter(Interpreter* interp) override; + void configureInterpreter(Interpreter *interp) override; - private: +private: std::string path_; }; From 04f74f763599dfec9f974ed7d3cecf23dd275fe6 Mon Sep 17 00:00:00 2001 From: Sahan Paliskara Date: Fri, 21 Oct 2022 16:15:43 -0700 Subject: [PATCH 4/5] Update on "Add comments for public functions and classes" Adds comments to public functions and public classes such that they show up in documentation in order to guide users. 
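For reviewers, a rough usage sketch of the API these comments describe (untested; the package path, pickle names, and input shape are made up, and build/link details are omitted):

    #include <multipy/runtime/deploy.h>
    #include <torch/torch.h>

    #include <iostream>
    #include <vector>

    int main() {
      // Four subinterpreters; the default Environment uses the local python env.
      torch::deploy::InterpreterManager manager(4);

      // Hypothetical torch.package archive and pickle names.
      torch::deploy::Package package = manager.loadPackage("my_model.pt");
      torch::deploy::ReplicatedObj model = package.loadPickle("model", "model.pkl");

      // ReplicatedObj::operator() picks a free subinterpreter via the LoadBalancer.
      std::vector<at::IValue> inputs{torch::ones({1, 10})};
      at::Tensor output = model(inputs).toTensor();
      std::cout << output.sizes() << std::endl;
      return 0;
    }

The goal is that the comments added on `InterpreterManager`, `Package`, and `ReplicatedObj` are enough to read a snippet like this without opening the implementation.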
Differential Revision: [D40606131](https://our.internmc.facebook.com/intern/diff/D40606131) [ghstack-poisoned] --- multipy/runtime/deploy.h | 156 ++++++++++-------- multipy/runtime/elf_file.h | 39 +++-- multipy/runtime/embedded_file.h | 15 +- multipy/runtime/environment.h | 27 +-- .../runtime/interpreter/interpreter_impl.h | 107 ++++++------ multipy/runtime/interpreter/plugin_registry.h | 28 ++-- multipy/runtime/mem_file.h | 46 +++--- multipy/runtime/noop_environment.h | 4 +- multipy/runtime/path_environment.h | 6 +- 9 files changed, 234 insertions(+), 194 deletions(-) diff --git a/multipy/runtime/deploy.h b/multipy/runtime/deploy.h index 4db0da8f..1c2e6f9a 100644 --- a/multipy/runtime/deploy.h +++ b/multipy/runtime/deploy.h @@ -12,10 +12,10 @@ #include #include +#include #include #include #include -#include #include #include #include @@ -30,21 +30,24 @@ struct LoadBalancer; struct TORCH_API InterpreterSession { friend struct LoadBalancer; - explicit InterpreterSession(InterpreterSessionImpl *impl) noexcept + explicit InterpreterSession(InterpreterSessionImpl* impl) noexcept : impl_(impl), manager_(nullptr) {} - InterpreterSession(InterpreterSessionImpl *impl, - InterpreterManager *manager) noexcept + InterpreterSession( + InterpreterSessionImpl* impl, + InterpreterManager* manager) noexcept : impl_(impl), manager_(manager) {} PickledObject pickleObj(Obj obj); - bool isOwner(Obj obj) { return impl_->isOwner(obj); } + bool isOwner(Obj obj) { + return impl_->isOwner(obj); + } // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) Obj self; // when retrieved from a PythonMovable this will be set. - InterpreterSession(InterpreterSession &&) noexcept = default; + InterpreterSession(InterpreterSession&&) noexcept = default; // NOLINTNEXTLINE(bugprone-exception-escape) ~InterpreterSession(); // global imports a python object from the specified module. - Obj global(const char *module, const char *name) { + Obj global(const char* module, const char* name) { return impl_->global(module, name); } Obj fromIValue(at::IValue ivalue) { @@ -56,19 +59,19 @@ struct TORCH_API InterpreterSession { ReplicatedObj createMovable(Obj obj); // Converts a `ReplicatedObj` to an `Obj` on this InterpreterSession. - Obj fromMovable(const ReplicatedObj &obj); + Obj fromMovable(const ReplicatedObj& obj); -protected: + protected: bool attachDeconstructorCallback(std::function func); -private: + private: friend struct ReplicatedObj; friend struct Package; friend struct InterpreterManager; friend struct ReplicatedObjImpl; inline static size_t nextObjectId_ = 0; std::unique_ptr impl_; - InterpreterManager *manager_; // if created from one + InterpreterManager* manager_; // if created from one std::function deconstruction_callback_ = nullptr; }; @@ -76,19 +79,19 @@ struct TORCH_API InterpreterSession { // `torch::deploy`. It allows for the creation of `InterpreterSession` objects // which allow users to interact with python objects. 
 class TORCH_API Interpreter {
-private:
-  void *handle_;
+ private:
+  void* handle_;
   std::unique_ptr<InterpreterImpl> pImpl_;
-  InterpreterManager *manager_; // optional if managed by one
+  InterpreterManager* manager_; // optional if managed by one
   std::shared_ptr<Environment> env_;
   EmbeddedFile interpreterFile_;
   multipy::optional<EmbeddedFile> torchPluginFile_;
-public:
+ public:
   // Creates an Interpreter which is managed by `manager` and using the
   // environment `env`
-  Interpreter(InterpreterManager *manager, std::shared_ptr<Environment> env);
+  Interpreter(InterpreterManager* manager, std::shared_ptr<Environment> env);
   // Creates an Interpreter using environment `env` which is not tied to
   // an `InterpreterManager`.
@@ -105,17 +108,18 @@ class TORCH_API Interpreter {
   }
   ~Interpreter();
-  Interpreter(Interpreter &&rhs) noexcept
-      : handle_(rhs.handle_), pImpl_(std::move(rhs.pImpl_)),
+  Interpreter(Interpreter&& rhs) noexcept
+      : handle_(rhs.handle_),
+        pImpl_(std::move(rhs.pImpl_)),
         manager_(rhs.manager_),
         interpreterFile_(std::move(rhs.interpreterFile_)),
         torchPluginFile_(std::move(rhs.torchPluginFile_)) {
     rhs.handle_ = nullptr;
   }
-  Interpreter(const Interpreter &) = delete;
-  Interpreter &operator=(const Interpreter &) = delete;
-  Interpreter &operator=(Interpreter &&) = delete;
+  Interpreter(const Interpreter&) = delete;
+  Interpreter& operator=(const Interpreter&) = delete;
+  Interpreter& operator=(Interpreter&&) = delete;
   friend struct InterpreterManager;
 };
@@ -124,7 +128,6 @@ struct Package;
 // The default LoadBalancer for torch::deploy which handles allocating and
 // freeing subinterpreters.
 struct TORCH_API LoadBalancer {
-
   // Creates a LoadBalancer which handles `n` interpreters.
   explicit LoadBalancer(size_t n)
       : uses_(new uint64_t[8 * n]), allocated_(n), n_(n) {
@@ -146,7 +149,7 @@ struct TORCH_API LoadBalancer {
   // `LoadBalancer::acquire()`
   void free(int where);
-private:
+ private:
   // NOLINTNEXTLINE(modernize-avoid-c-arrays,cppcoreguidelines-avoid-c-arrays)
   std::unique_ptr<uint64_t[]> uses_; // the approximate count of the number of users of interpreter
@@ -157,10 +160,9 @@ struct TORCH_API LoadBalancer {
 // An `InterpreterManager` handles the interaction of multiple subinterpreters
 // such as allocating subinterpreters, or load balancing the subinterpreters.
 struct TORCH_API InterpreterManager {
-
   // constructor for `InterpreterManager` which takes the number of interpreters
   // (usually correlates to number of cores on your cpu), and a pointer to an
-  // `Environment`. The defualt uses the local python env.
+  // `Environment`. The default uses the local python env.
   explicit InterpreterManager(
       size_t nInterp = 2,
       std::shared_ptr<Environment> env = std::make_shared<NoopEnvironment>());
@@ -180,7 +182,9 @@ struct TORCH_API InterpreterManager {
   // used to make sure something gets run on all interpreters, such as loading or
   // unloading a model eagerly
-  at::ArrayRef<Interpreter> allInstances() { return instances_; }
+  at::ArrayRef<Interpreter> allInstances() {
+    return instances_;
+  }
   // debugging tool to control the size of the loadBalancer
   // and change the number of interpreters on the fly
@@ -190,12 +194,12 @@ struct TORCH_API InterpreterManager {
   }
   // loads a package from a file with name `uri`
-  Package loadPackage(const std::string &uri);
+  Package loadPackage(const std::string& uri);
   // loads a package from a `PyTorchStreamReader` or any other class which uses
   // `ReadAdapterInterface`
-  Package
-  loadPackage(std::shared_ptr<caffe2::serialize::ReadAdapterInterface> reader);
+  Package loadPackage(
+      std::shared_ptr<caffe2::serialize::ReadAdapterInterface> reader);
   // convenience function for loading some python source code as a module across
   // all interpreters. this can be used for writing tests of deploy that need to
@@ -212,12 +216,12 @@ struct TORCH_API InterpreterManager {
   }
   // Converts `obj` from an `InterpreterSession` I into a `ReplicatedObj`.
-  ReplicatedObj createMovable(Obj obj, InterpreterSession *I);
-  InterpreterManager(const InterpreterManager &) = delete;
-  InterpreterManager &operator=(const InterpreterManager &) = delete;
-  InterpreterManager &operator=(InterpreterManager &&) = delete;
+  ReplicatedObj createMovable(Obj obj, InterpreterSession* I);
+  InterpreterManager(const InterpreterManager&) = delete;
+  InterpreterManager& operator=(const InterpreterManager&) = delete;
+  InterpreterManager& operator=(InterpreterManager&&) = delete;
-private:
+ private:
   friend struct Package;
   friend struct InterpreterSession;
   friend struct InterpreterSessionImpl;
@@ -227,31 +231,32 @@ struct TORCH_API InterpreterManager {
 };
 struct TORCH_API ReplicatedObjImpl {
-  ReplicatedObjImpl(size_t object_id,
-                    // NOLINTNEXTLINE(modernize-pass-by-value)
-                    PickledObject data, InterpreterManager *manager)
+  ReplicatedObjImpl(
+      size_t object_id,
+      // NOLINTNEXTLINE(modernize-pass-by-value)
+      PickledObject data,
+      InterpreterManager* manager)
       : objectId_(object_id), data_(data), manager_(manager) {}
   // NOLINTNEXTLINE(bugprone-exception-escape)
   ~ReplicatedObjImpl();
-  void unload(const Interpreter *onThisInterpreter);
+  void unload(const Interpreter* onThisInterpreter);
   int64_t objectId_;
   PickledObject data_;
-  InterpreterManager *manager_;
+  InterpreterManager* manager_;
 };
 // A python object which is Replicated from an `Obj` such that it is able to
 // move around to different `InterpreterSessions` by using
 // `InterpreterSession::fromMovable(ReplicatedObj)`
 struct TORCH_API ReplicatedObj {
-
   // Default constructor for `ReplicatedObj`
   ReplicatedObj() : pImpl_(nullptr) {}
   // Creates an `InterpreterSession` using `onThisInterpreter`. If
   // `onThisInterpreter` is a `nullptr`, then the associated
   // `InterpreterManager` allocates it.
-  InterpreterSession
-  acquireSession(const Interpreter *onThisInterpreter = nullptr) const;
+  InterpreterSession acquireSession(
+      const Interpreter* onThisInterpreter = nullptr) const;
   at::IValue operator()(at::ArrayRef<at::IValue> args) const {
     auto I = acquireSession();
     return I.self(args).toIValue();
   }
   // Calls a `ReplicatedObj` callable, with arguments given by the tuple args
   // and named arguments given by the dictionary kwargs. This is done on an
   // arbitrary `InterpreterSession` which belongs to the `ReplicatedObj`'s
   // manager.
-  [[nodiscard]] at::IValue
-  callKwargs(std::vector<at::IValue> args,
-             std::unordered_map<std::string, c10::IValue> kwargs) const {
+  [[nodiscard]] at::IValue callKwargs(
+      std::vector<at::IValue> args,
+      std::unordered_map<std::string, c10::IValue> kwargs) const {
     auto I = acquireSession();
     return I.self.callKwargs(std::move(args), std::move(kwargs)).toIValue();
   }
   // Calls a `ReplicatedObj` callable, with named arguments given by the
   // dictionary kwargs. This is done on an arbitrary `InterpreterSession` which
   // belongs to the `ReplicatedObj`'s manager.
-  [[nodiscard]] at::IValue
-  callKwargs(std::unordered_map<std::string, c10::IValue> kwargs) const {
+  [[nodiscard]] at::IValue callKwargs(
+      std::unordered_map<std::string, c10::IValue> kwargs) const {
     auto I = acquireSession();
     return I.self.callKwargs(std::move(kwargs)).toIValue();
   }
   // Returns true if `ReplicatedObj` has attribute with name `attr` and false
   // otherwise. This is done on an arbitrary `InterpreterSession` which belongs
   // to the `ReplicatedObj`'s manager.
- [[nodiscard]] bool hasattr(const char *attr) const { + [[nodiscard]] bool hasattr(const char* attr) const { auto I = acquireSession(); return I.self.hasattr(attr); } @@ -288,12 +293,12 @@ struct TORCH_API ReplicatedObj { // Deletes `ReplicatedObj` from onThisInterpreter, if onThisInterpreter is // `nullptr`, unload is called on all interpreters belonging to the // ReplicatedObject's InterpreterManager - void unload(const Interpreter *onThisInterpreter = nullptr); + void unload(const Interpreter* onThisInterpreter = nullptr); // Converts `ReplicatedObj` to `Obj` on `InterpreterSession` `I` - Obj toObj(InterpreterSession *I); + Obj toObj(InterpreterSession* I); -private: + private: ReplicatedObj(std::shared_ptr pImpl) : pImpl_(std::move(pImpl)) {} std::shared_ptr pImpl_; @@ -306,19 +311,23 @@ struct TORCH_API ReplicatedObj { // ReplicatedObj which represents a python method, and // is therefore callable and has argument names accessible. class PythonMethodWrapper : public torch::IMethod { -public: + public: // TODO(whc) make bound method pickleable, then directly construct from that - PythonMethodWrapper(torch::deploy::ReplicatedObj model, - std::string methodName) + PythonMethodWrapper( + torch::deploy::ReplicatedObj model, + std::string methodName) : model_(std::move(model)), methodName_(std::move(methodName)) {} // return the name of the python method. - const std::string &name() const override { return methodName_; } + const std::string& name() const override { + return methodName_; + } // overrides the `()` operater to call the underlying python method. - c10::IValue operator()(std::vector args, - const IValueMap &kwargs = IValueMap()) const override { + c10::IValue operator()( + std::vector args, + const IValueMap& kwargs = IValueMap()) const override { // TODO(whc) ideally, pickle the method itself as replicatedobj, to skip // this lookup each time auto modelSession = model_.acquireSession(); @@ -326,8 +335,8 @@ class PythonMethodWrapper : public torch::IMethod { return method.callKwargs(args, kwargs).toIValue(); } -private: - void setArgumentNames(std::vector &) const override; + private: + void setArgumentNames(std::vector&) const override; torch::deploy::ReplicatedObj model_; std::string methodName_; @@ -337,14 +346,14 @@ class PythonMethodWrapper : public torch::IMethod { // environment for subinterpreters. 
struct TORCH_API Package { // shorthand for getting the object as a pickle resource in the package - ReplicatedObj loadPickle(const std::string &module, const std::string &file) { + ReplicatedObj loadPickle(const std::string& module, const std::string& file) { auto I = acquireSession(); auto loaded = I.self.attr("load_pickle")({module, file}); return createMovable(loaded, &I); } #ifdef FBCODE_CAFFE2 - std::string loadText(const std::string &packageName, const std::string &key) { + std::string loadText(const std::string& packageName, const std::string& key) { auto I = acquireSession(); return I.self.attr("load_text")({packageName, key}) .toIValue() @@ -359,8 +368,9 @@ struct TORCH_API Package { // std::string decodedBinary = package->loadBinary("extra_files", // "greeting").toStringRef(); // std::cout << decodedBinary; --> outputs "hello" - std::string loadBinary(const std::string &packageName, - const std::string &key) { + std::string loadBinary( + const std::string& packageName, + const std::string& key) { auto I = acquireSession(); return I.self.attr("load_binary")({packageName, key}) .toIValue() @@ -378,26 +388,28 @@ struct TORCH_API Package { } // Converts an `Obj` from `InterpreterSession` `I` into a `ReplicatedObj`. - ReplicatedObj createMovable(Obj obj, InterpreterSession *I) { + ReplicatedObj createMovable(Obj obj, InterpreterSession* I) { return manager_->createMovable(obj, I); } -private: - Package(const std::string &uri, - InterpreterManager - *pm) // or really any of the constructors to our zip file format + private: + Package( + const std::string& uri, + InterpreterManager* + pm) // or really any of the constructors to our zip file format : manager_(pm), containerFile_( std::make_shared(uri)) {} - Package(std::shared_ptr reader, - InterpreterManager - *pm) // or really any of the constructors to our zip file format + Package( + std::shared_ptr reader, + InterpreterManager* + pm) // or really any of the constructors to our zip file format : manager_(pm), containerFile_( std::make_shared(reader)) {} friend struct ReplicatedObj; friend struct InterpreterManager; - InterpreterManager *manager_; + InterpreterManager* manager_; std::shared_ptr containerFile_; }; diff --git a/multipy/runtime/elf_file.h b/multipy/runtime/elf_file.h index b15a2389..46fe7d9d 100644 --- a/multipy/runtime/elf_file.h +++ b/multipy/runtime/elf_file.h @@ -7,10 +7,10 @@ #pragma once #include -#include #include #include #include +#include #include namespace torch { @@ -19,16 +19,21 @@ namespace deploy { // A representation of a section of an ElfFile. struct Section { Section() {} - explicit Section(std::shared_ptr _memfile, const char *_name, - const char *_start, size_t _len = 0) + explicit Section( + std::shared_ptr _memfile, + const char* _name, + const char* _start, + size_t _len = 0) : memfile(_memfile), name(_name), start(_start), len(_len) {} std::shared_ptr memfile; - const char *name{nullptr}; - const char *start{nullptr}; + const char* name{nullptr}; + const char* start{nullptr}; size_t len{0}; - operator bool() const { return start != nullptr; } + operator bool() const { + return start != nullptr; + } }; // TODO: consolidate other ELF file related functions in loader.cpp to this file @@ -37,44 +42,44 @@ struct Section { * This class provie utilities to handle ELF file. Only support 64bit ELF file. 
 */
 class ElfFile {
-public:
+ public:
   // Constructs an ElfFile with the corresponding `filename`
-  explicit ElfFile(const char *filename);
+  explicit ElfFile(const char* filename);
   // Finds and returns a `Section` with the corresponding `name`. If nothing is
   // found, then a `multipy::nullopt` is returned.
-  multipy::optional<Section> findSection(const char *name) const;
+  multipy::optional<Section> findSection(const char* name) const;
-private:
-  Section toSection(Elf64_Shdr *shdr) {
+ private:
+  Section toSection(Elf64_Shdr* shdr) {
     auto nameOff = shdr->sh_name;
     auto shOff = shdr->sh_offset;
     auto len = shdr->sh_size;
-    const char *name = "";
+    const char* name = "";
     if (strtabSection_) {
       MULTIPY_CHECK(nameOff >= 0 && nameOff < strtabSection_.len);
       name = strtabSection_.start + nameOff;
     }
-    const char *start = memFile_->data() + shOff;
+    const char* start = memFile_->data() + shOff;
     return Section{memFile_, name, start, len};
   }
-  [[nodiscard]] const char *str(size_t off) const {
+  [[nodiscard]] const char* str(size_t off) const {
     MULTIPY_CHECK(off < strtabSection_.len, "String table index out of range");
     return strtabSection_.start + off;
   }
   void checkFormat() const;
   std::shared_ptr<MemFile> memFile_;
-  Elf64_Ehdr *ehdr_;
-  Elf64_Shdr *shdrList_;
+  Elf64_Ehdr* ehdr_;
+  Elf64_Shdr* shdrList_;
   size_t numSections_;
   Section strtabSection_;
   std::vector<Section> sections_;
 };
-multipy::optional<Section> searchForSection(const char *name);
+multipy::optional<Section>
searchForSection(const char* name); } // namespace deploy } // namespace torch diff --git a/multipy/runtime/embedded_file.h b/multipy/runtime/embedded_file.h index 6adbfdbc..45bc286e 100644 --- a/multipy/runtime/embedded_file.h +++ b/multipy/runtime/embedded_file.h @@ -13,14 +13,14 @@ namespace deploy { // Represents an ExeSection of an EmbeddedFile. struct ExeSection { - const char *sectionName; + const char* sectionName; bool customLoader; }; // These are symbols used by the subinterpreters. struct InterpreterSymbol { - const char *startSym; - const char *endSym; + const char* startSym; + const char* endSym; bool customLoader; }; @@ -30,13 +30,14 @@ struct EmbeddedFile { std::string libraryName{""}; bool customLoader{false}; - EmbeddedFile(std::string name, - const std::initializer_list §ions, - const std::initializer_list symbols); + EmbeddedFile( + std::string name, + const std::initializer_list& sections, + const std::initializer_list symbols); ~EmbeddedFile(); - EmbeddedFile &operator=(const EmbeddedFile &) = delete; + EmbeddedFile& operator=(const EmbeddedFile&) = delete; }; } // namespace deploy diff --git a/multipy/runtime/environment.h b/multipy/runtime/environment.h index 22047cba..4b3906ed 100644 --- a/multipy/runtime/environment.h +++ b/multipy/runtime/environment.h @@ -24,25 +24,26 @@ class Environment { // all zipped python libraries will be written // under this directory std::string extraPythonLibrariesDir_; - std::string getZippedArchive(const char *zipped_torch_name, - const std::string &pythonAppDir) { + std::string getZippedArchive( + const char* zipped_torch_name, + const std::string& pythonAppDir) { // load the zipped torch modules auto zippedTorchSection = searchForSection(zipped_torch_name); - MULTIPY_CHECK(zippedTorchSection.has_value(), - "Missing the zipped torch section"); - const char *zippedTorchStart = zippedTorchSection->start; + MULTIPY_CHECK( + zippedTorchSection.has_value(), "Missing the zipped torch section"); + const char* zippedTorchStart = zippedTorchSection->start; auto zippedTorchSize = zippedTorchSection->len; std::string zipArchive = pythonAppDir; auto zippedFile = fopen(zipArchive.c_str(), "wb"); - MULTIPY_CHECK(zippedFile != nullptr, - "Fail to create file: ", strerror(errno)); + MULTIPY_CHECK( + zippedFile != nullptr, "Fail to create file: ", strerror(errno)); fwrite(zippedTorchStart, 1, zippedTorchSize, zippedFile); fclose(zippedFile); return zipArchive; } - void setupZippedPythonModules(const std::string &pythonAppDir) { + void setupZippedPythonModules(const std::string& pythonAppDir) { #ifdef FBCODE_CAFFE2 extraPythonPaths_.push_back(getZippedArchive( ".torch_python_modules", @@ -55,17 +56,17 @@ class Environment { extraPythonLibrariesDir_ = pythonAppDir; } -public: + public: // Environment constructor which creates a random temporary directory as // a directory for the zipped python modules. explicit Environment() { char tempDirName[] = "/tmp/torch_deploy_zipXXXXXX"; - char *tempDirectory = mkdtemp(tempDirName); + char* tempDirectory = mkdtemp(tempDirName); setupZippedPythonModules(tempDirectory); } // Environment constructor which takes a file name for the // directory for the zipped python modules. - explicit Environment(const std::string &pythonAppDir) { + explicit Environment(const std::string& pythonAppDir) { setupZippedPythonModules(pythonAppDir); } // Deconstructor for Environment. 
@@ -73,8 +74,8 @@ class Environment { auto rmCmd = "rm -rf " + extraPythonLibrariesDir_; (void)system(rmCmd.c_str()); } - virtual void configureInterpreter(Interpreter *interp) = 0; - virtual const std::vector &getExtraPythonPaths() { + virtual void configureInterpreter(Interpreter* interp) = 0; + virtual const std::vector& getExtraPythonPaths() { return extraPythonPaths_; } }; diff --git a/multipy/runtime/interpreter/interpreter_impl.h b/multipy/runtime/interpreter/interpreter_impl.h index 88b3dd89..c2ee21cc 100644 --- a/multipy/runtime/interpreter/interpreter_impl.h +++ b/multipy/runtime/interpreter/interpreter_impl.h @@ -36,30 +36,30 @@ struct InterpreterObj { friend struct ReplicatedObjImpl; friend struct InterpreterSessionImpl; -protected: - InterpreterSessionImpl *owningSession_; + protected: + InterpreterSessionImpl* owningSession_; -public: + public: InterpreterObj() : owningSession_(nullptr) {} - explicit InterpreterObj(InterpreterSessionImpl *owningSession) + explicit InterpreterObj(InterpreterSessionImpl* owningSession) : owningSession_(owningSession) {} - InterpreterObj(const InterpreterObj &obj) = delete; - InterpreterObj &operator=(const InterpreterObj &obj) = delete; - InterpreterObj(InterpreterObj &&obj) = default; - InterpreterObj &operator=(InterpreterObj &&obj) = default; + InterpreterObj(const InterpreterObj& obj) = delete; + InterpreterObj& operator=(const InterpreterObj& obj) = delete; + InterpreterObj(InterpreterObj&& obj) = default; + InterpreterObj& operator=(InterpreterObj&& obj) = default; virtual ~InterpreterObj() = default; -private: + private: virtual at::IValue toIValue() const = 0; virtual Obj call(at::ArrayRef> args) = 0; virtual Obj call(at::ArrayRef args) = 0; - virtual Obj - callKwargs(std::vector args, - std::unordered_map kwargs) = 0; - virtual Obj - callKwargs(std::unordered_map kwargs) = 0; - virtual bool hasattr(const char *attr) = 0; - virtual Obj attr(const char *attr) = 0; + virtual Obj callKwargs( + std::vector args, + std::unordered_map kwargs) = 0; + virtual Obj callKwargs( + std::unordered_map kwargs) = 0; + virtual bool hasattr(const char* attr) = 0; + virtual Obj attr(const char* attr) = 0; }; // this is a wrapper class that refers to a PyObject* instance in a particular @@ -86,18 +86,19 @@ struct Obj { // Call an `Obj` callable, with arguments given by the tuple args, and named // arguments given by the dictionary kwargs. - Obj callKwargs(std::vector args, - std::unordered_map kwargs); + Obj callKwargs( + std::vector args, + std::unordered_map kwargs); // Call an `Obj` callable, with named arguments given by the dictionary // kwargs. Obj callKwargs(std::unordered_map kwargs); // Returns true if `Obj` has attribute with name `attr` and false otherwise. - bool hasattr(const char *attr); + bool hasattr(const char* attr); // Returns attribute `attr` from `Obj`. This is equivalent to calling // `getattr(Obj, attr)` in python. 
- Obj attr(const char *attr); + Obj attr(const char* attr); -private: + private: bool isDefault_; std::shared_ptr baseObj_; }; @@ -112,41 +113,47 @@ struct InterpreterSessionImpl { virtual ~InterpreterSessionImpl() = default; -private: - virtual Obj global(const char *module, const char *name) = 0; + private: + virtual Obj global(const char* module, const char* name) = 0; virtual Obj fromIValue(at::IValue value) = 0; virtual Obj createOrGetPackageImporterFromContainerFile( - const std::shared_ptr - &containerFile_) = 0; + const std::shared_ptr& + containerFile_) = 0; virtual PickledObject pickle(Obj container, Obj obj) = 0; - virtual Obj unpickleOrGet(int64_t id, const PickledObject &obj) = 0; + virtual Obj unpickleOrGet(int64_t id, const PickledObject& obj) = 0; virtual void unload(int64_t id) = 0; virtual at::IValue toIValue(Obj obj) const = 0; virtual Obj call(Obj obj, at::ArrayRef args) = 0; virtual Obj call(Obj obj, at::ArrayRef args) = 0; - virtual Obj - callKwargs(Obj obj, std::vector args, - std::unordered_map kwargs) = 0; - virtual Obj - callKwargs(Obj obj, std::unordered_map kwargs) = 0; - virtual Obj attr(Obj obj, const char *attr) = 0; - virtual bool hasattr(Obj obj, const char *attr) = 0; - -protected: - int64_t isDefault(Obj obj) const { return obj.isDefault_; } + virtual Obj callKwargs( + Obj obj, + std::vector args, + std::unordered_map kwargs) = 0; + virtual Obj callKwargs( + Obj obj, + std::unordered_map kwargs) = 0; + virtual Obj attr(Obj obj, const char* attr) = 0; + virtual bool hasattr(Obj obj, const char* attr) = 0; + + protected: + int64_t isDefault(Obj obj) const { + return obj.isDefault_; + } std::shared_ptr getBaseObj(Obj obj) const { return obj.baseObj_; } - bool isOwner(Obj obj) const { return this == obj.baseObj_->owningSession_; } + bool isOwner(Obj obj) const { + return this == obj.baseObj_->owningSession_; + } }; // The underlying implementation of `Interpreter` struct InterpreterImpl { - virtual InterpreterSessionImpl *acquireSession() = 0; + virtual InterpreterSessionImpl* acquireSession() = 0; virtual void setFindModule( - std::function(const std::string &)> + std::function(const std::string&)> find_module) = 0; virtual ~InterpreterImpl() = default; // this will uninitialize python }; @@ -154,7 +161,9 @@ struct InterpreterImpl { // inline definitions for Objs are necessary to avoid introducing a // source file that would need to exist it both the libinterpreter.so and then // the libtorchpy library. 
-inline at::IValue Obj::toIValue() const { return baseObj_->toIValue(); } +inline at::IValue Obj::toIValue() const { + return baseObj_->toIValue(); +} inline Obj Obj::operator()(at::ArrayRef args) { std::vector> copy; @@ -168,18 +177,22 @@ inline Obj Obj::operator()(at::ArrayRef args) { return baseObj_->call(args); } -inline Obj -Obj::callKwargs(std::vector args, - std::unordered_map kwargs) { +inline Obj Obj::callKwargs( + std::vector args, + std::unordered_map kwargs) { return baseObj_->callKwargs(std::move(args), std::move(kwargs)); } -inline Obj -Obj::callKwargs(std::unordered_map kwargs) { +inline Obj Obj::callKwargs( + std::unordered_map kwargs) { return baseObj_->callKwargs(std::move(kwargs)); } -inline bool Obj::hasattr(const char *attr) { return baseObj_->hasattr(attr); } +inline bool Obj::hasattr(const char* attr) { + return baseObj_->hasattr(attr); +} -inline Obj Obj::attr(const char *attr) { return baseObj_->attr(attr); } +inline Obj Obj::attr(const char* attr) { + return baseObj_->attr(attr); +} } // namespace deploy } // namespace torch diff --git a/multipy/runtime/interpreter/plugin_registry.h b/multipy/runtime/interpreter/plugin_registry.h index 1efd1034..0aaf355e 100644 --- a/multipy/runtime/interpreter/plugin_registry.h +++ b/multipy/runtime/interpreter/plugin_registry.h @@ -1,10 +1,10 @@ #pragma once #include -#include #include #include #include +#include #include @@ -15,40 +15,40 @@ namespace multipy { // A `Converter` is used in order to convert `PyObject`s/`py::object` into // an `IValue` or some other representation usch as storage. class Converter { -public: + public: virtual ~Converter() = default; // convert a `py::handle` to an `IValue` - virtual multipy::optional - toTypeInferredIValue(py::handle input) = 0; + virtual multipy::optional toTypeInferredIValue( + py::handle input) = 0; // convert an `IValue` into a `py::object` virtual multipy::optional toPyObject(at::IValue ivalue) = 0; // convert an `PyObject` into a `Storage` - virtual multipy::optional createStorage(PyObject *obj) = 0; + virtual multipy::optional createStorage(PyObject* obj) = 0; // create a `PyObject` from `storage` - virtual multipy::optional - createPyObject(const at::Storage &storage) = 0; + virtual multipy::optional createPyObject( + const at::Storage& storage) = 0; // return the `THPDtype` of `scalarType` - virtual multipy::optional - getTHPDtype(at::ScalarType scalarType) = 0; + virtual multipy::optional getTHPDtype( + at::ScalarType scalarType) = 0; }; // register a converter to be used by torch::deploy / multipy. // The order of the registration of the converters is dictated by the order of // compilation. -void registerConverter(Converter *); +void registerConverter(Converter*); // deregister a converter from torch::deploy / multipy // The order of the deregistration of the converters is dictated by the order of // compilation. 
-void deregisterConverter(Converter *); +void deregisterConverter(Converter*); at::IValue toTypeInferredIValue(py::handle input); py::object toPyObject(at::IValue ivalue); -at::Storage createStorage(PyObject *obj); -PyObject *createPyObject(const at::Storage &storage); -THPDtype *getTHPDtype(at::ScalarType scalarType); +at::Storage createStorage(PyObject* obj); +PyObject* createPyObject(const at::Storage& storage); +THPDtype* getTHPDtype(at::ScalarType scalarType); } // namespace multipy diff --git a/multipy/runtime/mem_file.h b/multipy/runtime/mem_file.h index 32f88698..a68ff020 100644 --- a/multipy/runtime/mem_file.h +++ b/multipy/runtime/mem_file.h @@ -6,15 +6,15 @@ #pragma once -#include -#include #include -#include #include #include #include #include #include +#include +#include +#include namespace torch { namespace deploy { @@ -26,35 +26,39 @@ namespace deploy { // // 2. Used in unity to load the elf file. struct MemFile { - explicit MemFile(const char *filename_) + explicit MemFile(const char* filename_) : fd_(0), mem_(nullptr), n_bytes_(0), name_(filename_) { fd_ = open(filename_, O_RDONLY); - MULTIPY_CHECK(fd_ != -1, - "failed to open {}: {}" + filename_ + strerror(errno)); + MULTIPY_CHECK( + fd_ != -1, "failed to open {}: {}" + filename_ + strerror(errno)); // NOLINTNEXTLINE struct stat s; if (-1 == fstat(fd_, &s)) { close(fd_); // destructors don't run during exceptions - MULTIPY_CHECK(false, - "failed to stat {}: {}" + filename_ + strerror(errno)); + MULTIPY_CHECK( + false, "failed to stat {}: {}" + filename_ + strerror(errno)); } n_bytes_ = s.st_size; mem_ = mmap(nullptr, n_bytes_, PROT_READ, MAP_SHARED, fd_, 0); if (MAP_FAILED == mem_) { close(fd_); - MULTIPY_CHECK(false, - "failed to mmap {}: {}" + filename_ + strerror(errno)); + MULTIPY_CHECK( + false, "failed to mmap {}: {}" + filename_ + strerror(errno)); } } - MemFile(const MemFile &) = delete; - MemFile &operator=(const MemFile &) = delete; - [[nodiscard]] const char *data() const { return (const char *)mem_; } + MemFile(const MemFile&) = delete; + MemFile& operator=(const MemFile&) = delete; + [[nodiscard]] const char* data() const { + return (const char*)mem_; + } // return the file descriptor of the underlying file. 
- int valid() { return fcntl(fd_, F_GETFD) != -1 || errno != EBADF; } + int valid() { + return fcntl(fd_, F_GETFD) != -1 || errno != EBADF; + } ~MemFile() { if (mem_) { - munmap((void *)mem_, n_bytes_); + munmap((void*)mem_, n_bytes_); } if (fd_) { close(fd_); @@ -62,12 +66,16 @@ struct MemFile { } // return the size of the underlying file defined by the `MemFile` - size_t size() { return n_bytes_; } - [[nodiscard]] int fd() const { return fd_; } + size_t size() { + return n_bytes_; + } + [[nodiscard]] int fd() const { + return fd_; + } -private: + private: int fd_; - void *mem_; + void* mem_; size_t n_bytes_; std::string name_; }; diff --git a/multipy/runtime/noop_environment.h b/multipy/runtime/noop_environment.h index d9a3aa2c..2891e29a 100644 --- a/multipy/runtime/noop_environment.h +++ b/multipy/runtime/noop_environment.h @@ -13,8 +13,8 @@ namespace deploy { // The local python Environment class NoopEnvironment : public Environment { -public: - void configureInterpreter(Interpreter * /* interp */) override {} + public: + void configureInterpreter(Interpreter* /* interp */) override {} }; } // namespace deploy diff --git a/multipy/runtime/path_environment.h b/multipy/runtime/path_environment.h index e2e0aa99..06efaaf0 100644 --- a/multipy/runtime/path_environment.h +++ b/multipy/runtime/path_environment.h @@ -15,11 +15,11 @@ namespace deploy { // An Environment which is defined by a specific path to python code (ie. condas // sitepackages) class PathEnvironment : public Environment { -public: + public: explicit PathEnvironment(std::string path) : path_(std::move(path)) {} - void configureInterpreter(Interpreter *interp) override; + void configureInterpreter(Interpreter* interp) override; -private: + private: std::string path_; }; From 2b03d89a7e038179496777ffaa697c08705ad8bd Mon Sep 17 00:00:00 2001 From: Sahan Paliskara Date: Fri, 21 Oct 2022 16:29:52 -0700 Subject: [PATCH 5/5] Update on "Add comments for public functions and classes" Adds comments to public functions and public classes such that they show up in documentation in order to guide users. Differential Revision: [D40606131](https://our.internmc.facebook.com/intern/diff/D40606131) [ghstack-poisoned] --- multipy/runtime/deploy.h | 9 +++++---- multipy/runtime/interpreter/interpreter_impl.h | 12 ++++++------ multipy/runtime/interpreter/plugin_registry.h | 2 +- 3 files changed, 12 insertions(+), 11 deletions(-) diff --git a/multipy/runtime/deploy.h b/multipy/runtime/deploy.h index 1c2e6f9a..304c9781 100644 --- a/multipy/runtime/deploy.h +++ b/multipy/runtime/deploy.h @@ -245,8 +245,9 @@ struct TORCH_API ReplicatedObjImpl { InterpreterManager* manager_; }; -// A python object which is Replicated from an `Obj` such that it is able to -// move around to different `InterpreterSessions` by using +//ReplicatedObj represents a python object that can be used on multiple interpreters. Calling +// methods on this will pick an arbitrary interpreter to run on, transfer it there if not already +// and run the method. A replicated object can be converted to an interpreter specific `Obj` using // `InterpreterSession::fromMovable(ReplicatedObj)` struct TORCH_API ReplicatedObj { // Default constructor for `ReplicatedObj` @@ -263,7 +264,7 @@ struct TORCH_API ReplicatedObj { } // Calls an `ReplicatedObj` callable, with arguments given by the tuple args - // and named arguments given by the dictionary kwargs. This is done on an + // and named arguments given by the dictionary kwargs (equivalent to python's `__call__`). 
This is done on an // arbitrary `InterpreterSession` which belongs to the `ReplicatedObj`'s // manager. [[nodiscard]] at::IValue callKwargs( @@ -274,7 +275,7 @@ struct TORCH_API ReplicatedObj { } // Calls an `ReplicatedObj` callable, with named arguments given by the - // dictionary kwargs. This is done on an arbitrary `InterpreterSession` which + // dictionary kwargs (equivalent to python's `__call__`). This is done on an arbitrary `InterpreterSession` which // belongs to the `ReplicatedObj`'s manager. [[nodiscard]] at::IValue callKwargs( std::unordered_map kwargs) const { diff --git a/multipy/runtime/interpreter/interpreter_impl.h b/multipy/runtime/interpreter/interpreter_impl.h index c2ee21cc..b424cf72 100644 --- a/multipy/runtime/interpreter/interpreter_impl.h +++ b/multipy/runtime/interpreter/interpreter_impl.h @@ -29,8 +29,8 @@ struct PickledObject { std::shared_ptr containerFile_; }; -// The underlying implementation of `Obj` which holds the underlying -// `py::object`. +// PickledObject contains a python object that's been pickled with the tensors saved separately. +// Unpickling this will share the underlying data across multiple copies/interpreters. struct InterpreterObj { friend struct Obj; friend struct ReplicatedObjImpl; @@ -78,19 +78,19 @@ struct Obj { // return `IValue` representation. at::IValue toIValue() const; - // Call an `Obj` callable, with arguments given by the tuple args. + // Call an `Obj` callable, with arguments given by the tuple args. Equivalent to `__call__` in python. Obj operator()(at::ArrayRef args); - // Call an `Obj` callable, with arguments given by the tuple args. + // Call an `Obj` callable, with arguments given by the tuple args. Equivalent to `__call__` in python. Obj operator()(at::ArrayRef args); // Call an `Obj` callable, with arguments given by the tuple args, and named - // arguments given by the dictionary kwargs. + // arguments given by the dictionary kwargs. Equivalent to `__call__` in python. Obj callKwargs( std::vector args, std::unordered_map kwargs); // Call an `Obj` callable, with named arguments given by the dictionary - // kwargs. + // kwargs. Equivalent to `__call__` in python. Obj callKwargs(std::unordered_map kwargs); // Returns true if `Obj` has attribute with name `attr` and false otherwise. bool hasattr(const char* attr); diff --git a/multipy/runtime/interpreter/plugin_registry.h b/multipy/runtime/interpreter/plugin_registry.h index 0aaf355e..97be6002 100644 --- a/multipy/runtime/interpreter/plugin_registry.h +++ b/multipy/runtime/interpreter/plugin_registry.h @@ -13,7 +13,7 @@ namespace py = pybind11; namespace multipy { // A `Converter` is used in order to convert `PyObject`s/`py::object` into -// an `IValue` or some other representation usch as storage. +// an `IValue` or some other representation such as storage. class Converter { public: virtual ~Converter() = default;