Skip to content

Commit

Permalink
Enable python debug runtime for exported network libraries (apache#8793)
Browse files Browse the repository at this point in the history
* Add get_json method to graph_executor factory

Signed-off-by: Alexander Peskov <peskovnn@gmail.com>

* Update Debugger runtime documentation for exported libraries

* Fix cpplint

* Change module get_json to get_graph_json, add test

* Fix get_graph_json test

* Change verification of llvm support in test to decorator

* Fix sphinx warning in debugger.rst

Co-authored-by: Alexander Peskov <peskovnn@gmail.com>
  • Loading branch information
2 people authored and ylc committed Sep 29, 2021
1 parent ce3d731 commit f8a4b2f
Show file tree
Hide file tree
Showing 3 changed files with 46 additions and 23 deletions.
20 changes: 18 additions & 2 deletions docs/dev/debugger.rst
Original file line number Diff line number Diff line change
Expand Up @@ -123,12 +123,12 @@ Example of loading the parameters
How to use Debugger?
***************************************

1. In ``config.cmake`` set the ``USE_GRAPH_EXECUTOR_DEBUG`` flag to ``ON``
1. In ``config.cmake`` set the ``USE_PROFILER`` flag to ``ON``

::

# Whether enable additional graph debug functions
set(USE_GRAPH_EXECUTOR_DEBUG ON)
set(USE_PROFILER ON)

2. Do 'make' tvm, so that it will make the ``libtvm_runtime.so``

Expand All @@ -148,6 +148,22 @@ How to use Debugger?
m.run()
tvm_out = m.get_output(0, tvm.nd.empty(out_shape, dtype)).numpy()

4. If the network was previously exported to an external library using
   ``lib.export_library("network.so")`` (a shared object file/dynamic linked
   library), the initialization of the debug runtime will be slightly different

::

lib = tvm.runtime.load_module("network.so")
m = graph_executor.create(lib["get_graph_json"](), lib, dev, dump_root="/tmp/tvmdbg")
# set inputs
m.set_input('data', tvm.nd.array(data.astype(dtype)))
m.set_input(**params)
# execute
m.run()
tvm_out = m.get_output(0, tvm.nd.empty(out_shape, dtype)).numpy()


The outputs are dumped to a temporary folder in ``/tmp`` folder or the
folder specified while creating the runtime.

Expand Down
4 changes: 4 additions & 0 deletions src/runtime/graph_executor/graph_executor_factory.cc
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,10 @@ PackedFunc GraphExecutorFactory::GetFunction(
}
*rv = this->ExecutorCreate(devices);
});
} else if (name == "get_graph_json") {
return PackedFunc(
[sptr_to_self, this](TVMArgs args, TVMRetValue* rv) { *rv = this->graph_json_; });

} else if (name == "debug_create") {
return PackedFunc([sptr_to_self, this](TVMArgs args, TVMRetValue* rv) {
ICHECK_GE(args.size(), 2);
Expand Down
45 changes: 24 additions & 21 deletions tests/python/unittest/test_runtime_module_based_interface.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,10 +46,8 @@ def verify(data):
return out


@tvm.testing.requires_llvm
def test_legacy_compatibility():
if not tvm.testing.device_enabled("llvm"):
print("Skip because llvm is not enabled")
return
mod, params = relay.testing.synthetic.get_workload()
with relay.build_config(opt_level=3):
graph, lib, graph_params = relay.build_module.build(mod, "llvm", params=params)
Expand All @@ -63,10 +61,8 @@ def test_legacy_compatibility():
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)


@tvm.testing.requires_llvm
def test_cpu():
if not tvm.testing.device_enabled("llvm"):
print("Skip because llvm is not enabled")
return
mod, params = relay.testing.synthetic.get_workload()
with relay.build_config(opt_level=3):
complied_graph_lib = relay.build_module.build(mod, "llvm", params=params)
Expand All @@ -90,6 +86,23 @@ def test_cpu():
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)


@tvm.testing.requires_llvm
def test_cpu_get_graph_json():
    """Verify that a library exported to disk still exposes its graph JSON.

    Builds a synthetic workload, exports it as a shared object, reloads it
    with ``tvm.runtime.load_module`` and checks that the packed function
    ``get_graph_json`` returns the serialized graph as a string containing
    an expected fused-op node name.
    """
    mod, params = relay.testing.synthetic.get_workload()
    with relay.build_config(opt_level=3):
        compiled_graph_lib = relay.build_module.build(mod, "llvm", params=params)
    from tvm.contrib import utils

    temp = utils.tempdir()
    file_name = "deploy_lib.so"
    path_lib = temp.relpath(file_name)
    compiled_graph_lib.export_library(path_lib)
    loaded_lib = tvm.runtime.load_module(path_lib)
    # Avoid shadowing the stdlib `json` module with the returned string.
    graph_json = loaded_lib["get_graph_json"]()
    assert isinstance(graph_json, str)
    # The synthetic network is expected to contain this fused softmax node.
    assert "tvmgen_default_fused_nn_softmax1" in graph_json


@tvm.testing.requires_cuda
@tvm.testing.requires_gpu
def test_gpu():
Expand Down Expand Up @@ -120,9 +133,6 @@ def test_gpu():
@tvm.testing.uses_gpu
def test_mod_export():
def verify_cpu_export(obj_format):
if not tvm.testing.device_enabled("llvm"):
print("Skip because llvm is not enabled")
return
mod, params = relay.testing.synthetic.get_workload()
with relay.build_config(opt_level=3):
complied_graph_lib = relay.build_module.build(mod, "llvm", params=params)
Expand Down Expand Up @@ -210,10 +220,8 @@ def setup_gmod():
out = gmod.get_output(0).numpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)

@tvm.testing.requires_llvm
def verify_rpc_cpu_export(obj_format):
if not tvm.testing.device_enabled("llvm"):
print("Skip because llvm is not enabled")
return
mod, params = relay.testing.synthetic.get_workload()
with relay.build_config(opt_level=3):
complied_graph_lib = relay.build_module.build(mod, "llvm", params=params)
Expand Down Expand Up @@ -308,12 +316,10 @@ def check_remote(server):
verify_rpc_gpu_export(obj_format)


@tvm.testing.requires_llvm
@tvm.testing.uses_gpu
def test_remove_package_params():
def verify_cpu_remove_package_params(obj_format):
if not tvm.testing.device_enabled("llvm"):
print("Skip because llvm is not enabled")
return
mod, params = relay.testing.synthetic.get_workload()
with relay.build_config(opt_level=3):
complied_graph_lib = relay.build_module.build(mod, "llvm", params=params)
Expand Down Expand Up @@ -404,10 +410,8 @@ def verify_gpu_remove_package_params(obj_format):
out = gmod.get_output(0).numpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)

@tvm.testing.requires_llvm
def verify_rpc_cpu_remove_package_params(obj_format):
if not tvm.testing.device_enabled("llvm"):
print("Skip because llvm is not enabled")
return
mod, params = relay.testing.synthetic.get_workload()
with relay.build_config(opt_level=3):
complied_graph_lib = relay.build_module.build(mod, "llvm", params=params)
Expand Down Expand Up @@ -517,10 +521,8 @@ def verify_rpc_gpu_remove_package_params(obj_format):
verify_rpc_gpu_remove_package_params(obj_format)


@tvm.testing.requires_llvm
def test_debug_graph_executor():
if not tvm.testing.device_enabled("llvm"):
print("Skip because llvm is not enabled")
return
mod, params = relay.testing.synthetic.get_workload()
with relay.build_config(opt_level=3):
complied_graph_lib = relay.build_module.build(mod, "llvm", params=params)
Expand Down Expand Up @@ -619,3 +621,4 @@ def make_module(mod):
test_remove_package_params()
test_debug_graph_executor()
test_multiple_imported_modules()
test_cpu_get_graph_json()

0 comments on commit f8a4b2f

Please sign in to comment.