-
Notifications
You must be signed in to change notification settings - Fork 1.2k
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Add support for exporting models to Carton
- Loading branch information
1 parent
b52cf60
commit b01cf48
Showing
5 changed files
with
358 additions
and
0 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,133 @@ | ||
import asyncio | ||
import importlib.util | ||
import logging | ||
import os | ||
import shutil | ||
import tempfile | ||
from typing import Any, Dict, List | ||
|
||
import torch | ||
|
||
from ludwig.api import LudwigModel | ||
from ludwig.api_annotations import DeveloperAPI | ||
from ludwig.constants import NAME | ||
from ludwig.types import ModelConfigDict | ||
from ludwig.utils.fs_utils import open_file | ||
|
||
logger = logging.getLogger(__name__) | ||
|
||
|
||
# Source-code template for a thin torch.nn.Module wrapper around Ludwig's
# scripted inference module, written to disk and imported at export time.
# `{output_dicts}` is filled by str.format() with a dict-literal expression
# mapping each output feature name to its "predictions" entry (built by
# _get_output_dicts). The doubled braces `{{}}` render as a literal `{}` so
# they survive the .format() call.
INFERENCE_MODULE_TEMPLATE = """
from typing import Any, Dict, List, Tuple, Union
import torch
from ludwig.utils.types import TorchscriptPreprocessingInput
class GeneratedInferenceModule(torch.nn.Module):
    def __init__(self, inference_module):
        super().__init__()
        self.inference_module = inference_module
    def forward(self, inputs: Dict[str, Any]):
        retyped_inputs: Dict[str, TorchscriptPreprocessingInput] = {{}}
        for k, v in inputs.items():
            assert isinstance(v, TorchscriptPreprocessingInput)
            retyped_inputs[k] = v
        results = self.inference_module(retyped_inputs)
        return {output_dicts}
"""
|
||
|
||
def _get_output_dicts(config: ModelConfigDict) -> str:
    """Render the dict-literal expression spliced into the generated forward().

    Produces a string of the form
    ``{"feat_a": results["feat_a"]["predictions"], ...}`` with one entry per
    output feature declared in the model config.
    """
    entries = ", ".join(
        f'"{feature[NAME]}": results["{feature[NAME]}"]["predictions"]'
        for feature in config["output_features"]
    )
    return "{" + entries + "}"
|
||
|
||
@DeveloperAPI
def generate_carton_torchscript(model: LudwigModel):
    """Script a LudwigModel into a TorchScript module suitable for Carton.

    Writes a small wrapper module (from INFERENCE_MODULE_TEMPLATE) to a temp
    file, imports it by path, instantiates the wrapper around the model's
    torchscript inference module, and returns ``torch.jit.script`` of it.
    """
    config = model.config
    inference_module = model.to_torchscript()
    with tempfile.TemporaryDirectory() as tmpdir:
        # Generate the wrapper source with the model-specific output dict
        # expression baked in, since TorchScript needs it statically.
        ts_path = os.path.join(tmpdir, "generated.py")
        with open_file(ts_path, "w") as f:
            f.write(
                INFERENCE_MODULE_TEMPLATE.format(
                    output_dicts=_get_output_dicts(config),
                )
            )

        # Import the just-written file as a throwaway module; everything below
        # must stay inside this `with` so the source file still exists.
        spec = importlib.util.spec_from_file_location("generated.ts", ts_path)
        gen_ts = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(gen_ts)

        # Wrap the inference module and compile the wrapper to TorchScript.
        gen_module = gen_ts.GeneratedInferenceModule(inference_module)
        scripted_module = torch.jit.script(gen_module)
        return scripted_module
|
||
|
||
def _get_input_spec(model: LudwigModel) -> List[Dict[str, Any]]:
    """Describe the model's input features as Carton TensorSpec objects.

    Each spec carries the feature name, the dtype expected by its
    preprocessing, and a single symbolic "batch_size" dimension.
    """
    from cartonml import TensorSpec

    return [
        TensorSpec(
            name=feature.feature_name,
            dtype=feature.get_preproc_input_dtype(model.training_set_metadata[feature_name]),
            shape=("batch_size",),
        )
        for feature_name, feature in model.model.input_features.items()
    ]
|
||
|
||
def _get_output_spec(model: LudwigModel) -> List[Dict[str, Any]]:
    """Describe the model's output features as Carton TensorSpec objects.

    Mirrors _get_input_spec, but uses each feature's postprocessing output
    dtype instead of the preprocessing input dtype.
    """
    from cartonml import TensorSpec

    return [
        TensorSpec(
            name=feature.feature_name,
            dtype=feature.get_postproc_output_dtype(model.training_set_metadata[feature_name]),
            shape=("batch_size",),
        )
        for feature_name, feature in model.model.output_features.items()
    ]
|
||
|
||
@DeveloperAPI
def export_carton(model: LudwigModel, carton_path: str, carton_model_name="ludwig_model"):
    """Package a trained LudwigModel as a Carton model at ``carton_path``.

    Scripts the model to TorchScript, packs it with ``carton.pack`` under the
    torchscript runner, and moves the packed artifact to the destination.

    Args:
        model: Trained LudwigModel to export.
        carton_path: Destination path for the packed carton.
        carton_model_name: Model name embedded in the carton metadata.

    Raises:
        RuntimeError: If the ``cartonml-nightly`` package is not installed.
    """
    try:
        import cartonml as carton
    except ImportError as e:
        # Chain the original ImportError so the real cause isn't lost.
        raise RuntimeError('The "cartonml-nightly" package is not installed in your environment.') from e

    # Generate a torchscript model
    model_ts = generate_carton_torchscript(model)

    with tempfile.TemporaryDirectory() as tmpdir:
        # Save the model to a temp dir
        input_model_path = os.path.join(tmpdir, "model.pt")
        torch.jit.save(model_ts, input_model_path)

        # carton.pack is an async function so we wrap it in another coroutine
        # and drive it to completion with asyncio.run.
        # See https://pyo3.rs/v0.20.0/ecosystem/async-await#a-note-about-asynciorun
        # for why the wrapper function is needed.
        async def pack():
            return await carton.pack(
                input_model_path,
                runner_name="torchscript",
                # Any 2.x.x version is okay
                # TODO: improve this
                required_framework_version="=2",
                model_name=carton_model_name,
                inputs=_get_input_spec(model),
                outputs=_get_output_spec(model),
            )

        # asyncio.run replaces the get_event_loop()/run_until_complete pattern,
        # which is deprecated since Python 3.10 when no loop is running and
        # fails outside the main thread.
        tmp_out_path = asyncio.run(pack())

        # Move the packed artifact from carton's temp output to the requested path
        # before the temp dir holding the intermediate torchscript is removed.
        shutil.move(tmp_out_path, carton_path)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Oops, something went wrong.