7.1 release (#2027)
DawerG authored Nov 1, 2023
1 parent dd90747 commit dbb0094
Showing 76 changed files with 4,511 additions and 1,486 deletions.
12 changes: 12 additions & 0 deletions CMakeLists.txt
@@ -191,19 +191,31 @@ else()
message(STATUS "CoreML.framework and dependent frameworks not found. Skipping libcoremlpython build.")
endif()


# Build kmeans-1d
set(KMEANS_DIR "${PROJECT_SOURCE_DIR}/deps/kmeans1d")
execute_process(
COMMAND python3 setup.py build_ext --inplace
WORKING_DIRECTORY ${KMEANS_DIR}
)

# Somehow Python's setuptools is building this shared object file so that it tries to load the C++
# standard library using an rpath that only exists on the build machine. Change that so it gets
# loaded from the standard location.
if(APPLE)
file(GLOB SO_FILE "${PROJECT_SOURCE_DIR}/deps/kmeans1d/kmeans1d/_core.*.so")
execute_process(
COMMAND install_name_tool -change @rpath/libc++.1.dylib /usr/lib/libc++.1.dylib ${SO_FILE}
)
endif()

# Copy kmeans-1d to Python deps folder
execute_process(
COMMAND cp -r kmeans1d ../../coremltools/_deps
WORKING_DIRECTORY ${KMEANS_DIR}
)


set(PYTHON_TAG "cp${PYTHON_VERSION_MAJOR}${PYTHON_VERSION_MINOR}")
if(APPLE)
execute_process(COMMAND uname -m OUTPUT_VARIABLE HARDWARE_NAME OUTPUT_STRIP_TRAILING_WHITESPACE)
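As a sanity check for the rpath fix above, a minimal Python sketch (an assumption, not part of the build; requires macOS with Xcode's `otool` on the PATH) that confirms the rewritten load command:

```python
# Verify that _core.*.so now links libc++ from /usr/lib rather than @rpath.
import glob
import subprocess

for so_path in glob.glob("deps/kmeans1d/kmeans1d/_core.*.so"):
    links = subprocess.run(
        ["otool", "-L", so_path], capture_output=True, text=True, check=True
    ).stdout
    # After install_name_tool runs, the standard location should appear.
    assert "/usr/lib/libc++.1.dylib" in links, links
```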
21 changes: 20 additions & 1 deletion coremltools/_deps/__init__.py
@@ -154,10 +154,15 @@ def __get_sklearn_version(version):

# ---------------------------------------------------------------------------------------
_HAS_TORCH = True
_TORCH_MAX_VERSION = "2.0.0"
_TORCH_MAX_VERSION = "2.1.0"
_HAS_TORCH_EXPORT_API = False
try:
import torch
_warn_if_above_max_supported_version("Torch", torch.__version__, _TORCH_MAX_VERSION)

if _get_version(torch.__version__) >= _StrictVersion("2.1.0"):
_HAS_TORCH_EXPORT_API = True

except:
_HAS_TORCH = False
MSG_TORCH_NOT_FOUND = "PyTorch not found."
@@ -170,6 +175,20 @@ def __get_sklearn_version(version):
_HAS_TORCH_VISION = False
MSG_TORCH_VISION_NOT_FOUND = "TorchVision not found."

_HAS_TORCH_AUDIO = True
try:
import torchaudio
except:
_HAS_TORCH_AUDIO = False
MSG_TORCH_AUDIO_NOT_FOUND = "TorchAudio not found."


_HAS_EXECUTORCH = True
try:
import executorch
except:
_HAS_EXECUTORCH = False
MSG_EXECUTORCH_NOT_FOUND = "Executorch not found."

# ---------------------------------------------------------------------------------------
try:
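All the availability flags in this file follow one probe pattern: optimistically set the flag, import, and flip the flag on failure. A minimal standalone sketch of that pattern (``somepackage`` is a placeholder, not a real coremltools dependency, and ``packaging.version`` stands in for coremltools' internal version helpers):

```python
# Optional-dependency probe, mirroring the pattern in coremltools._deps above.
_HAS_SOMEPACKAGE = True
_HAS_SOMEPACKAGE_NEW_API = False
try:
    import somepackage

    # Version-gated feature flag, analogous to _HAS_TORCH_EXPORT_API above.
    from packaging.version import Version

    _HAS_SOMEPACKAGE_NEW_API = Version(somepackage.__version__) >= Version("2.1.0")
except ImportError:
    _HAS_SOMEPACKAGE = False
    MSG_SOMEPACKAGE_NOT_FOUND = "somepackage not found."
```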
173 changes: 126 additions & 47 deletions coremltools/converters/_converters_entry.py
@@ -15,7 +15,7 @@
from coremltools import ComputeUnit as _ComputeUnit
from coremltools import __version__ as _ct_version
from coremltools import _logger as logger
from coremltools._deps import _HAS_TF_1, _HAS_TF_2, _HAS_TORCH
from coremltools._deps import _HAS_TF_1, _HAS_TF_2, _HAS_TORCH, _HAS_TORCH_EXPORT_API
from coremltools.converters._profile_utils import _profile
from coremltools.converters.mil._deployment_compatibility import (
AvailableTarget,
@@ -36,7 +36,7 @@
from coremltools.converters.mil.mil.passes.defs.quantization import FP16ComputePrecision
from coremltools.converters.mil.mil.passes.graph_pass import PassOption as _PassOption
from coremltools.converters.mil.mil.passes.pass_pipeline import PassPipeline
from coremltools.models import _METADATA_SOURCE, _METADATA_VERSION
from coremltools.models import _METADATA_SOURCE, _METADATA_SOURCE_DIALECT, _METADATA_VERSION
from coremltools.models.utils import _MLPACKAGE_EXTENSION

if _HAS_TF_1:
@@ -51,8 +51,13 @@
if _HAS_TORCH:
import torch

from coremltools.converters.mil.frontend.torch.load import \
_torchscript_from_model as pytorch_load
from coremltools.converters.mil.frontend.torch.load import (
_torchscript_from_spec as try_load_torchscript,
)

if _HAS_TORCH_EXPORT_API:
from torch.export import ExportedProgram



@_profile
@@ -102,8 +107,12 @@ def convert(
* PyTorch
- A `TorchScript <https://pytorch.org/docs/stable/jit.html>`_ object
- Path to a ``.pt`` file
- TorchScript Models:
- A `TorchScript <https://pytorch.org/docs/stable/jit.html>`_ object
- Path to a ``.pt`` file
- Torch Exported Models:
- An `ExportedProgram <https://pytorch.org/docs/stable/export.html#torch.export.ExportedProgram>`_ object with the ``EDGE`` dialect
source : str (optional)
@@ -161,18 +170,23 @@ def convert(
When ``inputs`` not provided or ``dtype`` not specified. The float 32 inputs defaults to float 16.
* PyTorch:
- The ``inputs`` parameter is required.
- Number of elements in ``inputs`` must match the number of inputs
of the PyTorch model.
- ``inputs`` may be a nested list or tuple.
- ``TensorType`` and ``ImageType`` must have the ``shape`` specified.
- If the ``name`` argument is specified with ``TensorType`` or
``ImageType``, the converted Core ML model will have inputs with
the same name.
- If ``dtype`` is missing:
* For ``minimum_deployment_target <= ct.target.macOS12``, it defaults to float 32.
* For ``minimum_deployment_target >= ct.target.macOS13``, and with ``compute_precision`` in float 16 precision.
It defaults to float 16.
- TorchScript Models:
- The ``inputs`` parameter is required.
- Number of elements in ``inputs`` must match the number of inputs
of the PyTorch model.
- ``inputs`` may be a nested list or tuple.
- ``TensorType`` and ``ImageType`` must have the ``shape`` specified.
- If the ``name`` argument is specified with ``TensorType`` or
``ImageType``, the converted Core ML model will have inputs with
the same name.
- If ``dtype`` is missing:
* For ``minimum_deployment_target <= ct.target.macOS12``, it defaults to float 32.
* For ``minimum_deployment_target >= ct.target.macOS13`` with ``compute_precision`` in float 16 precision, it defaults to float 16.
- Torch Exported Models:
- The ``inputs`` parameter is not supported; inputs are inferred from the Torch ExportedProgram.
outputs : list of ``TensorType`` or ``ImageType`` (optional)
@@ -218,13 +232,17 @@ def convert(
* PyTorch:
- If specified, the length of the list must match the number of
outputs returned by the PyTorch model.
- If ``name`` is specified, it is applied to the output names of the
converted Core ML model.
- For ``minimum_deployment_target >= ct.target.macOS13``, and with ``compute_precision`` in float 16 precision.
If ``dtype`` not specified, the outputs inferred of type float 32
defaults to float 16.
- TorchScript Models:
- If specified, the length of the list must match the number of
outputs returned by the PyTorch model.
- If ``name`` is specified, it is applied to the output names of the
converted Core ML model.
- For ``minimum_deployment_target >= ct.target.macOS13`` with ``compute_precision`` in float 16 precision,
outputs inferred as float 32 default to float 16 if ``dtype`` is not specified.
- Torch Exported Models:
- The ``outputs`` parameter is not supported; outputs are inferred from the Torch ExportedProgram.
classifier_config : ClassifierConfig class (optional)
@@ -308,7 +326,7 @@ def convert(
The above transform iterates through all the ops, looking at each op's
inputs and outputs. If they are of type float 32, ``cast``
ops are injected to convert those tensors (also known as `vars`) to
type float 16.
type float 16. Similarly, int32 vars will also be cast to int16.
- ``coremltools.precision.FLOAT32`` enum: No transform is applied.
@@ -489,15 +507,17 @@ def skip_real_div_ops(op):
PyTorch:

>>> model = torchvision.models.mobilenet_v2()
>>> model.eval()
>>> example_input = torch.rand(1, 3, 256, 256)
>>> traced_model = torch.jit.trace(model, example_input)
>>> input = ct.TensorType(name='input_name', shape=(1, 3, 256, 256))
>>> mlmodel = ct.convert(traced_model, inputs=[input])
>>> results = mlmodel.predict({"input": example_input.numpy()})
>>> print(results['1651']) # 1651 is the node name given by PyTorch's JIT

TorchScript Models:

>>> model = torchvision.models.mobilenet_v2()
>>> model.eval()
>>> example_input = torch.rand(1, 3, 256, 256)
>>> traced_model = torch.jit.trace(model, example_input)
>>> input = ct.TensorType(name='input_name', shape=(1, 3, 256, 256))
>>> mlmodel = ct.convert(traced_model, inputs=[input])
>>> results = mlmodel.predict({"input": example_input.numpy()})
>>> print(results['1651']) # 1651 is the node name given by PyTorch's JIT
See `Conversion Options <https://coremltools.readme.io/docs/neural-network-conversion>`_ for
more advanced options.
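The docstring examples cover only the TorchScript path; below is a hedged sketch of the new ExportedProgram path added in this release (assumes torch >= 2.1 and executorch for the EDGE dialect; ``to_edge`` and its exact import path are executorch API and may differ by version):

```python
import torch
import torchvision
import coremltools as ct

model = torchvision.models.mobilenet_v2().eval()
example_args = (torch.rand(1, 3, 256, 256),)

# torch.export produces an ExportedProgram in the ATen dialect...
exported = torch.export.export(model, example_args)

# ...which executorch lowers to the EDGE dialect that convert() requires.
from executorch.exir import to_edge
edge_program = to_edge(exported).exported_program()

# inputs/outputs must be omitted: they are inferred from the ExportedProgram.
mlmodel = ct.convert(edge_program)
```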
@@ -508,6 +528,7 @@ def skip_real_div_ops(op):
outputs_as_strings,
outputs_as_tensor_or_image_types,
outputs)
source_dialect = _determine_source_dialect(model, exact_source)
exact_target = _determine_target(convert_to, minimum_deployment_target)
_validate_conversion_arguments(
model,
@@ -525,7 +546,7 @@ def skip_real_div_ops(op):
if pass_pipeline is None:
pass_pipeline = PassPipeline()
if not need_fp16_cast_pass:
pass_pipeline.remove_passes({"common::add_fp16_cast"})
pass_pipeline.remove_passes({"common::add_fp16_cast", "common::add_int16_cast"})
if isinstance(compute_precision, FP16ComputePrecision):
# For backward compatibility with the `op_selector` param in FP16ComputePrecision.
pass_pipeline._pass_options["common::add_fp16_cast"] = [
@@ -584,7 +605,7 @@ def skip_real_div_ops(op):

gc.collect()

mlmodel = _record_build_metadata(mlmodel, exact_source)
mlmodel = _record_build_metadata(mlmodel, exact_source, source_dialect=source_dialect)

return mlmodel
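The hunk above removes ``common::add_int16_cast`` alongside ``common::add_fp16_cast`` when no fp16 cast pass is needed. A hedged sketch of driving the same knobs from user code (pass names taken from the code above; ``traced_model`` and ``input`` as in the docstring example):

```python
import coremltools as ct
from coremltools.converters.mil.mil.passes.pass_pipeline import PassPipeline

pipeline = PassPipeline()
# Drop both cast-insertion passes, as convert() does internally when
# compute_precision leaves tensors in float 32.
pipeline.remove_passes({"common::add_fp16_cast", "common::add_int16_cast"})

mlmodel = ct.convert(traced_model, inputs=[input], pass_pipeline=pipeline)
```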

@@ -819,16 +840,45 @@ def _flatten_list(_inputs):
raise ValueError("Input should be a list of TensorType or ImageType")

elif exact_source == "pytorch":
if inputs is None:
raise ValueError('Expected argument for pytorch "inputs" not provided')
if _HAS_TORCH_EXPORT_API and isinstance(model, ExportedProgram):
if model.dialect != "EDGE":
raise NotImplementedError(
f"Conversion for models with only EDGE dialect is supported/tested. Provided Dialect: {model.dialect}"
)

raise_if_duplicated(flat_inputs)
if inputs is not None and not all(
[isinstance(_input, InputType) for _input in flat_inputs]
):
raise ValueError(
"Input should be a list/tuple (or nested lists/tuples) of TensorType or ImageType"
)
# TODO: rdar://115845792 ([Executorch] Handle user provided inputs/outputs in the convert API)
if inputs is not None:
raise AssertionError("'inputs' argument should be None for ExportedProgram")

if outputs is not None:
raise AssertionError("'outputs' argument should be None for ExportedProgram")

else:
is_torch_load_successful = False
try:
try_load_torchscript(model)
is_torch_load_successful = True
except:
pass
if is_torch_load_successful:
if inputs is None:
raise ValueError(
'Expected argument "inputs" for TorchScript models not provided'
)

raise_if_duplicated(flat_inputs)
if inputs is not None and not all(
[isinstance(_input, InputType) for _input in flat_inputs]
):
raise ValueError(
"Input should be a list/tuple (or nested lists/tuples) of TensorType or ImageType"
)
else:
raise TypeError(
"@model must either be a TorchScript object (or .pt or .pth file) or an ExportedProgram object (if using torch.export based API), received: {}".format(
type(model)
)
)

elif exact_source == "milinternal":
if not isinstance(model, Program):
Expand All @@ -837,6 +887,19 @@ def _flatten_list(_inputs):
)


def _determine_source_dialect(model, exact_source):

source_dialect = None
if exact_source == "pytorch":

if _HAS_TORCH_EXPORT_API and isinstance(model, ExportedProgram):
return f"TorchExport::{model.dialect}"
else:
return "TorchScript"

return source_dialect
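A short illustration of the strings this helper produces (a sketch; ``traced_model``, ``edge_program``, and ``tf_model`` are hypothetical inputs as in the examples above):

```python
# TorchScript input -> plain "TorchScript"
_determine_source_dialect(traced_model, "pytorch")   # "TorchScript"
# ExportedProgram input -> dialect-qualified tag recorded in metadata
_determine_source_dialect(edge_program, "pytorch")   # "TorchExport::EDGE"
# Non-pytorch sources carry no dialect
_determine_source_dialect(tf_model, "tensorflow")    # None
```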


def _determine_source(model, source,
output_names,
outputs_as_tensor_or_image_types,
@@ -875,9 +938,13 @@ def _determine_source(model, source,
pass

if source == "auto" and _HAS_TORCH:

if _HAS_TORCH_EXPORT_API and isinstance(model, ExportedProgram):
return "pytorch"

is_torch_load_successful = False
try:
pytorch_load(model)
try_load_torchscript(model)
is_torch_load_successful = True
except:
pass
@@ -953,6 +1020,12 @@ def _get_metadata_from_mlmodel(mlmodel):
src_pkg_version = mlmodel.user_defined_metadata[_METADATA_SOURCE]
coremltools_version = mlmodel.user_defined_metadata[_METADATA_VERSION]

src_dialect = (
None
if _METADATA_SOURCE_DIALECT not in mlmodel.user_defined_metadata
else mlmodel.user_defined_metadata[_METADATA_SOURCE_DIALECT]
)

src_pkg_version_list = src_pkg_version.split("==")
if len(src_pkg_version_list) == 0:
src_pkg, pkg_ver = None, None
@@ -969,10 +1042,13 @@ def _get_metadata_from_mlmodel(mlmodel):
if src_pkg is not None and pkg_ver is not None:
build_info['coremltools-component-' + src_pkg] = str(pkg_ver)

if src_dialect is not None:
build_info["coremltools-source-dialect"] = src_dialect

return build_info


def _record_build_metadata(mlmodel, exact_source):
def _record_build_metadata(mlmodel, exact_source, source_dialect=None):
# recording metadata: coremltools version, source framework and version
if exact_source in {"tensorflow", "tensorflow2"} and (_HAS_TF_1 or _HAS_TF_2):
src_pkg_version = "tensorflow=={0}".format(tf.__version__)
@@ -986,6 +1062,9 @@ def _record_build_metadata(mlmodel, exact_source):
mlmodel.user_defined_metadata[_METADATA_SOURCE] = src_pkg_version
mlmodel.user_defined_metadata[_METADATA_VERSION] = _ct_version

if source_dialect is not None:
mlmodel.user_defined_metadata[_METADATA_SOURCE_DIALECT] = source_dialect

build_info = _get_metadata_from_mlmodel(mlmodel)

mlmodel._set_build_info_mil_attributes(build_info)
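Downstream, the recorded dialect surfaces in the converted model's user-defined metadata; a hedged sketch (the exact key string comes from ``_METADATA_SOURCE_DIALECT``, and the value shown is illustrative):

```python
# After converting an EDGE-dialect ExportedProgram:
for key, value in mlmodel.user_defined_metadata.items():
    print(key, "=", value)
# Expect a source-dialect entry such as "TorchExport::EDGE" alongside the
# usual source and version entries written by _record_build_metadata above.
```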