
8.0b2 Release (#2308)
jakesabathia2 authored Aug 16, 2024
1 parent 8b7048e commit 5e2460f
Showing 431 changed files with 23,130 additions and 30,075 deletions.
33 changes: 33 additions & 0 deletions NOTICE.txt
@@ -46,3 +46,36 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

This project contains content in the files coremltools/optimize/torch/quantization/modules/conv_transpose.py and coremltools/optimize/torch/quantization/modules/conv_transpose_fused.py which are adapted from pytorch (https://github.com/pytorch/). The license for these follows:

Copyright (c) 2016 Facebook, Inc (Adam Paszke)

All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.

3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America
and IDIAP Research Institute nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
12 changes: 5 additions & 7 deletions README.md
@@ -26,9 +26,9 @@ With coremltools, you can:
After conversion, you can integrate the Core ML models with your app using Xcode.

## Install 8.0 Beta
The [coremltools version 8 beta 1](https://github.com/apple/coremltools/releases/tag/8.0b1) is now out. To install, run the following command in your terminal:
The [coremltools version 8 beta 2](https://github.com/apple/coremltools/releases/tag/8.0b2) is now out. To install, run the following command in your terminal:
```shell
pip install coremltools==8.0b1
pip install coremltools==8.0b2
```
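A quick way to confirm which build is active after installing (an illustrative check, not part of the README change):

```python
import coremltools as ct

print(ct.__version__)  # expect "8.0b2" once the beta is installed
```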


@@ -46,11 +46,9 @@ pip install -U coremltools

To install coremltools, see [Installing Core ML Tools](https://apple.github.io/coremltools/docs-guides/source/installing-coremltools.html). For more information, see the following:

* [Release Notes](https://github.com/apple/coremltools/releases/)
* [Guide and examples](https://apple.github.io/coremltools/docs-guides/index.html)
* [Release Notes](https://github.com/apple/coremltools/releases/)
* [Guide and examples](https://apple.github.io/coremltools/docs-guides/index.html)
* [API Reference](https://apple.github.io/coremltools/index.html)
* [Core ML Specification](https://apple.github.io/coremltools/mlmodel/index.html)
* [Building from Source](BUILDING.md)
* [Contribution Guidelines](CONTRIBUTING.md)


* [Contribution Guidelines](CONTRIBUTING.md)
10 changes: 9 additions & 1 deletion coremltools/_deps/__init__.py
@@ -54,7 +54,7 @@ def _warn_if_above_max_supported_version(package_name, package_version, max_supp
_HAS_SKLEARN = True
_SKLEARN_VERSION = None
_SKLEARN_MIN_VERSION = "0.17"
_SKLEARN_MAX_VERSION = "1.1.2"
_SKLEARN_MAX_VERSION = "1.5.1"


def __get_sklearn_version(version):
@@ -197,6 +197,14 @@ def __get_sklearn_version(version):
else:
_HAS_SCIPY = True

# ---------------------------------------------------------------------------------------
try:
import transformers
except ImportError:
_HAS_HF = False
else:
_HAS_HF = True

# General utils
def version_ge(module, target_version):
"""
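The new `_HAS_HF` flag follows the same optional-dependency pattern as the sklearn and scipy probes above it. A sketch of how such a flag is typically consumed downstream (the guard helper here is hypothetical, not part of this commit):

```python
from coremltools._deps import _HAS_HF


def require_transformers() -> None:
    # Hypothetical guard: fail fast with a clear message when the
    # optional `transformers` dependency is not installed.
    if not _HAS_HF:
        raise ImportError(
            "transformers is required for this feature: pip install transformers"
        )
```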
45 changes: 34 additions & 11 deletions coremltools/converters/mil/backend/mil/load.py
@@ -6,7 +6,7 @@
import os
import warnings
from collections import OrderedDict
from typing import Any, Dict, List, Optional, Tuple
from typing import Any, Dict, List, Optional, Tuple, Union

import numpy as np

@@ -62,12 +62,19 @@
BlobWriter = None


def should_use_weight_file(val):
def should_use_weight_file(
val: Union[np.ndarray, np.generic],
specification_version: Optional[int] = _SPECIFICATION_VERSION_IOS_15,
) -> bool:
# additional dtypes are supported in iOS18 and later
supported_dtypes = ["float16", "float32", "uint8", "int8"]
if specification_version >= _SPECIFICATION_VERSION_IOS_18:
supported_dtypes += ["uint16", "int16", "int32", "uint32"]
return (
val is not None
and isinstance(val, (np.ndarray, np.generic))
and val.size >= 10
and val.dtype in ['float16', 'float32', 'uint8', 'int8']
and val.dtype in supported_dtypes
)
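To make the new gating concrete, a small illustration of the updated predicate (a sketch using the function above and the spec-version constants this module already imports; the arrays are hypothetical):

```python
import numpy as np

fp16_val = np.zeros(1000, dtype=np.float16)
u16_val = np.zeros(1000, dtype=np.uint16)

should_use_weight_file(fp16_val)                                # True: fp16 qualifies on any spec version
should_use_weight_file(u16_val)                                 # False: defaults to the iOS15 spec version
should_use_weight_file(u16_val, _SPECIFICATION_VERSION_IOS_18)  # True: uint16 joins the set at iOS18
```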


@@ -80,13 +80,26 @@ def __init__(
self,
prog: Program,
weights_dir: str,
specification_version: int,
):
self.prog = prog
self.weights_dir = weights_dir
self.specification_version = specification_version
self.blob_writers = {}
self.weight_id_to_file_value = {} # mapping from weight_id to file value
self.prog.validate(check_essential_scope=True)

@staticmethod
def _get_valid_kwargs(kwargs: Dict[str, Any]) -> Dict[str, Any]:
"""
Get valid kwargs for initializing a MILProtoExporter object.
"""
return {
"prog": kwargs["prog"],
"weights_dir": kwargs["weights_dir"],
"specification_version": kwargs["specification_version"],
}

def translate_program_attributes(self) -> Dict[str, Any]:
"""
Get the program attributes which need to be exported to mil proto.
@@ -157,7 +177,7 @@ def get_milproto_value(self, var: Var) -> proto.MIL_pb2.Value:
"""
Translate a pymil Var into milproto value.
"""
if should_use_weight_file(var.val):
if should_use_weight_file(var.val, self.specification_version):
return self.create_file_value(var)
else:
return create_immediate_value(var)
@@ -475,9 +495,7 @@ def convert_function(self, function: Function, opset: str) -> proto.MIL_pb2.Func
inputs=inputs, opset=opset, block_specializations={opset: block}
)

def export(
self, specification_version: Optional[str] = _SPECIFICATION_VERSION_IOS_15
) -> proto.MIL_pb2.Program:
def export(self) -> proto.MIL_pb2.Program:
"""
Export a pymil program into mil proto using the exporter's specification version.
"""
@@ -486,7 +504,9 @@ def export(

function_protos = {}
for func_name, func in self.prog.functions.items():
function_protos[func_name] = self.convert_function(func, _OPSET[specification_version])
function_protos[func_name] = self.convert_function(
func, _OPSET[self.specification_version]
)

kwargs = {
"version": 1,
@@ -1040,11 +1060,14 @@ def load(
)

# convert pymil program into mil proto
kwargs["prog"] = prog
kwargs["weights_dir"] = weights_dir
kwargs["specification_version"] = specification_version
exporter_kwargs = MILProtoExporter._get_valid_kwargs(kwargs)
mil_proto_exporter = MILProtoExporter(
prog,
weights_dir,
**exporter_kwargs,
)
mil_proto = mil_proto_exporter.export(specification_version)
mil_proto = mil_proto_exporter.export()

# return the model provided by users
desc = kwargs.get("model_description", None)
107 changes: 107 additions & 0 deletions coremltools/converters/mil/backend/mil/test_load.py
@@ -3,7 +3,9 @@
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause

import itertools
import math
import os
import platform
import shutil
import tempfile
@@ -22,9 +24,68 @@
TestConstexprLut as _TestConstexprLut,
)
from coremltools.converters.mil.mil.program import Symbol
from coremltools.converters.mil.mil.types.type_mapping import string_to_nptype
from coremltools.models.utils import _macos_version


class TestWeightFileSerialization:
@staticmethod
@pytest.mark.parametrize(
"dtype, opset_version",
itertools.product(
["fp16", "fp32", "uint8", "int8", "uint16", "int16", "int32", "uint32"],
[ct.target.iOS16, ct.target.iOS18],
),
)
def test_weight_serialization(dtype, opset_version):
if dtype == "uint32":
# There is a pass that casts the output to a Core ML supported dtype;
# uint32 fails because the `cast` op doesn't accept that input type.
pytest.skip("uint32 is not supported in `cast` op.")
if dtype in ["uint8", "int8", "uint16", "int16"] and opset_version == ct.target.iOS16:
# iOS16 doesn't support the above dtypes either
pytest.skip("dtype not supported in iOS16")

if dtype in ["fp16", "fp32", "uint8", "int8"]:
should_serialize_weight = True
else:
should_serialize_weight = opset_version >= ct.target.iOS18

@mb.program(input_specs=[mb.TensorSpec((1,))], opset_version=opset_version)
def prog(x):
val = np.random.rand(1000).astype(string_to_nptype(dtype))
return mb.const(val=val), mb.add(x=x, y=1.0)

# we don't want the const to be constant folded after casting
pipeline = ct.PassPipeline()
pipeline.set_options("common::const_elimination", {"skip_const_by_size": "-1"})
mlmodel = ct.convert(
prog,
minimum_deployment_target=opset_version,
pass_pipeline=pipeline,
)
saved_package_path = tempfile.mkdtemp(suffix=".mlpackage")
mlmodel.save(saved_package_path)

# check that the weights are serialized as a file value
if ct.utils._macos_version() >= (15, 0):
with tempfile.TemporaryDirectory() as serialize_dir:
os.system(f"coremlcompiler compile {saved_package_path} {serialize_dir}")
model_name_with_extension = os.path.basename(saved_package_path)
model_name_wo_extension, _ = os.path.splitext(model_name_with_extension)
mil_file = open(
os.path.join(serialize_dir, f"{model_name_wo_extension}.mlmodelc", "model.mil")
)
mil_txt = mil_file.read()
if should_serialize_weight:
assert f"tensor<{dtype}, [1000]>(BLOBFILE" in mil_txt
else:
assert f"tensor<{dtype}, [1000]>(BLOBFILE" not in mil_txt

# cleanup
shutil.rmtree(saved_package_path)


class TestMILFlexibleShapes:
@mb.program(input_specs=[mb.TensorSpec(shape=[1, 3, Symbol("H"), Symbol("W")])])
def basic_network(x):
@@ -991,9 +1052,11 @@ def prog(state, x, y):
prog,
convert_to="mlprogram",
minimum_deployment_target=ct.target.iOS18,
skip_model_load=True,
)

mil = mlmodel.get_spec().mlProgram

for function in mil.functions.values():
for block in function.block_specializations.values():
ops = list(block.operations)
@@ -1005,6 +1068,50 @@ def prog(state, x, y):
]
assert [val.type for val in ops] == expected_ops

@staticmethod
def test_coreml_update_state_lowering_with_prefer_state_in_downstream():
@mb.program(
input_specs=[
mb.StateTensorSpec((1,), dtype=types.fp16),
mb.TensorSpec((1,), dtype=types.fp16),
mb.TensorSpec((1,), dtype=types.fp16),
mb.TensorSpec((1,), dtype=types.fp16),
],
opset_version=ct.target.iOS18,
)
def prog(state, x, y, z):
# Although seemingly not used, the graph pass prefer_state_in_downstream
# will make its output the `x` that feeds `identity`
mb.coreml_update_state(state=state, value=x)
# If the value only feeds into coreml_update_state,
# prefer_state_in_downstream has no effect
mb.coreml_update_state(state=state, value=y)
# This is the one that is genuinely unused
mb.coreml_update_state(state=state, value=z)
return mb.identity(x=x), mb.coreml_update_state(state=state, value=y)

mlmodel = ct.convert(
prog,
convert_to="mlprogram",
minimum_deployment_target=ct.target.iOS18,
skip_model_load=True,
)

mil = mlmodel.get_spec().mlProgram
for function in mil.functions.values():
for block in function.block_specializations.values():
ops = list(block.operations)
expected_ops = [
"write_state",
"read_state",
"write_state",
"write_state",
"identity",
"write_state",
"read_state",
]
assert [val.type for val in ops] == expected_ops

@staticmethod
@pytest.mark.skipif(ct.utils._macos_version() < (15, 0),
reason="State only supported on macOS 15+")
2 changes: 1 addition & 1 deletion coremltools/converters/mil/converter.py
@@ -278,7 +278,7 @@ def mil_convert_to_proto(
# If the client calls `mil_convert` directly, the `pass_pipeline` is None. To keep the
# behaviour same as before, the quantization pass is removed in this situation.
# TODO: rdar://106111553 ([Infra] Quantization Pass is skipped when `mil_convert` is called directly.)
main_pipeline = PassPipeline()
main_pipeline = kwargs.get("pass_pipeline", PassPipeline())
main_pipeline.remove_passes({"common::add_fp16_cast", "common::add_int16_cast"})
frontend_pipeline, backend_pipeline = _construct_other_pipelines(
main_pipeline, convert_from, convert_to
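The practical effect of this one-line fix is that a caller-supplied `pass_pipeline` is no longer silently replaced by a fresh `PassPipeline`. The new serialization test above relies on exactly that; a minimal sketch of the same pattern (assuming `prog` is a pymil program like the ones built with `@mb.program` in the tests):

```python
import coremltools as ct

# Keep large consts from being folded away, as the new test does.
pipeline = ct.PassPipeline()
pipeline.set_options("common::const_elimination", {"skip_const_by_size": "-1"})

mlmodel = ct.convert(
    prog,
    pass_pipeline=pipeline,
    minimum_deployment_target=ct.target.iOS18,
)
```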
@@ -115,7 +115,7 @@ def var_constraints(pattern):
if not is_scalar:
conv_weight = pattern.conv.weight.val
passed = passed and (
np.product(scale.shape) == Cout
np.prod(scale.shape) == Cout
or (len(scale.shape) == len(conv_weight.shape) and scale.shape[1] == Cout)
or (len(scale.shape) == len(conv_weight.shape) - 1 and scale.shape[0] == Cout)
)
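`np.product` was deprecated in NumPy 1.25 and removed in NumPy 2.0, so the fusion pass now uses the canonical `np.prod`; the two are behavior-identical:

```python
import numpy as np

scale_shape = (32, 1, 1)  # hypothetical per-channel scale with Cout == 32
assert np.prod(scale_shape) == 32  # same value np.product returned on older NumPy
```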