TPC attach 2 framework #1296

Merged 6 commits on Dec 23, 2024
Changes from 1 commit
@@ -9,3 +9,4 @@
OperatorSetConcat= schema.OperatorSetConcat
Fusing = schema.Fusing
TargetPlatformModel = schema.TargetPlatformModel
OperatorSetNames = schema.OperatorSetNames
@@ -61,7 +61,6 @@ class OperatorSetNames(Enum):
    OPSET_DROPOUT = "Dropout"
    OPSET_SPLIT = "Split"
    OPSET_CHUNK = "Chunk"
    OPSET_UNBIND = "Unbind"
    OPSET_MAXPOOL = "MaxPool"
    OPSET_SIZE = "Size"
    OPSET_SHAPE = "Shape"
@@ -0,0 +1,47 @@
from typing import Dict, Tuple, List, Any, Optional

from model_compression_toolkit import DefaultDict
from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel
from model_compression_toolkit.target_platform_capabilities.target_platform import TargetPlatformCapabilities, \
OperationsSetToLayers


class TpcAttach2Fw:

    def __init__(self):
        self._opset2layer = None

        # We provide an attribute mapping that maps each layer type in an operator set
        # that has weight attributes with a provided quantization config (in the TP model)
        # to its framework-specific attribute name.
        # Note that a DefaultDict should be provided if not all layer types in the
        # operator set are listed separately in the mapping.
        self._opset2attr_mapping = None

    def attach(self, tpc_model: TargetPlatformModel,
               custom_opset2layer: Dict[str, Tuple[List[Any], Optional[Dict[str, DefaultDict]]]] = None
               ) -> TargetPlatformCapabilities:

        tpc = TargetPlatformCapabilities(tpc_model)

        with tpc:
            for opset_name, operators in self._opset2layer.items():
                attr_mapping = self._opset2attr_mapping.get(opset_name)
                if attr_mapping is None:
                    OperationsSetToLayers(opset_name, operators)
                else:
                    OperationsSetToLayers(opset_name, operators, attr_mapping=attr_mapping)

            if custom_opset2layer is not None:
                for opset_name, operators in custom_opset2layer.items():
                    if len(operators) == 1:
                        OperationsSetToLayers(opset_name, operators[0])
                    elif len(operators) == 2:
                        OperationsSetToLayers(opset_name, operators[0], attr_mapping=operators[1])
                    else:
                        raise ValueError(f"Custom operator set to layer mapping should include up to 2 elements - "
                                         f"a list of layers to attach to the operator and an optional mapping of "
                                         f"attribute names, but the given mapping contains {len(operators)} elements.")

        return tpc

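A minimal sketch (illustrative, not part of this diff) of the contract a framework-specific subclass is expected to fulfil: populate _opset2layer (and _opset2attr_mapping for opsets whose layers carry quantized weights), then let the base attach() build the TargetPlatformCapabilities. The framework name, the PlaceholderConv2D layer type, the "Conv" opset key, and the "kernel"/"bias" attribute names below are placeholders, not names from this PR.

from model_compression_toolkit import DefaultDict
from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR
from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2fw import \
    TpcAttach2Fw


class PlaceholderConv2D:
    """Stand-in for a real framework layer type (illustration only)."""


class TpcAttach2SomeFw(TpcAttach2Fw):  # hypothetical framework attacher
    def __init__(self):
        super().__init__()
        # Opset name (as it appears in the TP model) -> framework layer types/callables.
        self._opset2layer = {"Conv": [PlaceholderConv2D]}
        # For opsets with quantized weight attributes, map the generic attribute names
        # to framework-specific ones; DefaultDict covers layer types not listed explicitly.
        # "kernel" and "bias" are placeholder attribute names.
        self._opset2attr_mapping = {"Conv": {KERNEL_ATTR: DefaultDict(default_value="kernel"),
                                             BIAS_ATTR: DefaultDict(default_value="bias")}}
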
@@ -0,0 +1,106 @@
# Copyright 2024 Sony Semiconductor Israel, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

import tensorflow as tf
from packaging import version

from model_compression_toolkit.verify_packages import FOUND_SONY_CUSTOM_LAYERS

if FOUND_SONY_CUSTOM_LAYERS:
    from sony_custom_layers.keras.object_detection.ssd_post_process import SSDPostProcess

if version.parse(tf.__version__) >= version.parse("2.13"):
    from keras.src.layers import Conv2D, DepthwiseConv2D, Dense, Reshape, ZeroPadding2D, Dropout, \
        MaxPooling2D, Activation, ReLU, Add, Subtract, Multiply, PReLU, Flatten, Cropping2D, LeakyReLU, Permute, \
        Conv2DTranspose, Identity, Concatenate, BatchNormalization, Minimum, Maximum
else:
    from keras.layers import Conv2D, DepthwiseConv2D, Dense, Reshape, ZeroPadding2D, Dropout, \
        MaxPooling2D, Activation, ReLU, Add, Subtract, Multiply, PReLU, Flatten, Cropping2D, LeakyReLU, Permute, \
        Conv2DTranspose, Concatenate, BatchNormalization, Minimum, Maximum

from model_compression_toolkit import DefaultDict
from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS, \
BIAS_ATTR, KERAS_KERNEL, KERAS_DEPTHWISE_KERNEL
from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import OperatorSetNames
from model_compression_toolkit.target_platform_capabilities.target_platform import LayerFilterParams
from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2fw import \
TpcAttach2Fw


class TpcAttach2Keras(TpcAttach2Fw):
    def __init__(self):
        super().__init__()

        self._opset2layer = {
            OperatorSetNames.OPSET_CONV.value: [Conv2D, tf.nn.conv2d],
            OperatorSetNames.OPSET_DEPTHWISE_CONV.value: [DepthwiseConv2D, tf.nn.depthwise_conv2d],
            OperatorSetNames.OPSET_CONV_TRANSPOSE.value: [Conv2DTranspose, tf.nn.conv2d_transpose],
            OperatorSetNames.OPSET_FULLY_CONNECTED.value: [Dense],
            OperatorSetNames.OPSET_CONCATENATE.value: [tf.concat, Concatenate],
            OperatorSetNames.OPSET_STACK.value: [tf.stack],
            OperatorSetNames.OPSET_UNSTACK.value: [tf.unstack],
            OperatorSetNames.OPSET_GATHER.value: [tf.gather, tf.compat.v1.gather],
            OperatorSetNames.OPSET_EXPAND.value: [],
            OperatorSetNames.OPSET_BATCH_NORM.value: [BatchNormalization],
            OperatorSetNames.OPSET_RELU.value: [tf.nn.relu, ReLU],
            OperatorSetNames.OPSET_RELU6.value: [tf.nn.relu6],
            OperatorSetNames.OPSET_LEAKY_RELU.value: [tf.nn.leaky_relu, LeakyReLU],
            OperatorSetNames.OPSET_HARD_TANH.value: [LayerFilterParams(Activation, activation="hard_tanh")],
            OperatorSetNames.OPSET_ADD.value: [tf.add, Add],
            OperatorSetNames.OPSET_SUB.value: [tf.subtract, Subtract],
            OperatorSetNames.OPSET_MUL.value: [tf.math.multiply, Multiply],
            OperatorSetNames.OPSET_DIV.value: [tf.math.divide, tf.math.truediv],
            OperatorSetNames.OPSET_MIN_MAX.value: [tf.math.minimum, tf.math.maximum, Minimum, Maximum],
            OperatorSetNames.OPSET_PRELU.value: [PReLU],
            OperatorSetNames.OPSET_SWISH.value: [tf.nn.swish, LayerFilterParams(Activation, activation="swish")],
            OperatorSetNames.OPSET_SIGMOID.value: [tf.nn.sigmoid, LayerFilterParams(Activation, activation="sigmoid")],
            OperatorSetNames.OPSET_TANH.value: [tf.nn.tanh, LayerFilterParams(Activation, activation="tanh")],
            OperatorSetNames.OPSET_GELU.value: [tf.nn.gelu, LayerFilterParams(Activation, activation="gelu")],
            OperatorSetNames.OPSET_HARDSIGMOID.value: [tf.keras.activations.hard_sigmoid,
                                                       LayerFilterParams(Activation, activation="hard_sigmoid")],
            OperatorSetNames.OPSET_FLATTEN.value: [Flatten],
            OperatorSetNames.OPSET_GET_ITEM.value: [tf.__operators__.getitem],
            OperatorSetNames.OPSET_RESHAPE.value: [Reshape, tf.reshape],
            OperatorSetNames.OPSET_PERMUTE.value: [Permute],
            OperatorSetNames.OPSET_TRANSPOSE.value: [tf.transpose],
            OperatorSetNames.OPSET_DROPOUT.value: [Dropout],
            OperatorSetNames.OPSET_SPLIT.value: [tf.split],
            OperatorSetNames.OPSET_MAXPOOL.value: [MaxPooling2D],
            OperatorSetNames.OPSET_SHAPE.value: [tf.shape, tf.compat.v1.shape],
            OperatorSetNames.OPSET_EQUAL.value: [tf.math.equal],
            OperatorSetNames.OPSET_ARGMAX.value: [tf.math.argmax],
            OperatorSetNames.OPSET_TOPK.value: [tf.nn.top_k],
            OperatorSetNames.OPSET_FAKE_QUANT_WITH_MIN_MAX_VARS.value: [tf.quantization.fake_quant_with_min_max_vars],
            OperatorSetNames.OPSET_COMBINED_NON_MAX_SUPPRESSION.value: [tf.image.combined_non_max_suppression],
            OperatorSetNames.OPSET_CROPPING2D.value: [Cropping2D],
            OperatorSetNames.OPSET_ZERO_PADDING2d.value: [ZeroPadding2D],
            OperatorSetNames.OPSET_CAST.value: [tf.cast],
            OperatorSetNames.OPSET_STRIDED_SLICE.value: [tf.strided_slice]
        }

        if FOUND_SONY_CUSTOM_LAYERS:
            # Keyed by the opset's string value, like the rest of the mapping.
            self._opset2layer[OperatorSetNames.OPSET_POST_PROCESS.value] = [SSDPostProcess]

        self._opset2attr_mapping = {
            OperatorSetNames.OPSET_CONV.value: {
                KERNEL_ATTR: DefaultDict(default_value=KERAS_KERNEL),
                BIAS_ATTR: DefaultDict(default_value=BIAS)},
            OperatorSetNames.OPSET_DEPTHWISE_CONV.value: {
                KERNEL_ATTR: DefaultDict({
                    DepthwiseConv2D: KERAS_DEPTHWISE_KERNEL,
                    tf.nn.depthwise_conv2d: KERAS_DEPTHWISE_KERNEL}, default_value=KERAS_KERNEL),
                BIAS_ATTR: DefaultDict(default_value=BIAS)},
            OperatorSetNames.OPSET_FULLY_CONNECTED.value: {
                KERNEL_ATTR: DefaultDict(default_value=KERAS_KERNEL),
                BIAS_ATTR: DefaultDict(default_value=BIAS)}}
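
A hedged usage sketch (illustrative, not part of this diff): attaching a TP model to Keras layers, once with the built-in mapping only and once with a custom operator set. Here my_tpc_model is assumed to be an existing TargetPlatformModel, "CustomElu" is a hypothetical opset name assumed to be defined in that model, and the attacher's module path is assumed to mirror attach2fw above.

import tensorflow as tf

from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \
    TpcAttach2Keras  # assumed module path

attacher = TpcAttach2Keras()

# Built-in mapping only: every opset listed above is bound to its Keras layers.
keras_tpc = attacher.attach(my_tpc_model)  # my_tpc_model: an existing TargetPlatformModel (assumed)

# Custom opset with a single-element tuple: a layer list only, no attribute mapping.
keras_tpc = attacher.attach(
    my_tpc_model,
    custom_opset2layer={"CustomElu": ([tf.nn.elu],)})  # "CustomElu" is a hypothetical opset name
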
@@ -0,0 +1,89 @@
# Copyright 2024 Sony Semiconductor Israel, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

import operator

import torch
from torch import add, sub, mul, div, divide, flatten, reshape, split, unsqueeze, dropout, sigmoid, tanh, \
chunk, unbind, topk, gather, equal, transpose, permute, argmax, squeeze, multiply, subtract, minimum, \
maximum
from torch.nn import Conv2d, Linear, ConvTranspose2d, MaxPool2d, BatchNorm2d
from torch.nn import Dropout, Flatten, Hardtanh
from torch.nn import ReLU, ReLU6, PReLU, SiLU, Sigmoid, Tanh, Hardswish, Hardsigmoid, LeakyReLU, GELU
import torch.nn.functional as F
from torch.nn.functional import relu, relu6, prelu, silu, hardtanh, hardswish, hardsigmoid, leaky_relu, gelu

from model_compression_toolkit import DefaultDict
from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, PYTORCH_KERNEL, BIAS, \
BIAS_ATTR
from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import OperatorSetNames
from model_compression_toolkit.target_platform_capabilities.target_platform import LayerFilterParams
from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2fw import \
TpcAttach2Fw


class TpcAttach2Pytorch(TpcAttach2Fw):
    def __init__(self):
        super().__init__()

        self._opset2layer = {
            OperatorSetNames.OPSET_CONV.value: [Conv2d],
            OperatorSetNames.OPSET_CONV_TRANSPOSE.value: [ConvTranspose2d],
            OperatorSetNames.OPSET_FULLY_CONNECTED.value: [Linear],
            OperatorSetNames.OPSET_CONCATENATE.value: [torch.cat, torch.concat, torch.concatenate],
            OperatorSetNames.OPSET_STACK.value: [torch.stack],
            OperatorSetNames.OPSET_UNSTACK.value: [unbind],
            OperatorSetNames.OPSET_GATHER.value: [gather],
            OperatorSetNames.OPSET_EXPAND.value: [torch.Tensor.expand],
            OperatorSetNames.OPSET_BATCH_NORM.value: [BatchNorm2d],
            OperatorSetNames.OPSET_RELU.value: [torch.relu, ReLU, relu],
            OperatorSetNames.OPSET_RELU6.value: [ReLU6, relu6],
            OperatorSetNames.OPSET_LEAKY_RELU.value: [LeakyReLU, leaky_relu],
            OperatorSetNames.OPSET_HARD_TANH.value: [LayerFilterParams(Hardtanh, min_val=0),
                                                     LayerFilterParams(hardtanh, min_val=0)],
            OperatorSetNames.OPSET_ADD.value: [operator.add, add],
            OperatorSetNames.OPSET_SUB.value: [operator.sub, sub, subtract],
            OperatorSetNames.OPSET_MUL.value: [operator.mul, mul, multiply],
            OperatorSetNames.OPSET_DIV.value: [operator.truediv, div, divide],
            OperatorSetNames.OPSET_MIN_MAX.value: [minimum, maximum],
            OperatorSetNames.OPSET_PRELU.value: [PReLU, prelu],
            OperatorSetNames.OPSET_SWISH.value: [SiLU, silu],
            OperatorSetNames.OPSET_SIGMOID.value: [Sigmoid, sigmoid, F.sigmoid],
            OperatorSetNames.OPSET_TANH.value: [Tanh, tanh, F.tanh],
            OperatorSetNames.OPSET_GELU.value: [GELU, gelu],
            OperatorSetNames.OPSET_HARDSIGMOID.value: [Hardsigmoid, hardsigmoid],
            OperatorSetNames.OPSET_HARDSWISH.value: [Hardswish, hardswish],
            OperatorSetNames.OPSET_FLATTEN.value: [Flatten, flatten],
            OperatorSetNames.OPSET_GET_ITEM.value: [operator.getitem],
            OperatorSetNames.OPSET_RESHAPE.value: [reshape],
            OperatorSetNames.OPSET_UNSQUEEZE.value: [unsqueeze],
            OperatorSetNames.OPSET_SQUEEZE.value: [squeeze],
            OperatorSetNames.OPSET_PERMUTE.value: [permute],
            OperatorSetNames.OPSET_TRANSPOSE.value: [transpose],
            OperatorSetNames.OPSET_DROPOUT.value: [Dropout, dropout],
            OperatorSetNames.OPSET_SPLIT.value: [split],
            OperatorSetNames.OPSET_CHUNK.value: [chunk],
            OperatorSetNames.OPSET_MAXPOOL.value: [MaxPool2d],
            OperatorSetNames.OPSET_SIZE.value: [torch.Tensor.size],
            OperatorSetNames.OPSET_SHAPE.value: [torch.Tensor.shape],
            OperatorSetNames.OPSET_EQUAL.value: [equal],
            OperatorSetNames.OPSET_ARGMAX.value: [argmax],
            OperatorSetNames.OPSET_TOPK.value: [topk],
        }

        pytorch_linear_attr_mapping = {KERNEL_ATTR: DefaultDict(default_value=PYTORCH_KERNEL),
                                       BIAS_ATTR: DefaultDict(default_value=BIAS)}
        self._opset2attr_mapping = {OperatorSetNames.OPSET_CONV.value: pytorch_linear_attr_mapping,
                                    OperatorSetNames.OPSET_FULLY_CONNECTED.value: pytorch_linear_attr_mapping}
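
A hedged usage sketch (illustrative, not part of this diff) of the two-element custom-opset form, where the second tuple element supplies the attribute-name mapping. Here my_tpc_model and the "CustomBilinear" opset name are assumptions, and the attacher's module path is assumed to mirror attach2fw above.

import torch

from model_compression_toolkit import DefaultDict
from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, \
    PYTORCH_KERNEL, BIAS
from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2pytorch import \
    TpcAttach2Pytorch  # assumed module path

attacher = TpcAttach2Pytorch()
pytorch_tpc = attacher.attach(
    my_tpc_model,  # assumed: an existing TargetPlatformModel defining a "CustomBilinear" opset
    custom_opset2layer={
        # Two-element tuple: layer list plus a mapping of generic -> framework attribute names.
        "CustomBilinear": ([torch.nn.Bilinear],
                           {KERNEL_ATTR: DefaultDict(default_value=PYTORCH_KERNEL),
                            BIAS_ATTR: DefaultDict(default_value=BIAS)})})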