Add unsqueeze and transpose patterns #178

Merged 11 commits on Dec 18, 2024
@@ -80,6 +80,8 @@
#include "op/shape_of.hpp"
#include "op/slice.hpp"
#include "op/squeeze.hpp"
#include "op/transpose.hpp"
#include "op/unsqueeze.hpp"
#include "op/binary_eltwise.hpp"
#include "openvino/core/dimension.hpp"
#include "openvino/core/rt_info.hpp"
@@ -325,6 +327,8 @@ void injectMLIR(std::shared_ptr<ov::Model> model,
    manager.register_pass<ShapeOfPattern>();
    manager.register_pass<SlicePattern>();
    manager.register_pass<SqueezePattern>();
    manager.register_pass<TransposePattern>();
    manager.register_pass<UnsqueezePattern>();
    manager.register_pass<MatMulPattern>();
    manager.register_pass<Partitioner>(context, mode, loweringContext);
    manager.run_passes(model);
@@ -0,0 +1,54 @@
// Copyright (C) 2018-2024 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"

#include <openvino/op/transpose.hpp>
#include "openvino/pass/pattern/op/wrap_type.hpp"

#include "transpose.hpp"
#include "../convert_common.hpp"

namespace {

using namespace ov::mlir;

struct ConvertTranspose {
    void operator()(ConversionContext& context, NodePtr node) {
        auto loc = createLocation(context.context, node);
        auto& builder = context.builder();
        const auto input = context.getInputs(node)[0];
        // TODO: support dynamic inputs
        // const auto order = context.getInputs(node)[1];

        const auto ov_output_element_type = node->get_output_element_type(0);
        const auto ov_output_shape = node->get_output_partial_shape(0);
        auto out_type = importTensor(context.context, ov_output_shape, ov_output_element_type);
        auto dynamic_dimensions = context.get_dynamic_dimension_values(ov_output_shape);

        auto const_order = dynamic_cast<ov::op::v0::Constant*>(node->get_input_node_ptr(1));
        assert(const_order && "non-const order not supported");
        ov::Coordinate coords = const_order->get_coordinate_val();
        SmallVector<int64_t> order(coords.begin(), coords.end());

        // The OV transpose order is reused directly as the linalg.transpose permutation.
        auto empty = builder.create<tensor::EmptyOp>(loc, out_type, dynamic_dimensions);
        auto transpose = builder.create<linalg::TransposeOp>(loc, input, empty, order);
        context.addOutputs(node, transpose);
    }
};

} // namespace

namespace ov {
namespace mlir {

using namespace ov::pass::pattern;
using namespace ov::op;

TransposePattern::TransposePattern() : MarkPattern(wrap_type<v1::Transpose>({any_input(), any_input()}), ConvertTranspose()) {}

} // namespace mlir
} // namespace ov
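For reference, here is a rough sketch of the IR this conversion is expected to emit; it is illustrative only, not taken from the PR, and the value names and shapes are made up. For a v1::Transpose with a constant order of [0, 2, 1] on a tensor<2x3x4xf32> input, the pattern creates a tensor.empty init with the output shape and a linalg.transpose whose permutation is the OV order:

    // Illustrative sketch, assuming a static 3-D input and order [0, 2, 1].
    %init = tensor.empty() : tensor<2x4x3xf32>
    %transposed = linalg.transpose
        ins(%input : tensor<2x3x4xf32>)
        outs(%init : tensor<2x4x3xf32>)
        permutation = [0, 2, 1]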

@@ -0,0 +1,23 @@
// Copyright (C) 2018-2024 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include "mlir/IR/Builders.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Value.h"

#include "../conversion_context.hpp"

namespace ov {
namespace mlir {

class TransposePattern : public MarkPattern {
public:
    OPENVINO_RTTI("TransposePattern", "0");
    TransposePattern();
};

} // namespace mlir
} // namespace ov
@@ -0,0 +1,75 @@
// Copyright (C) 2018-2024 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "mlir/Dialect/Shape/IR/Shape.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"

#include <openvino/op/constant.hpp>
#include <openvino/op/unsqueeze.hpp>
#include "openvino/pass/pattern/op/wrap_type.hpp"

#include "unsqueeze.hpp"
#include "../convert_common.hpp"


namespace {

using namespace ov::mlir;

struct ConvertUnsqueeze {
    void operator()(ConversionContext& context, NodePtr node) {
        auto loc = createLocation(context.context, node);
        auto& builder = context.builder();
        const auto input = context.getInputs(node)[0];
        // TODO: support dynamic inputs
        // const auto axes = context.getInputs(node)[1];

        const auto ov_output_element_type = node->get_output_element_type(0);
        const auto ov_input_shape = node->get_input_partial_shape(0);

        assert(ov_input_shape.rank().is_static() && "expecting static input rank");

        auto const_axes = dynamic_cast<ov::op::v0::Constant*>(node->get_input_node_ptr(1));
        assert(const_axes && "non-const axes not supported");
        ov::Coordinate coords = const_axes->get_coordinate_val();

        // Calculate the resulting shape.
        // E.g., for an input tensor<4x2xf32> and axes [0, 2] (tensor<2xi64>), we need to build the shape 1x4x1x2.
        SmallVector<ReassociationIndices> expand_groups;
        ReassociationIndices group = ReassociationIndices();
        SmallVector<int64_t> shape(coords.size() + ov_input_shape.rank().get_length());
        for (size_t input_idx = 0, coord_idx = 0, i = 0; i < shape.size(); ++i) {
            group.push_back(i);
            if (coord_idx < coords.size() && i == coords[coord_idx]) {
                shape[i] = 1;
                coord_idx++;
            } else {
                const auto& dim = ov_input_shape[input_idx];
                shape[i] = dim.is_dynamic() ? ShapedType::kDynamic : dim.get_length();
                input_idx++;
                expand_groups.push_back(group);
                group = ReassociationIndices();
            }
        }
        // Unsqueezed axes at the end of the output are not followed by an input dim, so the
        // group collecting them is never flushed inside the loop; fold any leftover indices
        // into the last reassociation group.
        if (!group.empty() && !expand_groups.empty()) {
            expand_groups.back().append(group.begin(), group.end());
        }
Comment on lines +40 to +55

Owner:
Why can't we just take it as node->get_output_partial_shape(0)? I believe the shape in MLIR should match the shape from OV.

Collaborator (Author):
Would it always be available? (dynamic shapes?)

Collaborator (Author):
I'd still need to generate the reassociation groups anyway, though.

Owner:
get_output_partial_shape always returns a shape, including dynamic shapes, which means a part of the dimensions can be dynamic, and you already know how to detect them -- dim.is_dynamic(). But I agree that you still need to build reassociation groups.
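As a hypothetical illustration of the point above (not part of the PR): a dynamic input dimension simply stays dynamic in the result type, because dim.is_dynamic() maps it to ShapedType::kDynamic. Unsqueezing a tensor<?x2xf32> input at axis 0 would then produce roughly:

    // Illustrative sketch; the dynamic source dim carries over into result dim 1.
    // Newer MLIR releases additionally print an output_shape list carrying the dynamic extent.
    %expanded = tensor.expand_shape %input [[0, 1], [2]]
        : tensor<?x2xf32> into tensor<1x?x2xf32>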


        auto result_type = RankedTensorType::get(shape, importPrecision(context.context, ov_output_element_type));
        auto expand_shape = builder.create<tensor::ExpandShapeOp>(loc, result_type, input, expand_groups);
        context.addOutputs(node, expand_shape);
    }
};

} // namespace

namespace ov {
namespace mlir {

using namespace ov::pass::pattern;
using namespace ov::op;

UnsqueezePattern::UnsqueezePattern() : MarkPattern(wrap_type<v0::Unsqueeze>({any_input(), any_input()}), ConvertUnsqueeze()) {}

} // namespace mlir
} // namespace ov
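To make the reassociation logic concrete, here is a worked sketch of the example from the code comment (illustrative only, not taken from the PR): for an input of tensor<4x2xf32> and axes [0, 2], the loop produces the shape [1, 4, 1, 2] and the reassociation groups [[0, 1], [2, 3]], i.e. source dim 0 expands into result dims 0-1 and source dim 1 into result dims 2-3. The emitted op would then look roughly like:

    // Illustrative sketch; the exact printed assembly depends on the MLIR version
    // (newer releases also print an output_shape list).
    %expanded = tensor.expand_shape %input [[0, 1], [2, 3]]
        : tensor<4x2xf32> into tensor<1x4x1x2xf32>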

@@ -0,0 +1,24 @@
// Copyright (C) 2018-2024 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include "mlir/IR/Builders.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Value.h"

#include "../conversion_context.hpp"

namespace ov {
namespace mlir {

class UnsqueezePattern : public MarkPattern {
public:
    OPENVINO_RTTI("UnsqueezePattern", "0");
    UnsqueezePattern();
};

} // namespace mlir
} // namespace ov
