From 2fad0153691556082bbed3446ed67cc284aaa510 Mon Sep 17 00:00:00 2001
From: Fripping <15010770306@163.com>
Date: Mon, 5 Aug 2024 15:32:35 +0800
Subject: [PATCH 1/8] a

---
 .../multiary_infer_sym-checkpoint.cc          | 1234 ++++
 .../.ipynb_checkpoints/ops-checkpoint.yaml    | 5027 +++++++++++++++++
 2 files changed, 6261 insertions(+)
 create mode 100644 paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/multiary_infer_sym-checkpoint.cc
 create mode 100755 paddle/phi/ops/yaml/.ipynb_checkpoints/ops-checkpoint.yaml

diff --git a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/multiary_infer_sym-checkpoint.cc b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/multiary_infer_sym-checkpoint.cc
new file mode 100644
index 0000000000000..5fb88771a0490
--- /dev/null
+++ b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/multiary_infer_sym-checkpoint.cc
@@ -0,0 +1,1234 @@
+// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/common/ddim.h"
+#include "paddle/common/layout.h"
+#include "paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/infer_sym_slice_utils.h"
+#include "paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/infer_sym_utils.h"
+#include "paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/multiary_infer_sym.h"
+#include "paddle/fluid/pir/dialect/operator/ir/op_attribute.h"
+
+namespace paddle::dialect {
+
+bool AccuracyOpInferSymbolicShape(
+    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
+  const symbol::ShapeOrDataDimExprs &out_shape =
+      infer_context->GetShapeOrDataForValue(op->operand_source(0));
+  const symbol::ShapeOrDataDimExprs &label_shape =
+      infer_context->GetShapeOrDataForValue(op->operand_source(2));
+
+  // Assume indices has same shape as inference, because
+  // it's the output of topk.
+  PADDLE_ENFORCE_EQ(
+      label_shape.shape().size(),
+      2UL,
+      common::errors::InvalidArgument(
+          "ShapeError: label's dimensions of AccuracyOp must be 2. "
+          "But received label's dimensions = %d",
+          label_shape.shape().size()));
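// A concrete-shape analogue of the rank check above and the two AddEqualCstr
// calls just below (an illustrative sketch, not part of this patch): label
// must have shape [N, 1] and the inference output must share the batch dim N.
//
//   void CheckAccuracyShapes(const std::vector<int64_t> &out,
//                            const std::vector<int64_t> &label) {
//     assert(label.size() == 2 && label[1] == 1);  // label is [N, 1]
//     assert(!out.empty() && out[0] == label[0]);  // same batch dimension N
//   }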
" + "But received label's dimensions = %d", + label_shape.shape().size())); + + infer_context->AddEqualCstr(label_shape.shape()[1], symbol::DimExpr{1}); + infer_context->AddEqualCstr(out_shape.shape()[0], label_shape.shape()[0]); + + std::vector accuracy_shape = {}; + infer_context->SetShapeOrDataForValue( + op->result(0), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(accuracy_shape)}); + + std::vector correct_shape = {}; + infer_context->SetShapeOrDataForValue( + op->result(1), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(correct_shape)}); + + std::vector total_shape = {}; + infer_context->SetShapeOrDataForValue( + op->result(2), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(total_shape)}); + + return true; +} + +bool AddNOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + const auto &input_list_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + PADDLE_ENFORCE_EQ( + input_list_shape.isa(), + true, + common::errors::InvalidArgument( + "The type of inputs shape should be TensorListShapeOrDataDimExprs")); + const auto &inputs_shape = + input_list_shape.dyn_cast(); + PADDLE_ENFORCE_GT( + inputs_shape.size(), + 0, + common::errors::InvalidArgument( + "The input tensor X's dimensions of AddNOp " + "should be larger than 0. But received X's dimensions %d.", + inputs_shape.size())); + symbol::TensorShapeOrDataDimExprs candidate_shape = inputs_shape.front(); + for (size_t i = 1; i < inputs_shape.size(); ++i) { + // 0D tensor + if (inputs_shape[i].shape().size() == 0) { + continue; + } + if (candidate_shape.shape().size() == 0) { + candidate_shape = inputs_shape[i]; + continue; + } + for (size_t j = 0; j < candidate_shape.shape().size(); ++j) { + infer_context->AddEqualCstr(candidate_shape.shape()[j], + inputs_shape[i].shape()[j]); + } + } + infer_context->SetShapeOrDataForValue( + op->result(0), symbol::ShapeOrDataDimExprs{candidate_shape}); + + return true; +} + +bool AddmmOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + const auto &input_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + const auto &x_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(1)); + const auto &y_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(2)); + + auto ndim_input = input_shape.shape().size(); + auto ndim_x = x_shape.shape().size(); + auto ndim_y = y_shape.shape().size(); + + PADDLE_ENFORCE_EQ(ndim_input == 2 || ndim_input == 1, + true, + common::errors::InvalidArgument( + "The input tensor input's dimension must be 2 or 1. " + "But received input's dimension = [%d].", + ndim_input)); + PADDLE_ENFORCE_EQ(ndim_x, + 2, + common::errors::InvalidArgument( + "The input tensor x's dimension must be 2. " + "But received x's dimension = [%d].", + ndim_x)); + PADDLE_ENFORCE_EQ(ndim_y, + 2, + common::errors::InvalidArgument( + "The input tensor y's dimension must be 2. 
" + "But received y's dimension = [%d].", + ndim_y)); + + std::vector output_shape; + output_shape.push_back(x_shape.shape()[0]); + output_shape.push_back(y_shape.shape()[1]); + + infer_context->SetShapeOrDataForValue( + op->result(0), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(output_shape)}); + + infer_context->AddEqualCstr(x_shape.shape()[1], y_shape.shape()[0]); + + if (ndim_input == 2) { + infer_context->AddBroadcastableCstr(input_shape.shape()[0], + x_shape.shape()[0]); + infer_context->AddBroadcastableCstr(input_shape.shape()[1], + y_shape.shape()[1]); + } else if (ndim_input == 1) { + infer_context->AddBroadcastableCstr(input_shape.shape()[0], + y_shape.shape()[1]); + } + + return true; +} + +bool Addmm_OpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + return AddmmOpInferSymbolicShape(op, infer_context); +} + +bool AucOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + const auto &predict_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + const auto &label_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(1)); + + PADDLE_ENFORCE_GE( + predict_shape.shape().size(), + 2, + common::errors::InvalidArgument( + "The Input(Predict) has not been initialized properly. The " + "shape of Input(Predict) = [%s], the shape size must be " + "greater_equal 2.", + predict_shape.shape())); + + const auto &predict_height = predict_shape.shape()[0]; + const auto &label_height = label_shape.shape()[0]; + + infer_context->AddEqualCstr(predict_height, label_height); + + int num_thresholds = + op->attribute("num_thresholds").data(); + int slide_steps = op->attribute("slide_steps").data(); + + int num_pred_buckets = num_thresholds + 1; + + PADDLE_ENFORCE_GE( + num_pred_buckets, + 1, + common::errors::InvalidArgument("num_thresholds must larger than 1")); + PADDLE_ENFORCE_GE( + slide_steps, + 0, + common::errors::InvalidArgument("slide_steps must be natural number")); + + infer_context->SetShapeOrDataForValue( + op->result(0), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(std::vector{})}); + + if (slide_steps) { + infer_context->SetShapeOrDataForValue( + op->result(1), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(std::vector{ + (1 + slide_steps) * num_pred_buckets + 1})}); + infer_context->SetShapeOrDataForValue( + op->result(2), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(std::vector{ + (1 + slide_steps) * num_pred_buckets + 1})}); + } else { + infer_context->SetShapeOrDataForValue( + op->result(1), + symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs( + std::vector{1, num_pred_buckets})}); + infer_context->SetShapeOrDataForValue( + op->result(2), + symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs( + std::vector{1, num_pred_buckets})}); + } + + return true; +} + +bool BatchNormOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const auto &x_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + const auto &scale_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(3)); + const auto &bias_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(4)); + + std::vector x_dims = x_shape_or_data.shape(); + + std::string data_layout_str = + op->attribute("data_format").AsString(); + const DataLayout data_layout = common::StringToDataLayout(data_layout_str); + + 
PADDLE_ENFORCE_GE( + x_dims.size(), + 2, + phi::errors::InvalidArgument( + "ShapeError: the dimension of input " + "X must greater than or equal to 2. But received: the shape of input " + "X = [%s], the dimension of input X =[%d]", + x_dims, + x_dims.size())); + PADDLE_ENFORCE_LE( + x_dims.size(), + 5, + phi::errors::InvalidArgument( + "ShapeError: the dimension of input X " + "must smaller than or equal to 5. But received: the shape of input X " + "= [%s], the dimension of input X = [%d]", + x_dims, + x_dims.size())); + + symbol::DimExpr C = (data_layout == DataLayout::kNCHW) + ? x_dims[1] + : x_dims[x_dims.size() - 1]; + + if (!scale_shape_or_data.isa()) { + std::vector scale_dims = scale_shape_or_data.shape(); + PADDLE_ENFORCE_EQ(scale_dims.size(), + 1UL, + phi::errors::InvalidArgument( + "ShapeError: the dimension of scale must equal to 1." + "But received: the dimension of scale is [%d]", + scale_dims.size())); + infer_context->AddEqualCstr(scale_dims[0], C); + } + + if (!bias_shape_or_data.isa()) { + std::vector bias_dims = bias_shape_or_data.shape(); + PADDLE_ENFORCE_EQ(bias_dims.size(), + 1UL, + phi::errors::InvalidArgument( + "ShapeError: the dimension of bias must equal to 1." + "But received: the dimension of bias is [%d]", + bias_dims.size())); + infer_context->AddEqualCstr(bias_dims[0], C); + } + + // Set output shapes + infer_context->SetShapeOrDataForValue( + op->result(0), + symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs(x_dims)}); + + std::vector param_dims = {C}; + infer_context->SetShapeOrDataForValue( + op->result(1), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(param_dims)}); + infer_context->SetShapeOrDataForValue( + op->result(2), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(param_dims)}); + + if (op->result(3) && op->result(3).type()) { + infer_context->SetShapeOrDataForValue( + op->result(3), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(param_dims)}); + } + if (op->result(4) && op->result(4).type()) { + infer_context->SetShapeOrDataForValue( + op->result(4), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(param_dims)}); + } + if (op->result(5) && op->result(5).type()) { + std::vector reserve_space_dims{ + symbol::DimExpr{infer_context->GetNextSymName()}}; + infer_context->SetShapeOrDataForValue( + op->result(5), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(reserve_space_dims)}); + } + + return true; +} + +bool BatchNorm_OpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + return BatchNormOpInferSymbolicShape(op, infer_context); +} + +bool BicubicInterpOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const symbol::ShapeOrDataDimExprs &x = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + + const auto &attributes = op->attributes(); + + const std::string data_format = + attributes.at("data_format").dyn_cast().AsString(); + int out_d = attributes.at("out_d").dyn_cast().data(); + int out_h = attributes.at("out_h").dyn_cast().data(); + int out_w = attributes.at("out_w").dyn_cast().data(); + const std::vector &scale = details::GetVectorAttr(op, "scale"); + + const bool has_size_tensor = [&] { + pir::Value size_tensor = op->operand_source(2); + if (!size_tensor || !size_tensor.type()) { + return false; + } + const auto &list_size_tensor = + size_tensor.type().dyn_cast(); + return list_size_tensor && !list_size_tensor.empty(); + }(); + auto 
GetSizeTensorDataExpr = + [&](pir::Value value) -> std::vector { + const symbol::ShapeOrDataDimExprs &size_tensor_shape = + infer_context->GetShapeOrDataForValue(value); + PADDLE_ENFORCE_EQ( + size_tensor_shape.isa(), + true, + common::errors::InvalidArgument( + "The size_tensor of Interpolation should be type of " + "TensorListShapeOrDataDimExprs")); + return details::GetOrCreateExprVecFromData(size_tensor_shape, + infer_context); + }; + auto GetOutSizeDataExpr = + [&](pir::Value value) -> std::vector { + const symbol::ShapeOrDataDimExprs &out_size_tensor_shape = + infer_context->GetShapeOrDataForValue(value); + return details::GetOrCreateExprVecFromData(out_size_tensor_shape, + infer_context); + }; + auto GetOutDimByScale = [&](const symbol::DimExpr &in_dim, + float scale) -> symbol::DimExpr { + PADDLE_ENFORCE_GT(scale, + 0, + common::errors::InvalidArgument( + "The scale in Attr(scale) of Operator(interpolate) " + "should be greater than 0, but received value is %d.", + scale)); + if (in_dim.isa()) { + return symbol::DimExpr{ + static_cast(in_dim.dyn_cast() * scale)}; + } + return symbol::DimExpr{infer_context->GetNextSymName()}; + }; + + std::vector size_tensor; + if (out_d != -1) size_tensor.push_back(out_d); + if (out_h != -1) size_tensor.push_back(out_h); + if (out_w != -1) size_tensor.push_back(out_w); + + const DataLayout data_layout = common::StringToDataLayout(data_format); + + if (x.shape().size() == 3) { + // shape check for 1D interpolate for input tensor shape NCHW + if (!size_tensor.empty()) { + // top priority size + std::vector dim_out; + if (data_layout == DataLayout::kNCHW) { + dim_out = {x.shape()[0], x.shape()[1], symbol::DimExpr{out_w}}; + } else { + dim_out = {x.shape()[0], symbol::DimExpr{out_w}, x.shape()[2]}; + } + + symbol::ShapeOrDataDimExprs shape_data{ + symbol::TensorShapeOrDataDimExprs(dim_out)}; + + pir::Value res = op->result(0); + infer_context->SetShapeOrDataForValue(res, shape_data); + return true; + } + + symbol::DimExpr out_w_tmp{0}; + const auto &next_sym = infer_context->GetNextSymName(); + out_w_tmp = symbol::DimExpr(next_sym); + + std::vector dim_out; + if (data_layout == DataLayout::kNCHW) { + dim_out = {x.shape()[0], x.shape()[1], out_w_tmp}; + } else { + dim_out = {x.shape()[0], out_w_tmp, x.shape()[2]}; + } + + symbol::ShapeOrDataDimExprs shape_data{ + symbol::TensorShapeOrDataDimExprs(dim_out)}; + + pir::Value res = op->result(0); + infer_context->SetShapeOrDataForValue(res, shape_data); + return true; + } else if (x.shape().size() == 4) { + // shape check for 2D interpolate for input tensor shape NCHW + auto GetOutHW = [&]() -> std::tuple { + // top priority size + if (has_size_tensor) { + const auto &size_tensor_list_shape = + GetSizeTensorDataExpr(op->operand_source(2)); + PADDLE_ENFORCE_EQ(size_tensor_list_shape.size(), + 2, + common::errors::InvalidArgument( + "The size of size_tensor list should be 2.")); + return std::make_tuple(size_tensor_list_shape.at(0), + size_tensor_list_shape.at(1)); + } + // has out_size tensor + if (op->operand_source(1)) { + const auto &out_size_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(1)); + PADDLE_ENFORCE_EQ( + out_size_shape_or_data.shape().size(), + 1, + common::errors::InvalidArgument( + "The rank of input out_size tensor should be 1.")); + infer_context->AddEqualCstr(out_size_shape_or_data.shape()[0], + symbol::DimExpr{2}); + const auto &out_size_data = GetOutSizeDataExpr(op->operand_source(1)); + return std::make_tuple(symbol::DimExpr{out_size_data[0]}, + 
symbol::DimExpr{out_size_data[1]}); + } + // has scale + if (scale.size() == 2) { + float scale_h = scale[0]; + float scale_w = scale[1]; + const auto &in_h = + data_layout == DataLayout::kNCHW ? x.shape()[2] : x.shape()[1]; + const auto &in_w = + data_layout == DataLayout::kNCHW ? x.shape()[3] : x.shape()[2]; + return std::make_tuple(GetOutDimByScale(in_h, scale_h), + GetOutDimByScale(in_w, scale_w)); + } + + return std::make_tuple(symbol::DimExpr{out_h}, symbol::DimExpr{out_w}); + }; + + const std::vector dim_out = [&] { + const auto &[out_h_sym, out_w_sym] = GetOutHW(); + if (data_layout == DataLayout::kNCHW) { + return std::vector{ + x.shape()[0], x.shape()[1], out_h_sym, out_w_sym}; + } else { + return std::vector{ + x.shape()[0], out_h_sym, out_w_sym, x.shape()[3]}; + } + }(); + + symbol::ShapeOrDataDimExprs shape_data{ + symbol::TensorShapeOrDataDimExprs(dim_out)}; + infer_context->SetShapeOrDataForValue(op->result(0), shape_data); + + return true; + } else if (x.shape().size() == 5) { + auto GetOutDHW = + [&]() -> std::tuple { + // top priority size + if (has_size_tensor) { + const auto &size_tensor_list_shape = + GetSizeTensorDataExpr(op->operand_source(2)); + PADDLE_ENFORCE_EQ(size_tensor_list_shape.size(), + 3, + common::errors::InvalidArgument( + "The size of size_tensor list should be 3.")); + return std::make_tuple(size_tensor_list_shape.at(0), + size_tensor_list_shape.at(1), + size_tensor_list_shape.at(2)); + } + // has out_size tensor + if (op->operand_source(1)) { + const auto &out_size_data = GetOutSizeDataExpr(op->operand_source(1)); + return std::make_tuple(symbol::DimExpr{out_size_data[0]}, + symbol::DimExpr{out_size_data[1]}, + symbol::DimExpr{out_size_data[2]}); + } + // has scale + if (scale.size() == 3) { + float scale_d = scale[0]; + float scale_h = scale[1]; + float scale_w = scale[2]; + const auto &in_d = + data_layout == DataLayout::kNCHW ? x.shape()[2] : x.shape()[1]; + const auto &in_h = + data_layout == DataLayout::kNCHW ? x.shape()[3] : x.shape()[2]; + const auto &in_w = + data_layout == DataLayout::kNCHW ? 
x.shape()[4] : x.shape()[3]; + return std::make_tuple(GetOutDimByScale(in_d, scale_d), + GetOutDimByScale(in_h, scale_h), + GetOutDimByScale(in_w, scale_w)); + } + + return std::make_tuple(symbol::DimExpr{out_d}, + symbol::DimExpr{out_h}, + symbol::DimExpr{out_w}); + }; + + const std::vector dim_out = [&] { + const auto &[out_d_sym, out_h_sym, out_w_sym] = GetOutDHW(); + if (data_layout == DataLayout::kNCHW) { + return std::vector{ + x.shape()[0], x.shape()[1], out_d_sym, out_h_sym, out_w_sym}; + } else { + return std::vector{ + x.shape()[0], out_d_sym, out_h_sym, out_w_sym, x.shape()[4]}; + } + }(); + + symbol::ShapeOrDataDimExprs shape_data{ + symbol::TensorShapeOrDataDimExprs(dim_out)}; + infer_context->SetShapeOrDataForValue(op->result(0), shape_data); + return true; + } else { + PADDLE_THROW( + common::errors::Fatal("Input(X) dimension must be 3, 4 or 5!")); + } + + return true; +} + +bool BilinearOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const auto &x_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + const auto &y_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(1)); + const auto &weight_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(2)); + + PADDLE_ENFORCE_EQ( + x_shape.shape().size(), + 2UL, + common::errors::InvalidArgument("The input(X) must be a 2D Tensor.")); + PADDLE_ENFORCE_EQ( + y_shape.shape().size(), + 2UL, + common::errors::InvalidArgument("The input(Y) must be a 2D Tensor.")); + PADDLE_ENFORCE_EQ( + weight_shape.shape().size(), + 3UL, + common::errors::InvalidArgument( + "Expected the input(Weight) is a 3D tensor. But received %dD tensor.", + weight_shape.shape().size())); + + infer_context->AddEqualCstr(x_shape.shape()[0], y_shape.shape()[0]); + + infer_context->AddEqualCstr(x_shape.shape()[1], weight_shape.shape()[1]); + infer_context->AddEqualCstr(y_shape.shape()[1], weight_shape.shape()[2]); + + if (op->operand_source(3)) { // has bias + const auto &bias_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(3)); + PADDLE_ENFORCE_EQ(bias_shape.shape().size(), + 2UL, + common::errors::InvalidArgument( + "The Input(Bias) must be a 2-D tensor with " + "the 2nd dimension fixed to 1 (a row vector).")); + infer_context->AddEqualCstr(bias_shape.shape()[0], symbol::DimExpr{1}); + infer_context->AddEqualCstr(bias_shape.shape()[1], weight_shape.shape()[0]); + } + + infer_context->SetShapeOrDataForValue( + op->result(0), + symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs( + {x_shape.shape()[0], weight_shape.shape()[0]})}); + + return true; +} + +bool BilinearInterpOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + return BicubicInterpOpInferSymbolicShape(op, infer_context); +} + +bool CrossEntropyWithSoftmaxOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const symbol::ShapeOrDataDimExprs &input_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + const symbol::ShapeOrDataDimExprs &index_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(1)); + + const auto &input_dim = input_shape.shape(); + const auto &index_dim = index_shape.shape(); + const auto &attributes = op->attributes(); + int axis = attributes.at("axis").dyn_cast().data(); + if (axis < 0) axis += input_shape.shape().size(); + bool soft_label = + attributes.at("soft_label").dyn_cast().data(); + PADDLE_ENFORCE(!soft_label || input_dim.size() == 
index_dim.size(), + common::errors::InvalidArgument( + "The input and index should have the same rank when " + "soft_label is true. But received input rank(%d) and " + "index rank(%d)", + input_dim.size(), + index_dim.size())); + + auto softmax_dim = index_dim; + auto out_dim = index_dim; + + if (index_dim.size() == input_dim.size()) { + if (soft_label) { + out_dim[axis] = 1; + } + softmax_dim[axis] = input_dim[axis]; + } else { + softmax_dim.insert(softmax_dim.begin() + axis, input_dim[axis]); + if (soft_label) { + out_dim.insert(out_dim.begin() + axis, 1); + } + } + + infer_context->SetShapeOrDataForValue( + op->result(0), symbol::TensorShapeOrDataDimExprs(softmax_dim)); + infer_context->SetShapeOrDataForValue( + op->result(1), symbol::TensorShapeOrDataDimExprs(out_dim)); + + return true; +} + +bool CrossEntropyWithSoftmax_OpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + return CrossEntropyWithSoftmaxOpInferSymbolicShape(op, infer_context); +} + +bool ConcatOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + const auto &axis_expr = + infer_context->GetShapeOrDataForValue(op->operand_source(1)); + if (!axis_expr.data() || !axis_expr.data()->at(0).isa()) { + pir::Value res = op->result(0); + infer_context->SetSymbolForValueByStaticShape(res); + return true; + } + + pir::Value operand_source = op->operand_source(0); + const auto &shape_data_list = + infer_context->GetShapeOrDataForValue(operand_source) + .dyn_cast(); + + size_t rank = shape_data_list.at(0).shape().size(); + const int64_t axis = [&] { + int64_t axis = axis_expr.data()->at(0).dyn_cast(); + return axis >= 0 ? axis : std::max(int64_t(0), int64_t(axis + rank)); + }(); + + if (shape_data_list.at(0).data().has_value()) { + if (rank == 1) { + const auto &s_or_d = + infer_context->GetShapeOrDataForValue(operand_source); + ExprVec data = details::GetExprVecFromData(s_or_d); + + const std::vector shape{std::int64_t(data.size())}; + symbol::ShapeOrDataDimExprs shape_data{ + symbol::TensorShapeOrDataDimExprs(shape, data)}; + pir::Value res = op->result(0); + infer_context->SetShapeOrDataForValue(res, shape_data); + + return true; + } else { + PADDLE_THROW(common::errors::Unimplemented( + op->name() + + " 's InferSymbolicShape can NOT deal with rank > 1 now.")); + } + std::vector data; + data.reserve(shape_data_list.size()); + for (auto &data_elem : shape_data_list) { + data.push_back(data_elem.data().value().at(0)); + } + const std::vector shape{std::int64_t(data.size())}; + symbol::ShapeOrDataDimExprs shape_data{ + symbol::TensorShapeOrDataDimExprs(shape, data)}; + pir::Value res = op->result(0); + infer_context->SetShapeOrDataForValue(res, shape_data); + + return true; + } + + const std::vector &out_dims = [&] { + std::vector out_dims = shape_data_list.at(0).shape(); + for (size_t i = 0; i < rank; ++i) { + if (i != static_cast(axis)) { + details::BuildCstrEqForTensorListAlongAxis( + infer_context, shape_data_list, i); + continue; + } + for (size_t j = 1; j < shape_data_list.size(); ++j) { + out_dims.at(axis) = + out_dims.at(axis) + shape_data_list.at(j).shape().at(axis); + } + } + return out_dims; + }(); + + symbol::ShapeOrDataDimExprs shape_data{ + symbol::TensorShapeOrDataDimExprs(out_dims)}; + + pir::Value res = op->result(0); + infer_context->SetShapeOrDataForValue(res, shape_data); + + return true; +} + +bool FullWithTensorOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + pir::Value operand_source = 
op->operand_source(1);
+  const symbol::ShapeOrDataDimExprs &operand_shape_or_data =
+      infer_context->GetShapeOrDataForValue(operand_source);
+
+  const auto &out_shape = operand_shape_or_data.data().has_value()
+                              ? operand_shape_or_data.data().value()
+                              : operand_shape_or_data.shape();
+
+  infer_context->SetShapeOrDataForValue(
+      op->result(0), symbol::TensorShapeOrDataDimExprs(out_shape));
+  return true;
+}
+
+bool FlashAttnOpInferSymbolicShape(
+    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
+  pir::Value operand_source = op->operand_source(0);
+  const symbol::ShapeOrDataDimExprs &q =
+      infer_context->GetShapeOrDataForValue(operand_source);
+
+  const symbol::ShapeOrDataDimExprs &k =
+      infer_context->GetShapeOrDataForValue(op->operand_source(1));
+
+  const symbol::ShapeOrDataDimExprs &v =
+      infer_context->GetShapeOrDataForValue(op->operand_source(2));
+
+  PADDLE_ENFORCE_EQ(q.shape().size(),
+                    4,
+                    common::errors::InvalidArgument(
+                        "flash_attn receives input with dims "
+                        "[batch_size, seq_len, num_heads, head_dim]"));
+
+  infer_context->AddEqualCstr(q.shape()[0], k.shape()[0]);
+  infer_context->AddEqualCstr(q.shape()[0], v.shape()[0]);
+  infer_context->AddEqualCstr(k.shape()[1], v.shape()[1]);
+
+  if (op->operand_source(4)) {
+    const symbol::ShapeOrDataDimExprs &attn_mask =
+        infer_context->GetShapeOrDataForValue(op->operand_source(4));
+    infer_context->AddEqualCstr(attn_mask.shape()[0], q.shape()[0]);
+    infer_context->AddEqualCstr(attn_mask.shape()[2], q.shape()[1]);
+    infer_context->AddEqualCstr(attn_mask.shape()[3], k.shape()[1]);
+  }
+
+  std::vector<symbol::DimExpr> out_shape = q.shape();
+
+  out_shape.back() = v.shape().back();
+
+  infer_context->SetShapeOrDataForValue(
+      op->result(0), symbol::TensorShapeOrDataDimExprs(out_shape));
+
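// The rounding below pads each sequence length up to the next multiple of
// 128, e.g. 300 -> 384. The same arithmetic on plain integers, as a
// self-contained sketch (RoundUpToMultiple is an illustrative name, not a
// Paddle API):
//
//   int64_t RoundUpToMultiple(int64_t x, int64_t m) {
//     // m = 128 here: (300 + 127) / 128 * 128 == 384
//     return (x + m - 1) / m * m;
//   }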
+  // GPU rounds the seqlen, but XPU does not. Here we align with the GPU
+  // version.
+  auto round_multiple = [](symbol::DimExpr x) {
+    auto m = symbol::DimExpr{128};
+    auto m_minus_one = symbol::DimExpr{127};
+    return (x + m_minus_one) / m * m;
+  };
+  auto batch_size_expr = q.shape()[0];
+  auto num_heads_expr = q.shape()[2];
+  auto seqlen_q_rounded_expr = round_multiple(q.shape()[1]);
+  auto seqlen_k_rounded_expr = round_multiple(k.shape()[1]);
+  if (op->result(1)) {
+    std::vector<symbol::DimExpr> softmax_shape{batch_size_expr,
+                                               num_heads_expr,
+                                               seqlen_q_rounded_expr,
+                                               seqlen_k_rounded_expr};
+    infer_context->SetShapeOrDataForValue(
+        op->result(1), symbol::TensorShapeOrDataDimExprs(softmax_shape));
+  }
+  if (op->result(2)) {
+    std::vector<symbol::DimExpr> softmax_lse_shape{
+        batch_size_expr, num_heads_expr, seqlen_q_rounded_expr};
+    infer_context->SetShapeOrDataForValue(
+        op->result(2), symbol::TensorShapeOrDataDimExprs(softmax_lse_shape));
+  }
+  if (op->result(3)) {
+    std::vector<symbol::DimExpr> seed_offset_shape{symbol::DimExpr{2}};
+    infer_context->SetShapeOrDataForValue(
+        op->result(3), symbol::TensorShapeOrDataDimExprs(seed_offset_shape));
+  }
+  return true;
+}
+
+bool GroupNormOpInferSymbolicShape(
+    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
+  const symbol::ShapeOrDataDimExprs &x_shape =
+      infer_context->GetShapeOrDataForValue(op->operand_source(0));
+
+  infer_context->SetShapeOrDataForValue(op->result(0), x_shape);
+
+  const symbol::DimExpr &batch_size = x_shape.shape()[0];
+  int groups = op->attribute<pir::Int32Attribute>("groups").data();
+  symbol::TensorShapeOrDataDimExprs mean_shape(
+      std::vector<symbol::DimExpr>{batch_size, groups});
+  if (op->result(1)) {
+    infer_context->SetShapeOrDataForValue(op->result(1), mean_shape);
+  }
+  if (op->result(2)) {
+    infer_context->SetShapeOrDataForValue(op->result(2), mean_shape);
+  }
+  return true;
+}
+
+bool LerpOpInferSymbolicShape(pir::Operation *op,
+                              pir::InferSymbolicShapeContext *infer_context) {
+  const auto &x_shape_or_data =
+      infer_context->GetShapeOrDataForValue(op->operand_source(0));
+  const auto &y_shape_or_data =
+      infer_context->GetShapeOrDataForValue(op->operand_source(1));
+  const auto &w_shape_or_data =
+      infer_context->GetShapeOrDataForValue(op->operand_source(2));
+  const auto &x_shape = x_shape_or_data.shape();
+  const auto &y_shape = y_shape_or_data.shape();
+  const auto &w_shape = w_shape_or_data.shape();
+  size_t x_ndims = x_shape.size();
+  size_t y_ndims = y_shape.size();
+  size_t w_ndims = w_shape.size();
+  std::vector<symbol::DimExpr> out1_shape;
+  std::vector<symbol::DimExpr> out2_shape;
+  if (x_ndims > y_ndims) {
+    out1_shape.assign(x_shape.begin(), x_shape.end());
+  } else if (x_ndims < y_ndims) {
+    out1_shape.assign(y_shape.begin(), y_shape.end());
+  } else {
+    symbol::DimExprBuilder builder;
+    for (size_t i = 0; i < x_ndims; ++i) {
+      out1_shape.emplace_back(builder.Broadcast(x_shape[i], y_shape[i]));
+      infer_context->AddBroadcastableCstr(x_shape[i], y_shape[i]);
+    }
+  }
+  size_t out1_ndims = out1_shape.size();
+  if (w_ndims > out1_ndims) {
+    out2_shape.assign(w_shape.begin(), w_shape.end());
+  } else if (w_ndims < out1_ndims) {
+    out2_shape.assign(out1_shape.begin(), out1_shape.end());
+  } else {
+    symbol::DimExprBuilder builder;
+    for (size_t i = 0; i < w_ndims; ++i) {
+      out2_shape.emplace_back(builder.Broadcast(w_shape[i], out1_shape[i]));
+      infer_context->AddBroadcastableCstr(w_shape[i], out1_shape[i]);
+    }
+  }
+  infer_context->SetShapeOrDataForValue(
+      op->result(0),
+      symbol::ShapeOrDataDimExprs{
+          symbol::TensorShapeOrDataDimExprs(out2_shape)});
+  return true;
+}
+
+bool Lerp_OpInferSymbolicShape(pir::Operation *op,
+                               pir::InferSymbolicShapeContext *infer_context) {
+  return LerpOpInferSymbolicShape(op, infer_context);
+}
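Lerp above derives its output shape in two steps: the higher-rank operand wins outright, and equal-rank shapes are broadcast dimension by dimension. A minimal concrete sketch of that rule, assuming -1 stands in for a symbolic (unknown) dimension; BroadcastShapes is an illustrative helper, not a Paddle API:

#include <cstdint>
#include <stdexcept>
#include <vector>

// Rank promotion first, then elementwise broadcast where a 1 stretches
// and -1 (unknown) propagates.
std::vector<int64_t> BroadcastShapes(const std::vector<int64_t> &a,
                                     const std::vector<int64_t> &b) {
  if (a.size() != b.size()) return a.size() > b.size() ? a : b;
  std::vector<int64_t> out(a.size());
  for (size_t i = 0; i < a.size(); ++i) {
    if (a[i] == b[i] || b[i] == 1) {
      out[i] = a[i];
    } else if (a[i] == 1) {
      out[i] = b[i];
    } else if (a[i] == -1 || b[i] == -1) {
      out[i] = -1;  // stays unknown until the symbolic constraint resolves
    } else {
      throw std::invalid_argument("shapes are not broadcastable");
    }
  }
  return out;
}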
+
+bool LayerNormOpInferSymbolicShape(
+    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
+  // Get the shapes of input tensors
+  const auto &x_shape_or_data =
+      infer_context->GetShapeOrDataForValue(op->operand_source(0));
+  const auto &scale_shape_or_data =
+      infer_context->GetShapeOrDataForValue(op->operand_source(1));
+  const auto &bias_shape_or_data =
+      infer_context->GetShapeOrDataForValue(op->operand_source(2));
+
+  std::vector<symbol::DimExpr> x_dims = x_shape_or_data.shape();
+  int begin_norm_axis =
+      op->attribute<pir::Int32Attribute>("begin_norm_axis").data();
+
+  // Flatten x_dims to 2D and get dim[1]
+  symbol::DimExpr matrix_dim_1 = x_dims[begin_norm_axis];
+  for (std::size_t i = begin_norm_axis + 1; i < x_dims.size(); ++i) {
+    matrix_dim_1 = matrix_dim_1 * x_dims[i];
+  }
+
+  if (!scale_shape_or_data.isa<symbol::NullShapeOrDataDimExpr>()) {
+    std::vector<symbol::DimExpr> scale_dims = scale_shape_or_data.shape();
+    infer_context->AddEqualCstr(scale_dims[0], matrix_dim_1);
+  }
+  if (!bias_shape_or_data.isa<symbol::NullShapeOrDataDimExpr>()) {
+    std::vector<symbol::DimExpr> bias_dims = bias_shape_or_data.shape();
+    infer_context->AddEqualCstr(bias_dims[0], matrix_dim_1);
+  }
+
+  // Set output shapes
+  infer_context->SetShapeOrDataForValue(
+      op->result(0),
+      symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs(x_dims)});
+
+  // Set mean and variance shapes
+  std::vector<symbol::DimExpr> before_norm_dims(
+      x_dims.begin(), x_dims.begin() + begin_norm_axis);
+  infer_context->SetShapeOrDataForValue(
+      op->result(1),
+      symbol::ShapeOrDataDimExprs{
+          symbol::TensorShapeOrDataDimExprs(before_norm_dims)});
+  infer_context->SetShapeOrDataForValue(
+      op->result(2),
+      symbol::ShapeOrDataDimExprs{
+          symbol::TensorShapeOrDataDimExprs(before_norm_dims)});
+
+  return true;
+}
+
+bool LinspaceOpInferSymbolicShape(
+    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
+  const auto &num_shape_or_data =
+      infer_context->GetShapeOrDataForValue(op->operand_source(2));
+  const auto step = [&] {
+    symbol::DimExpr expr;
+    if (num_shape_or_data.data().has_value()) {
+      expr = num_shape_or_data.data().value()[0];
+    } else {
+      expr = num_shape_or_data.shape()[0];
+    }
+    return expr;
+  }();
+  const symbol::ShapeOrDataDimExprs &shape_data = [&] {
+    std::vector<symbol::DimExpr> out_dims{step};
+    return symbol::ShapeOrDataDimExprs{
+        symbol::TensorShapeOrDataDimExprs(out_dims)};
+  }();
+  infer_context->SetShapeOrDataForValue(op->result(0), shape_data);
+  return true;
+}
+
+bool LinearInterpOpInferSymbolicShape(
+    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
+  return BicubicInterpOpInferSymbolicShape(op, infer_context);
+}
+
+bool LogspaceOpInferSymbolicShape(
+    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
+  return LinspaceOpInferSymbolicShape(op, infer_context);
+}
+
+bool NearestInterpOpInferSymbolicShape(
+    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
+  return BicubicInterpOpInferSymbolicShape(op, infer_context);
+}
+
+bool MemoryEfficientAttentionOpInferSymbolicShape(
+    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
+  const auto &q_shape =
+      infer_context->GetShapeOrDataForValue(op->operand_source(0)).shape();
+  const auto &k_shape =
+      infer_context->GetShapeOrDataForValue(op->operand_source(1)).shape();
+  const auto &v_shape =
+      infer_context->GetShapeOrDataForValue(op->operand_source(2)).shape();
+  PADDLE_ENFORCE_EQ(
+      q_shape.size(),
+      4,
+      common::errors::InvalidArgument("Query should be a 4-D tensor. "
+                                      "But received Query dimension(%d)",
+                                      q_shape.size()));
+  PADDLE_ENFORCE_EQ(
k_shape.size(), + 4, + common::errors::InvalidArgument("Key should be a 4-D tensor" + "But received Key dimension(%d)", + k_shape.size())); + PADDLE_ENFORCE_EQ( + v_shape.size(), + 4, + common::errors::InvalidArgument("Value should be a 4-D tensor" + "But received Value dimension(%d)", + v_shape.size())); + + const auto &query_batch_size = q_shape[0]; + const auto &query_seq_length = q_shape[1]; + const auto &query_num_head = q_shape[2]; + const auto &query_head_size = q_shape[3]; + + const auto &key_batch_size = k_shape[0]; + const auto &key_seq_length = k_shape[1]; + const auto &key_num_head = k_shape[2]; + const auto &key_head_size = k_shape[3]; + + const auto &value_batch_size = v_shape[0]; + const auto &value_seq_length = v_shape[1]; + const auto &value_num_head = v_shape[2]; + const auto &value_head_size = v_shape[3]; + + infer_context->AddEqualCstr(query_batch_size, key_batch_size); + infer_context->AddEqualCstr(key_batch_size, value_batch_size); + + infer_context->AddEqualCstr(query_num_head, key_num_head); + infer_context->AddEqualCstr(key_num_head, value_num_head); + + infer_context->AddEqualCstr(query_head_size, key_head_size); + + infer_context->AddEqualCstr(key_seq_length, value_seq_length); + + const std::vector out_dims{ + query_batch_size, query_seq_length, query_num_head, value_head_size}; + const std::vector logsumexp_dims{query_num_head, + query_batch_size}; + const std::vector seed_and_offset_dims{2}; + + infer_context->SetShapeOrDataForValue( + op->result(0), symbol::TensorShapeOrDataDimExprs(out_dims)); + infer_context->SetShapeOrDataForValue( + op->result(1), symbol::TensorShapeOrDataDimExprs(logsumexp_dims)); + infer_context->SetShapeOrDataForValue( + op->result(2), symbol::TensorShapeOrDataDimExprs(seed_and_offset_dims)); + + return true; +} + +bool RoiAlignOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const auto &x = op->operand_source(0); + const auto &boxes = op->operand_source(1); + + const auto &num_boxes = + infer_context->GetShapeOrDataForValue(boxes).shape()[0]; + symbol::DimExpr channel_num = + infer_context->GetShapeOrDataForValue(x).shape()[1]; + + int32_t out_h = op->attribute("pooled_height").data(); + int32_t out_w = op->attribute("pooled_width").data(); + + std::vector out_dim = {num_boxes, channel_num, out_h, out_w}; + infer_context->SetShapeOrDataForValue( + op->result(0), symbol::TensorShapeOrDataDimExprs(out_dim)); + return true; +} + +bool MeshgridOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const symbol::TensorListShapeOrDataDimExprs &shape_data_list = + infer_context->GetShapeOrDataForValue(op->operand_source(0)) + .dyn_cast(); + + const symbol::ShapeOrDataDimExprs sym_shape_dim_exprs = [&] { + symbol::TensorListShapeOrDataDimExprs shape_dim_exprs_list; + std::vector vec; + + for (auto &shape_data : shape_data_list) { + if (shape_data.shape().size() == 0) { + vec.emplace_back(1); + } else { + vec.emplace_back(shape_data.shape()[0]); + } + } + + auto shape_dim_exprs = symbol::TensorShapeOrDataDimExprs(vec); + + for (size_t i = 0; i < shape_data_list.size(); i++) { + shape_dim_exprs_list.emplace_back(shape_dim_exprs); + } + + return symbol::ShapeOrDataDimExprs(shape_dim_exprs_list); + }(); + + pir::Value res = op->result(0); + infer_context->SetShapeOrDataForValue(res, sym_shape_dim_exprs); + return true; +} + +bool StackOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + pir::Value operand_source = 
op->operand_source(0);
+
+  const auto &attributes = op->attributes();
+  int axis = attributes.at("axis").dyn_cast<pir::Int32Attribute>().data();
+  const symbol::TensorListShapeOrDataDimExprs &shape_data_list =
+      infer_context->GetShapeOrDataForValue(operand_source)
+          .dyn_cast<symbol::TensorListShapeOrDataDimExprs>();
+
+  size_t rank = shape_data_list.at(0).shape().size();
+  if (axis < 0) axis += rank + 1;
+  const symbol::ShapeOrDataDimExprs shape_data = [&] {
+    std::vector<symbol::DimExpr> result_shape = {};
+    std::vector<symbol::DimExpr> result_data = {};
+    const symbol::TensorShapeOrDataDimExprs &x_shape_data =
+        shape_data_list.at(0);
+
+    const bool data_flag = [&] {
+      for (const auto &shape_data : shape_data_list) {
+        if (!shape_data.data().has_value()) {
+          return false;
+        }
+      }
+      return true;
+    }();
+
+    if (data_flag) {
+      // case 1: data is not empty, eg: shape_data_list =
+      // [[shape:{3},data:{S0,6,7}],...]
+      if (axis == 0 && x_shape_data.data().value().size() <= 1) {
+        for (const auto &shape_data : shape_data_list) {
+          result_data.emplace_back(shape_data.data().value().at(0));
+        }
+      } else {
+        PADDLE_THROW(common::errors::Unimplemented(
+            op->name() +
+            " 's InferSymbolicShape can NOT deal with data size > 1 now."));
+      }
+      result_shape.emplace_back(
+          static_cast<std::int64_t>(shape_data_list.size()));
+    } else {
+      // case 2: data is empty, eg: shape_data_list =
+      // [[shape:{5,6,7},data:{}],...]
+      for (size_t i = 0; i < rank; ++i) {
+        details::BuildCstrEqForTensorListAlongAxis(
+            infer_context, shape_data_list, i);
+      }
+      for (const symbol::DimExpr &dim : x_shape_data.shape()) {
+        result_shape.emplace_back(dim);
+      }
+      result_shape.insert(result_shape.begin() + axis,
+                          static_cast<std::int64_t>(shape_data_list.size()));
+    }
+
+    if (result_data.empty()) {
+      return symbol::ShapeOrDataDimExprs(
+          symbol::TensorShapeOrDataDimExprs(result_shape));
+    }
+    return symbol::ShapeOrDataDimExprs(
+        symbol::TensorShapeOrDataDimExprs(result_shape, result_data));
+  }();
+
+  pir::Value res = op->result(0);
+  infer_context->SetShapeOrDataForValue(res, shape_data);
+  return true;
+}
+
+bool TrilinearInterpOpInferSymbolicShape(
+    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
+  return BicubicInterpOpInferSymbolicShape(op, infer_context);
+}
+
+bool WhereOpInferSymbolicShape(pir::Operation *op,
+                               pir::InferSymbolicShapeContext *infer_context) {
+  infer_context->SetShapeOrDataForValue(
+      op->result(0),
+      infer_context->GetShapeOrDataForValue(op->operand_source(0)));
+
+  const std::vector<pir::Value> &operands = {op->operand_source(0),
+                                             op->operand_source(1)};
+
+  size_t rank = infer_context->GetShapeOrDataForValue(op->operand_source(0))
+                    .shape()
+                    .size();
+
+  for (size_t i = 0; i < rank; ++i) {
+    paddle::dialect::details::BuildCstrEqForTensorListAlongAxis(
+        infer_context, operands, i);
+  }
+
+  return true;
+}
+
+bool Where_OpInferSymbolicShape(pir::Operation *op,
+                                pir::InferSymbolicShapeContext *infer_context) {
+  return WhereOpInferSymbolicShape(op, infer_context);
+}
+
+bool FakeChannelWiseDequantizeMaxAbsOpInferSymbolicShape(
+    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
+  const auto &x_shape_or_data =
+      infer_context->GetShapeOrDataForValue(op->operand_source(0));
+
+  int quant_axis = op->attribute<pir::Int32Attribute>("quant_axis").data();
+  int x_num_col_dims =
+      op->attribute<pir::Int32Attribute>("x_num_col_dims").data();
+
+  PADDLE_ENFORCE_EQ(
+      quant_axis == 0 || quant_axis == 1,
+      true,
+      common::errors::InvalidArgument("'quant_axis' should be 0 or 1, but "
+                                      "the received is %d",
+                                      quant_axis));
+  PADDLE_ENFORCE_EQ(x_num_col_dims == 0,
+                    false,
+                    common::errors::InvalidArgument(
+                        "'x_num_col_dims' should be 
larger than 0, but " + "the received is %d", + x_num_col_dims)); + + infer_context->SetShapeOrDataForValue( + op->result(0), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(x_shape_or_data.shape())}); + + return true; +} + +} // namespace paddle::dialect diff --git a/paddle/phi/ops/yaml/.ipynb_checkpoints/ops-checkpoint.yaml b/paddle/phi/ops/yaml/.ipynb_checkpoints/ops-checkpoint.yaml new file mode 100755 index 0000000000000..d37c56117f3d5 --- /dev/null +++ b/paddle/phi/ops/yaml/.ipynb_checkpoints/ops-checkpoint.yaml @@ -0,0 +1,5027 @@ +# This file is designed for C++ operators, which manages the +# generated code for dynamic mode and static mode. If you want +# to add the new operator configuration, make sure an operator's +# Python API, dynamic graph API, and static graph Operator parameters +# are consistent and correspond one-to-one. It's forbidden that the +# operator configured in this yaml file does not have Python API. + +- op : abs + args : (Tensor x) + output : Tensor(out) + infer_meta : + func : RealAndImagInferMeta + kernel : + func : abs + data_type : x + inplace: (x -> out) + backward : abs_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : accuracy + args : (Tensor x, Tensor indices, Tensor label) + output : Tensor(accuracy), Tensor(correct), Tensor(total) + infer_meta : + func : AccuracyInferMeta + kernel : + func : accuracy + data_type : x + interfaces : paddle::dialect::InferSymbolicShapeInterface + traits : paddle::dialect::ForwardOnlyTrait + +- op : accuracy_check + args : (Tensor x, Tensor y, str fn_name, double rtol=1e-5, double atol=1e-8, bool equal_nan=false) + output : Tensor(out) + infer_meta : + func : ValueCompareInferMeta + param: [x, y] + kernel : + func : accuracy_check + data_type : x + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : acos + args : (Tensor x) + output : Tensor(out) + infer_meta : + func : UnchangedInferMeta + kernel : + func : acos + inplace: (x -> out) + backward : acos_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : acosh + args : (Tensor x) + output : Tensor(out) + infer_meta : + func : UnchangedInferMeta + kernel : + func : acosh + inplace: (x -> out) + backward : acosh_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : adadelta_ + args : (Tensor param, Tensor grad, Tensor avg_squared_grad, Tensor avg_squared_update, + Tensor learning_rate, Tensor master_param, float rho = 0.95f, float epsilon = + 1.0e-6f, bool multi_precision = false) + output : Tensor(param_out), Tensor(moment_out), Tensor(inf_norm_out), Tensor(master_param_out) + infer_meta : + func : AdadeltaInferMeta + kernel : + func : adadelta + data_type : param + optional : master_param, master_param_out + inplace : (param -> param_out), (avg_squared_grad -> moment_out), (avg_squared_update -> inf_norm_out), (master_param -> master_param_out) + +- op : adagrad_ + args : (Tensor param, Tensor grad, Tensor moment, Tensor learning_rate, Tensor master_param, float epsilon = 1.0e-6f, bool multi_precision = false) + output : Tensor(param_out), Tensor(moment_out), Tensor(master_param_out) + infer_meta : + func : AdagradInferMeta + kernel : + func : adagrad {dense, dense, dense, dense, dense -> dense, dense, dense} + adagrad_dense_param_sparse_grad {dense, selected_rows, dense, dense, dense -> dense, dense, dense} + data_type : param + optional : master_param, master_param_out + inplace : (param -> param_out), (moment -> moment_out), (master_param -> master_param_out) + 
traits : pir::SideEffectTrait + +- op : adam_ + args : (Tensor param, Tensor grad, Tensor learning_rate, Tensor moment1, Tensor moment2, Tensor beta1_pow, Tensor beta2_pow, Tensor master_param, Tensor skip_update, Scalar beta1 = 0.9f, Scalar beta2 = 0.999f, Scalar epsilon = 1.0e-8f, bool lazy_mode = false, int64_t min_row_size_to_use_multithread = 1000, bool multi_precision = false, bool use_global_beta_pow = false) + output : Tensor(param_out), Tensor(moment1_out), Tensor(moment2_out), Tensor(beta1_pow_out), Tensor(beta2_pow_out), Tensor(master_param_out) + infer_meta : + func : AdamInferMeta + spmd_rule : AdamInferSpmdDynamic + kernel : + func : adam {dense, dense, dense, dense, dense, dense, dense, dense, dense -> dense, dense, dense, dense, dense, dense}, + adam_dense_param_sparse_grad {dense, selected_rows, dense, dense, dense, dense, dense, dense, dense -> dense, dense, dense, dense, dense, dense} + data_type : param + optional : master_param, skip_update, master_param_out + inplace : (param -> param_out), (moment1 -> moment1_out), (moment2 -> moment2_out), (beta1_pow -> beta1_pow_out), (beta2_pow -> beta2_pow_out), (master_param -> master_param_out) + traits : pir::SideEffectTrait + +- op : adamax_ + args : (Tensor param, Tensor grad, Tensor learning_rate, Tensor moment, Tensor inf_norm, Tensor beta1_pow, Tensor master_param, float beta1 = 0.9f, float beta2 = 0.999f, float epsilon = 1.0e-8f, bool multi_precision = false) + output : Tensor(param_out), Tensor(moment_out), Tensor(inf_norm_out), Tensor(master_param_out) + infer_meta : + func : AdamaxInferMeta + kernel : + func : adamax + data_type : param + optional : master_param, master_param_out + inplace : (param -> param_out), (moment -> moment_out), (inf_norm -> inf_norm_out), (master_param ->master_param_out) + traits : pir::SideEffectTrait + +- op : adamw_ + args : (Tensor param, Tensor grad, Tensor learning_rate, Tensor moment1, Tensor moment2, Tensor beta1_pow, Tensor beta2_pow, Tensor master_param, Tensor skip_update, Scalar beta1 = 0.9f, Scalar beta2 = 0.999f, Scalar epsilon = 1.0e-8f, float lr_ratio = 1.0f, float coeff = 0.01f, bool with_decay = false, bool lazy_mode = false, int64_t min_row_size_to_use_multithread = 1000, bool multi_precision = false, bool use_global_beta_pow = false) + output : Tensor(param_out), Tensor(moment1_out), Tensor(moment2_out), Tensor(beta1_pow_out), Tensor(beta2_pow_out), Tensor(master_param_out) + infer_meta : + func : AdamwInferMeta + spmd_rule : AdamwInferSpmdDynamic + kernel : + func : adamw + data_type : param + optional : master_param, skip_update, master_param_out + inplace : (param -> param_out), (moment1 -> moment1_out), (moment2 -> moment2_out), (beta1_pow -> beta1_pow_out), (beta2_pow -> beta2_pow_out), (master_param -> master_param_out) + traits : pir::SideEffectTrait + +- op : add_position_encoding + args: (Tensor x, float alpha = 1.0f, float beta = 1.0f) + output: Tensor (out) + infer_meta: + func: AddPositionEncodingInferMeta + kernel: + func: add_position_encoding + data_type: x + backward: add_position_encoding_grad + +- op : addmm + args : (Tensor input, Tensor x, Tensor y, float beta=1.0, float alpha=1.0) + output : Tensor(out) + infer_meta : + func : AddmmInferMeta + kernel : + func : addmm + data_type : x + inplace: (input -> out) + backward : addmm_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : affine_channel + args: (Tensor x, Tensor scale, Tensor bias, str data_layout = "AnyLayout") + output: Tensor (out) + infer_meta: + func: 
AffineChannelInferMeta + kernel: + func: affine_channel + backward: affine_channel_grad + inplace : (x -> out) + +- op : affine_grid + args : (Tensor input, IntArray output_shape={}, bool align_corners=true) + output : Tensor(output) + infer_meta : + func : AffineGridInferMeta + param : [input, output_shape, align_corners] + kernel : + func : affine_grid + param : [input, output_shape, align_corners] + data_type : input + backward : affine_grid_grad + +- op : all + args : (Tensor x, int64_t[] axis={}, bool keepdim=false) + output : Tensor(out) + infer_meta : + func : ReduceInferMeta + spmd_rule : ReductionAllInferSpmdDynamic + kernel : + func : all + traits : paddle::dialect::ForwardOnlyTrait + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : all_gather + args : (Tensor x, int ring_id = 0, int nranks=0) + output : Tensor(out) + infer_meta : + func : AllGatherInferMeta + param: [x, nranks] + kernel : + func : all_gather + param: [x, nranks] + +- op : allclose + args : (Tensor x, Tensor y, Scalar(double) rtol=1e-5, Scalar(double) atol=1e-8, bool equal_nan=false) + output : Tensor(out) + infer_meta : + func : AllValueCompareInferMeta + param: [x, y] + kernel : + func : allclose + data_type : x + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : amax + args : (Tensor x, int64_t[] axis={}, bool keepdim=false) + output : Tensor(out) + infer_meta : + func : ReduceInferMeta + kernel : + func : amax + backward : amax_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : amin + args : (Tensor x, int64_t[] axis={}, bool keepdim=false) + output : Tensor(out) + infer_meta : + func : ReduceInferMeta + kernel : + func : amin + backward : amin_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : angle + args : (Tensor x) + output : Tensor + infer_meta : + func : RealAndImagInferMeta + kernel : + func : angle + backward : angle_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : any + args : (Tensor x, int64_t[] axis={}, bool keepdim=false) + output : Tensor(out) + infer_meta : + func : ReduceInferMeta + kernel : + func : any + traits : paddle::dialect::ForwardOnlyTrait + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : apply_per_channel_scale + args: (Tensor x, Tensor scales) + output: Tensor(out) + infer_meta : + func : ApplyPerChannelScaleInferMeta + kernel : + func : apply_per_channel_scale + data_type : x + +- op : argmax + args : (Tensor x, Scalar(int64_t) axis, bool keepdims = false, bool flatten = false, DataType dtype = DataType::INT64) + output : Tensor(out) + infer_meta : + func : ArgMinMaxInferMeta + spmd_rule : ArgMaxInferSpmdDynamic + kernel : + func : argmax + data_type : x + interfaces : paddle::dialect::InferSymbolicShapeInterface + traits : paddle::dialect::ForwardOnlyTrait + +- op : argmin + args : (Tensor x, Scalar(int64_t) axis, bool keepdims = false, bool flatten = false, DataType dtype = DataType::INT64) + output : Tensor(out) + infer_meta : + func : ArgMinMaxInferMeta + kernel : + func : argmin + data_type : x + interfaces : paddle::dialect::InferSymbolicShapeInterface + traits : paddle::dialect::ForwardOnlyTrait + +- op : argsort + args : (Tensor x, int axis=-1, bool descending=false, bool stable=false) + output : Tensor(out), Tensor(indices) + infer_meta : + func : ArgsortInferMeta + kernel : + func : argsort + backward : argsort_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : as_complex + args : (Tensor x) + output : Tensor + 
infer_meta : + func : AsComplexInferMeta + kernel : + func : as_complex + backward : as_complex_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : as_real + args : (Tensor x) + output : Tensor + infer_meta : + func : AsRealInferMeta + kernel : + func : as_real + backward : as_real_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : as_strided + args : (Tensor input, int64_t[] dims = {}, int64_t[] stride = {}, int64_t offset = 0) + output : Tensor + infer_meta : + func : StridedUnChangedInferMeta + param : [input] + kernel : + func : as_strided + backward : as_strided_grad + no_need_buffer : input + +- op : asgd_ + args : (Tensor param, Tensor grad, Tensor learning_rate, Tensor d, Tensor y, Tensor n, Tensor master_param, bool multi_precision=false) + output : Tensor(param_out), Tensor(d_out), Tensor(y_out), Tensor(master_param_out) + infer_meta : + func : ASGDInferMeta + kernel : + func : asgd + data_type : param + data_transform : + support_trans_dtype : learning_rate, n + optional : master_param, master_param_out + inplace : (param -> param_out), (d -> d_out), (y -> y_out), (master_param -> master_param_out) + traits : pir::SideEffectTrait + +- op : asin + args : (Tensor x) + output : Tensor(out) + infer_meta : + func : UnchangedInferMeta + kernel : + func : asin + inplace: (x -> out) + backward : asin_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : asinh + args : (Tensor x) + output : Tensor(out) + infer_meta : + func : UnchangedInferMeta + kernel : + func : asinh + inplace: (x -> out) + backward : asinh_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : assign_out_ + args : (Tensor x, Tensor output) + output : Tensor(out) + infer_meta : + func : UnchangedInferMeta + param : [x] + kernel : + func : assign + param : [x] + inplace : (output -> out) + backward : assign_out__grad + traits : pir::SideEffectTrait + +- op : assign_pos + args : (Tensor x, Tensor cum_count, Tensor eff_num_len) + output : Tensor(out) + infer_meta : + func : AssignPosInferMeta + kernel : + func : assign_pos + +- op : assign_value_ + args : (Tensor output, int[] shape, DataType dtype, Scalar[] values, Place place = {}) + output : Tensor(out) + inplace: (output -> out) + infer_meta : + func : AssignValueInferMeta + param : [shape, dtype] + kernel : + func : assign_value + param : [shape, dtype, values] + data_type : dtype + backend : place > output + interfaces : paddle::dialect::InferSymbolicShapeInterface + traits : paddle::dialect::ForwardOnlyTrait + +- op : atan + args : (Tensor x) + output : Tensor(out) + infer_meta : + func : UnchangedInferMeta + kernel : + func : atan + inplace: (x -> out) + backward : atan_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : atan2 + args : (Tensor x, Tensor y) + output : Tensor(out) + infer_meta : + func : Atan2InferMeta + kernel : + func : atan2 + backward : atan2_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : atanh + args : (Tensor x) + output : Tensor(out) + infer_meta : + func : UnchangedInferMeta + kernel : + func : atanh + inplace: (x -> out) + backward : atanh_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : attention_lstm + args: (Tensor x, Tensor c0, Tensor h0, Tensor attention_weight, Tensor attention_bias, + Tensor attention_scalar, Tensor attention_scalar_bias, Tensor lstm_weight, + Tensor lstm_bias, str gate_activation = "sigmoid", str cell_activation = "tanh", + str candidate_activation = 
"tanh") + output: Tensor (hidden), Tensor (cell), Tensor (attentioned_x), Tensor (attention_fc_out), + Tensor (lstm_x), Tensor (lstm_out) + infer_meta: + func: AttentionLstmInferMeta + kernel: + func: attention_lstm + data_type: x + optional: h0, attention_bias, attention_scalar, attention_scalar_bias + intermediate: attentioned_x, attention_fc_out, lstm_x, lstm_out + +- op : auc + args : (Tensor x, Tensor label, Tensor stat_pos, Tensor stat_neg, Tensor ins_tag_weight, str curve = "ROC", int num_thresholds = (2 << 12) - 1, int slide_steps = 1) + output : Tensor(auc), Tensor(stat_pos_out), Tensor(stat_neg_out) + infer_meta : + func : AucInferMeta + kernel : + func : auc + data_type : x + optional : ins_tag_weight + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : average_accumulates_ + args : (Tensor param, Tensor in_sum_1, Tensor in_sum_2, Tensor in_sum_3, Tensor in_num_accumulates, Tensor in_old_num_accumulates, Tensor in_num_updates, float average_window = 0, int64_t max_average_window = INT64_MAX, int64_t min_average_window = 10000L) + output : Tensor(out_sum_1), Tensor(out_sum_2), Tensor(out_sum_3), Tensor(out_num_accumulates), Tensor(out_old_num_accumulates), Tensor(out_num_updates) + infer_meta: + func : AverageAccumulatesInferMeta + kernel : + func : average_accumulates {dense, dense, dense, dense, dense ,dense, dense -> dense, dense, dense, dense, dense, dense} + data_type : param + inplace : (in_sum_1 -> out_sum_1), (in_sum_2 -> out_sum_2), (in_sum_3 -> out_sum_3), (in_num_accumulates -> out_num_accumulates), (in_old_num_accumulates -> out_old_num_accumulates), (in_num_updates -> out_num_updates) + +- op : batch_fc + args : (Tensor input, Tensor w, Tensor bias) + output : Tensor(out) + infer_meta: + func : BatchFCInferMeta + kernel : + func : batch_fc + data_type: input + backward: batch_fc_grad + +- op : bce_loss + args : (Tensor input, Tensor label) + output : Tensor + infer_meta : + func : BCELossInferMeta + kernel : + func : bce_loss + data_type : input + inplace : (input -> out) + backward : bce_loss_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : beam_search + args: (Tensor pre_ids, Tensor pre_scores, Tensor ids, Tensor scores, int level, + int beam_size, int end_id, bool is_accumulated = true) + output: Tensor (selected_ids), Tensor (selected_scores), Tensor (parent_idx) + infer_meta: + func: BeamSearchInferMeta + kernel: + func: beam_search + data_type: pre_ids + optional: ids, parent_idx + +- op : bernoulli + args : (Tensor x) + output : Tensor(out) + infer_meta : + func : UnchangedInferMeta + kernel : + func : bernoulli + interfaces : paddle::dialect::InferSymbolicShapeInterface + traits : paddle::dialect::ForwardOnlyTrait + +- op : bicubic_interp + args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_format="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1) + output : Tensor(output) + infer_meta : + func : InterpolateInferMeta + optional: out_size, size_tensor, scale_tensor + kernel : + func : bicubic_interp + data_type : x + backward : bicubic_interp_grad + data_transform : + skip_transform : out_size, size_tensor, scale_tensor + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : bilinear + args : (Tensor x, Tensor y, Tensor weight, Tensor bias) + output : Tensor + infer_meta : + func : BilinearInferMeta + kernel : + func : bilinear + optional : bias + backward : bilinear_grad + 
interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : bilinear_interp + args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_format="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1) + output : Tensor(output) + infer_meta : + func : InterpolateInferMeta + optional: out_size, size_tensor, scale_tensor + kernel : + func : bilinear_interp + data_type : x + backward : bilinear_interp_grad + data_transform : + skip_transform : out_size, size_tensor, scale_tensor + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : bincount + args: (Tensor x, Tensor weights, Scalar(int) minlength = 0) + output: Tensor(out) + infer_meta: + func: BincountInferMeta + kernel: + func: bincount + optional: weights + +- op : binomial + args : (Tensor count, Tensor prob) + output : Tensor(out) + infer_meta : + func : BinomialInferMeta + kernel : + func : binomial + +- op : bipartite_match + args: (Tensor dist_mat, str match_type = "bipartite", float dist_threshold = 0.5) + output: Tensor (col_to_row_match_indices), Tensor (col_to_row_match_dist) + infer_meta: + func: BipartiteMatchInferMeta + kernel: + func: bipartite_match + data_type: dist_mat + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : bitwise_and + args : (Tensor x, Tensor y) + output : Tensor(out) + infer_meta : + func : ElementwiseInferMeta + spmd_rule : ElementwiseBinaryInferSpmd + kernel : + func : bitwise_and + backend : x + inplace: (x -> out) + interfaces : paddle::dialect::InferSymbolicShapeInterface + traits : paddle::dialect::ForwardOnlyTrait + +- op : bitwise_left_shift + args : (Tensor x, Tensor y, bool is_arithmetic = true) + output : Tensor(out) + infer_meta : + func : BitwiseShiftInferMeta + kernel : + func : bitwise_left_shift + backend : x + inplace: (x -> out) + traits : paddle::dialect::ForwardOnlyTrait + +- op : bitwise_not + args : (Tensor x) + output : Tensor(out) + infer_meta : + func : UnchangedInferMeta + spmd_rule : ElementwiseUnaryInferSpmd + kernel : + func : bitwise_not + backend : x + inplace: (x -> out) + interfaces : paddle::dialect::InferSymbolicShapeInterface + traits : paddle::dialect::ForwardOnlyTrait + +- op : bitwise_or + args : (Tensor x, Tensor y) + output : Tensor(out) + infer_meta : + func : ElementwiseInferMeta + kernel : + func : bitwise_or + backend : x + inplace: (x -> out) + traits : paddle::dialect::ForwardOnlyTrait + +- op : bitwise_right_shift + args : (Tensor x, Tensor y, bool is_arithmetic = true) + output : Tensor(out) + infer_meta : + func : BitwiseShiftInferMeta + kernel : + func : bitwise_right_shift + backend : x + inplace: (x -> out) + traits : paddle::dialect::ForwardOnlyTrait + +- op : bitwise_xor + args : (Tensor x, Tensor y) + output : Tensor(out) + infer_meta : + func : ElementwiseInferMeta + kernel : + func : bitwise_xor + backend : x + inplace: (x -> out) + interfaces : paddle::dialect::InferSymbolicShapeInterface + traits : paddle::dialect::ForwardOnlyTrait + +- op : bmm + args : (Tensor x, Tensor y) + output : Tensor + infer_meta : + func : BmmInferMeta + kernel : + func : bmm + backward : bmm_grad + +- op : box_clip + args: (Tensor input, Tensor im_info) + output: Tensor (output) + infer_meta: + func: BoxClipInferMeta + kernel: + func: box_clip + +- op : box_coder + args : (Tensor prior_box, Tensor prior_box_var, Tensor target_box, str code_type = "encode_center_size", bool box_normalized = true, int axis = 0, float[] 
variance = {}) + output : Tensor(output_box) + infer_meta : + func : BoxCoderInferMeta + kernel : + func : box_coder + optional : prior_box_var + +- op : broadcast_tensors + args: (Tensor[] input) + output: Tensor[]{input.size()} + infer_meta: + func: BroadcastTensorsInferMeta + kernel: + func: broadcast_tensors + data_type : input + backward: broadcast_tensors_grad + +- op : c_allgather + args : (Tensor x, int ring_id, int nranks, bool use_calc_stream) + output : Tensor(out) + infer_meta : + func : AllGatherInferMeta + param: [x, nranks] + kernel : + func : c_allgather + +- op : c_allreduce_max + args : (Tensor x, int ring_id, bool use_calc_stream, bool use_model_parallel) + output : Tensor(out) + infer_meta : + func : AllReduceInferMeta + param : [x] + kernel : + func : c_allreduce_max + inplace : (x -> out) + +- op : c_allreduce_min + args : (Tensor x, int ring_id, bool use_calc_stream, bool use_model_parallel) + output : Tensor(out) + infer_meta : + func : AllReduceInferMeta + param : [x] + kernel : + func : c_allreduce_min + inplace : (x -> out) + +- op : c_allreduce_prod + args : (Tensor x, int ring_id, bool use_calc_stream, bool use_model_parallel) + output : Tensor(out) + infer_meta : + func : AllReduceInferMeta + param : [x] + kernel : + func : c_allreduce_prod + inplace : (x -> out) + +- op : c_allreduce_sum + args : (Tensor x, int ring_id, bool use_calc_stream, bool use_model_parallel) + output : Tensor(out) + infer_meta : + func : AllReduceInferMeta + param : [x] + kernel : + func : c_allreduce_sum + inplace : (x -> out) + +- op : c_broadcast + args : (Tensor x, int ring_id=0, int root=0, bool use_calc_stream=false) + output : Tensor(out) + infer_meta : + func : UnchangedInferMeta + param : [x] + kernel : + func : c_broadcast + inplace : (x -> out) + +- op : c_concat + args : (Tensor x, int rank, int nranks, int ring_id, bool use_calc_stream, bool use_model_parallel) + output : Tensor(out) + infer_meta : + func : CConcatInferMeta + param : [x, nranks] + kernel : + func : c_concat + +- op : c_identity + args : (Tensor x, int ring_id, bool use_calc_stream, bool use_model_parallel) + output : Tensor(out) + infer_meta : + func : CIdentityInferMeta + kernel : + func : c_identity + inplace : (x -> out) + +- op : c_reduce_sum + args : (Tensor x, int ring_id, int root_id, bool use_calc_stream) + output : Tensor(out) + infer_meta : + func : DistReduceInferMeta + param : [x] + kernel : + func : c_reduce_sum + inplace : (x -> out) + +- op : c_sync_calc_stream + args : (Tensor x) + output : Tensor(out) + infer_meta : + func : UnchangedInferMeta + param : [x] + kernel : + func : c_sync_calc_stream + inplace : (x -> out) + +- op : c_sync_comm_stream + args : (Tensor x, int ring_id) + output : Tensor(out) + infer_meta : + func : UnchangedInferMeta + param : [x] + kernel : + func : c_sync_comm_stream + inplace : (x -> out) + +- op : calc_reduced_attn_scores + args : (Tensor q, Tensor k, Tensor softmax_lse) + output : Tensor(reduced_scores) + infer_meta : + func : CalcReducedAttnScoresInferMeta + param : [q, k, softmax_lse] + kernel : + func : calc_reduced_attn_scores + data_type : q + +- op : cast + args : (Tensor x, DataType dtype) + output : Tensor(out) + infer_meta : + func : CastInferMeta + spmd_rule : CastInferSpmd + kernel : + func : cast + param : [x, dtype] + data_type : x + inplace: (x -> out) + backward : cast_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : ceil + args : (Tensor x) + output : Tensor(out) + infer_meta : + func : UnchangedInferMeta + kernel : 
+ func : ceil + inplace : (x -> out) + backward : ceil_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : celu + args : (Tensor x, float alpha = 1.0) + output : Tensor(out) + infer_meta : + func : UnchangedInferMeta + param: [x] + kernel : + func : celu + backward : celu_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : channel_shuffle + args : (Tensor x, int groups, str data_format="NCHW") + output : Tensor(out) + infer_meta : + func : ChannelShuffleInferMeta + kernel : + func : channel_shuffle + backward : channel_shuffle_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : check_finite_and_unscale_ + args : (Tensor[] x, Tensor scale) + output : Tensor[](out){x.size()}, Tensor(found_infinite) + infer_meta : + func : CheckFiniteAndUnscaleInferMeta + param : [x, scale] + spmd_rule : CheckFiniteAndUnscaleSpmd + kernel : + func : check_finite_and_unscale + param : [x, scale] + data_type : x + inplace : (x -> out) + +- op : check_numerics + args : (Tensor tensor, str op_type = "", str var_name = "", int check_nan_inf_level = 0, int stack_height_limit = -1, str output_dir = "") + output : Tensor(stats), Tensor(values) + infer_meta : + func : CheckNumericsInferMeta + kernel : + func : check_numerics + +- op : cholesky + args : (Tensor x, bool upper=false) + output : Tensor + infer_meta : + func : CholeskyInferMeta + kernel : + func : cholesky + backward : cholesky_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : cholesky_solve + args : (Tensor x, Tensor y, bool upper=false) + output : Tensor + infer_meta : + func : CholeskySolveInferMeta + kernel : + func : cholesky_solve + backward : cholesky_solve_grad + +- op : class_center_sample + args : (Tensor label, int num_classes, int num_samples, int ring_id = 0, int rank = 0, int nranks = 1, bool fix_seed = false, int seed = 0) + output : Tensor(remapped_label), Tensor(sampled_local_class_center) + infer_meta : + func : ClassCenterSampleInferMeta + kernel : + func : class_center_sample + data_type : label + traits : pir::SideEffectTrait + +- op : clip + args : (Tensor x, Scalar(float) min, Scalar(float) max) + output : Tensor(out) + inplace : (x -> out) + infer_meta : + func : UnchangedInferMeta + param : [x] + kernel : + func : clip + data_type : x + backward : clip_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : clip_by_norm + args : (Tensor x, float max_norm) + output : Tensor(out) + infer_meta : + func : ClipByNormInferMeta + kernel : + func : clip_by_norm {dense -> dense} + clip_by_norm_sr {selected_rows -> selected_rows} + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : coalesce_tensor + args : (Tensor[] input, DataType dtype, bool copy_data = false, bool set_constant = false, bool persist_output = false, float constant = 0.0, bool use_align = true, int align_size = -1, int size_of_dtype = -1, int64_t[] concated_shapes = {}, int64_t[] concated_ranks = {}) + output : Tensor[](output){input.size()}, Tensor(fused_output) + infer_meta : + func : CoalesceTensorInferMeta + kernel : + func : coalesce_tensor + data_type : dtype + +- op : collect_fpn_proposals + args: (Tensor[] multi_level_rois, Tensor[] multi_level_scores, Tensor[] multi_level_rois_num, + int post_nms_topn) + output: Tensor (fpn_rois), Tensor (rois_num) + infer_meta: + func: CollectFpnProposalsInferMeta + kernel: + func: collect_fpn_proposals + data_type: multi_level_rois + optional: multi_level_rois_num, rois_num + +- op : complex + args : 
(Tensor real, Tensor imag) + output : Tensor + infer_meta : + func : ComplexInferMeta + kernel : + func : complex + data_type : real + backward : complex_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : concat + args : (Tensor[] x, Scalar axis=0) + output : Tensor + infer_meta : + func : ConcatInferMeta + param : [x, axis] + spmd_rule : ConcatInferSpmdDynamic + kernel : + func : concat + data_type : x + backward : concat_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface, paddle::dialect::LayoutTransformationInterface + +- op : conj + args : (Tensor x) + output : Tensor (out) + infer_meta : + func : UnchangedInferMeta + kernel : + func : conj + backward : conj_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : conv2d + args : (Tensor input, Tensor filter, int[] strides={1, 1}, int[] paddings={0, 0}, str padding_algorithm="EXPLICIT", int[] dilations={1, 1}, int groups=1, str data_format="NCHW") + output : Tensor + infer_meta : + func : ConvInferMeta + kernel : + func : conv2d + data_type : input + backward : conv2d_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface, paddle::dialect::LayoutTransformationInterface + +- op : conv2d_transpose + args : (Tensor x, Tensor filter, int[] strides={1, 1}, int[] paddings={0, 0}, int[] output_padding={}, IntArray output_size={}, str padding_algorithm="EXPLICIT", int groups=1, int[] dilations={1, 1}, str data_format="NCHW") + output : Tensor(out) + infer_meta : + func : Conv2dTransposeInferMeta + kernel : + func : conv2d_transpose + data_type : x + backward : conv2d_transpose_grad + +- op : conv2d_transpose_bias + args : (Tensor x, Tensor filter, Tensor bias, int[] strides={1, 1}, int[] paddings={0, 0}, int[] output_padding={}, IntArray output_size={}, str padding_algorithm="EXPLICIT", int groups=1, int[] dilations={1, 1}, str data_format="NCHW") + output : Tensor(out) + infer_meta : + func : Conv2dTransposeInferMeta + param: [x, filter, strides, paddings, output_padding, output_size, padding_algorithm, groups, dilations, data_format] + kernel : + func : conv2d_transpose_bias + data_type : x + +- op : conv3d + args : (Tensor input, Tensor filter, int[] strides={1, 1, 1}, int[] paddings={0, 0, 0}, str padding_algorithm="EXPLICIT", int groups=1, int[] dilations={1, 1, 1}, str data_format="NCDHW") + output : Tensor + infer_meta : + func : Conv3DInferMeta + kernel : + func : conv3d + data_type : input + backward : conv3d_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : conv3d_transpose + args : (Tensor x, Tensor filter, int[] strides={1, 1, 1}, int[] paddings={0, 0, 0}, int[] output_padding={}, int[] output_size={}, str padding_algorithm="EXPLICIT", int groups=1, int[] dilations={1, 1, 1}, str data_format="NCHW") + output : Tensor(out) + infer_meta : + func : ConvTransposeInferMeta + kernel : + func : conv3d_transpose + data_type : x + backward : conv3d_transpose_grad + +- op : copy_to + args : (Tensor x, Place place, bool blocking) + output : Tensor(out) + invoke : copy_to_impl(x, place, blocking) + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : copysign + args : (Tensor x, Tensor y) + output : Tensor(out) + infer_meta : + func : ElementwiseInferMeta + kernel : + func : copysign + inplace: (x -> out) + backward : copysign_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : correlation + args : (Tensor input1, Tensor input2, int pad_size, int kernel_size, int max_displacement, int stride1, int stride2, int 
corr_type_multiply=1) + output : Tensor(out) + infer_meta : + func : CorrelationInferMeta + kernel : + func : correlation + data_type : input1 + backward : correlation_grad + +- op : cos + args : (Tensor x) + output : Tensor(out) + infer_meta : + func : UnchangedInferMeta + spmd_rule : ElementwiseUnaryInferSpmd + kernel : + func : cos + inplace: (x -> out) + backward : cos_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : cosh + args : (Tensor x) + output : Tensor(out) + infer_meta : + func : UnchangedInferMeta + kernel : + func : cosh + inplace: (x -> out) + backward : cosh_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : crf_decoding + args: (Tensor emission, Tensor transition, Tensor label, Tensor length) + output: Tensor (viterbi_path) + infer_meta: + func: CrfDecodingInferMeta + kernel: + func: crf_decoding + data_type: emission + optional: label, length + +- op : crop + args : (Tensor x, IntArray shape = {}, IntArray offsets = {}) + output : Tensor(out) + infer_meta : + func : CropInferMeta + kernel : + func : crop + data_type : x + backward : crop_grad + +- op : cross + args : (Tensor x, Tensor y, int axis = 9) + output : Tensor + infer_meta : + func : CrossInferMeta + kernel : + func : cross + data_type : x + backward : cross_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +# Part of the Python API paddle.nn.functional.cross_entropy +- op : cross_entropy_with_softmax + args : (Tensor input, Tensor label, bool soft_label=false, bool use_softmax=true, bool numeric_stable_mode=true, int ignore_index=-100, int axis=-1) + output : Tensor(softmax), Tensor(loss) + inplace : (input -> softmax) + infer_meta : + func : CrossEntropyWithSoftmaxInferMeta + spmd_rule: CrossEntropyWithSoftmaxInferSpmd + kernel : + func : cross_entropy_with_softmax + data_type : input + backward : cross_entropy_with_softmax_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : ctc_align + args: (Tensor input, Tensor input_length, int blank = 0, bool merge_repeated = true, + int padding_value = 0) + output: Tensor (output), Tensor (output_length) + infer_meta: + func: CtcAlignInferMeta + kernel: + func: ctc_align + data_type: input + optional: input_length, output_length + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : cudnn_lstm + args: (Tensor x, Tensor init_h, Tensor init_c, Tensor w, Tensor[] weight_list, Tensor sequence_length, float dropout_prob = 0.0, bool is_bidirec = false, int hidden_size = 100, int num_layers = 1, bool is_test = false, int seed = 0) + output: Tensor (out), Tensor (last_h), Tensor (last_c), Tensor (reserve), Tensor (state_out) + infer_meta: + func: CudnnLSTMInferMeta + kernel: + func: cudnn_lstm + data_type: x + optional: w, weight_list, sequence_length + intermediate: reserve + backward: cudnn_lstm_grad + +- op : cummax + args : (Tensor x, int axis=-1, DataType dtype = DataType::INT64) + output : Tensor(out), Tensor(indices) + infer_meta : + func : CumWithIndicesInferMeta + kernel : + func : cummax + data_type : x + backward : cummax_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : cummin + args : (Tensor x, int axis=-1, DataType dtype = DataType::INT64) + output : Tensor(out), Tensor(indices) + infer_meta : + func : CumWithIndicesInferMeta + kernel : + func : cummin + data_type : x + backward : cummin_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : cumprod + args : (Tensor x, int dim, bool exclusive=false, bool reverse=false) + 
output : Tensor(out) + infer_meta : + func : UnchangedInferMetaCheckAxis + param : [x, dim] + kernel : + func : cumprod + inplace: (x -> out) + backward : cumprod_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : cumsum + args : (Tensor x, Scalar axis=-1, bool flatten=false, bool exclusive=false, bool reverse=false) + output : Tensor(out) + infer_meta : + func : CumScalarAxisInferMeta + kernel : + func : cumsum + data_type : x + inplace: (x -> out) + backward : cumsum_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : cvm + args: (Tensor x, Tensor cvm, bool use_cvm = true) + output: Tensor (out) + infer_meta: + func: CvmInferMeta + kernel: + func: cvm + data_type: x + backward: cvm_grad + no_need_buffer: cvm + +- op : data + args : (str name, IntArray shape, DataType dtype, Place place) + output : Tensor(out) + infer_meta : + func : DataInferMeta + param : [name, shape, dtype] + kernel: + func : data + param : [name, shape, dtype] + data_type : dtype + backend : place + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : decayed_adagrad + args : (Tensor param, Tensor grad, Tensor moment, Tensor learning_rate, float decay = 0.95f, float epsilon = 1.0e-6f) + output : Tensor(param_out), Tensor(moment_out) + infer_meta : + func : DecayedAdagradInferMeta + kernel : + func : decayed_adagrad + data_type : param + +- op : decode_jpeg + args : (Tensor x, str mode, Place place) + output : Tensor(out) + infer_meta : + func : DecodeJpegInferMeta + param : [x, mode] + kernel : + func : decode_jpeg + param : [x, mode] + backend : place + +- op : deformable_conv + args : (Tensor x, Tensor offset, Tensor filter, Tensor mask, int[] strides, int[] paddings, int[] dilations, int deformable_groups, int groups, int im2col_step) + output : Tensor(out) + infer_meta : + func : DeformableConvInferMeta + kernel : + func : deformable_conv + data_type : x + optional : mask + backward : deformable_conv_grad + +- op : depend + args: (Tensor x, Tensor[] dep) + output: Tensor (out) + infer_meta: + func : UnchangedInferMeta + param : [x] + kernel: + func: depend + +- op : depthwise_conv2d + args : (Tensor input, Tensor filter, int[] strides={1, 1}, int[] paddings={0, 0}, str padding_algorithm="EXPLICIT", int groups=1, int[] dilations={1, 1}, str data_format="NCHW") + output : Tensor(out) + infer_meta : + func : DepthwiseConvInferMeta + kernel : + func : depthwise_conv2d + data_type : input + backward : depthwise_conv2d_grad + +- op : depthwise_conv2d_transpose + args : (Tensor x, Tensor filter, int[] strides={1, 1}, int[] paddings={0, 0}, int[] output_padding={}, IntArray output_size={}, str padding_algorithm="EXPLICIT", int groups=1, int[] dilations={1, 1}, str data_format="NCHW") + output : Tensor(out) + infer_meta : + func : Conv2dTransposeInferMeta + kernel : + func : depthwise_conv2d_transpose + data_type : x + backward : depthwise_conv2d_transpose_grad + +- op : dequantize_abs_max + args : (Tensor x, Tensor scale, float max_range) + output : Tensor(out) + infer_meta : + func : DequantizeAbsMaxInferMeta + kernel : + func : dequantize_abs_max + data_type : x + +- op : dequantize_log + args: (Tensor x, Tensor dict) + output: Tensor(out) + infer_meta: + func: DequantizeLogInferMeta + kernel: + func: dequantize_log + data_type: x + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : det + args : (Tensor x) + output : Tensor + infer_meta : + func : UnchangedInferMeta + kernel : + func : determinant + backward : det_grad + +- op : 
detection_map + args: (Tensor detect_res, Tensor label, Tensor has_state, Tensor pos_count, Tensor + true_pos, Tensor false_pos, int class_num, int background_label = 0, float overlap_threshold + = .5f, bool evaluate_difficult = true, str ap_type = "integral") + output: Tensor (accum_pos_count), Tensor (accum_true_pos), Tensor (accum_false_pos), + Tensor (m_ap) + infer_meta: + func: DetectionMapInferMeta + kernel: + func: detection_map + data_type: detect_res + optional: has_state, pos_count, true_pos, false_pos + +- op : dgc + args : (Tensor u, Tensor v, Tensor grad, Tensor param, Tensor current_step, Tensor nranks, float m=0.9, bool use_nesterov=true, float[] sparsity={}, float rampup_begin_step=0.0, float rampup_step=0.0, float regular_coeff=0.0, int regular_type=0) + output : Tensor(u_out), Tensor(v_out), Tensor(encode_grad), Tensor(grad_out), Tensor(k), Tensor(gather_buff) + infer_meta: + func: DgcInferMeta + param : [u, v, grad, param, current_step, nranks] + kernel : + func : dgc + param : [u, v, grad, param, current_step, nranks, m, use_nesterov, sparsity, rampup_begin_step, rampup_step, regular_coeff, regular_type] + optional: param + data_transform : + skip_transform : current_step, nranks + +- op : dgc_clip_by_norm + args: (Tensor x, Tensor current_step, float max_norm, float rampup_begin_step = -1.0) + output: Tensor(out) + infer_meta: + func: ClipByNormInferMeta + param: [x, max_norm] + kernel: + func: dgc_clip_by_norm {dense, dense -> dense} + dgc_clip_by_norm_sr {selected_rows, dense -> selected_rows} + data_transform : + skip_transform : current_step + +- op : dgc_momentum + args: (Tensor param, Tensor grad, Tensor velocity, Tensor learning_rate, Tensor + master_param, Tensor current_step_tensor, Tensor nranks_tensor, float mu, bool use_nesterov + = false, str regularization_method = "", float regularization_coeff = 0.0f, bool + multi_precision = false, float rescale_grad = 1.0f, float rampup_begin_step = + -1.0) + output: Tensor (param_out), Tensor (velocity_out), Tensor (master_param_out), Tensor + (grad_out) + infer_meta: + func: DGCMomentumInferMeta + kernel: + func: dgc_momentum + data_type: param + optional : master_param, master_param_out + data_transform : + skip_transform : current_step_tensor, nranks_tensor + +- op : diag + args : (Tensor x, int offset = 0, float padding_value = 0.0) + output : Tensor + infer_meta : + func : DiagInferMeta + kernel : + func : diag + backward : diag_grad + +- op : diag_embed + args : (Tensor input, int offset = 0, int dim1 = -2, int dim2 = -1) + output : Tensor(out) + infer_meta : + func : DiagEmbedInferMeta + kernel : + func : diag_embed + interfaces : paddle::dialect::InferSymbolicShapeInterface + traits : paddle::dialect::ForwardOnlyTrait + +- op : diagonal + args : (Tensor x, int offset = 0, int axis1 = 0, int axis2 = 1) + output : Tensor + infer_meta : + func : DiagonalInferMeta + kernel : + func : diagonal + backward : diagonal_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : digamma + args : (Tensor x) + output : Tensor(out) + infer_meta : + func : UnchangedInferMeta + kernel : + func : digamma + inplace: (x -> out) + backward : digamma_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : dirichlet + args: (Tensor alpha) + output: Tensor(out) + infer_meta: + func: DirichletInferMeta + kernel: + func: dirichlet + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : disable_check_model_nan_inf + args: (Tensor x, int flag = 0) + output: Tensor(out) + infer_meta: + 
func: UnchangedInferMeta + param : [x] + kernel: + func: check_model_nan_inf + data_type: x + backward: disable_check_model_nan_inf_grad + +- op : dist + args : (Tensor x, Tensor y, float p = 2.0) + output : Tensor + infer_meta : + func : DistInferMeta + kernel : + func : dist + backward : dist_grad + +- op : dot + args : (Tensor x, Tensor y) + output : Tensor + infer_meta : + func : DotInferMeta + kernel : + func : dot + data_type : x + backward : dot_grad + +- op : dpsgd + args: (Tensor param, Tensor grad, Tensor learning_rate, float clip = 10.0f, float batch_size = 16.0f, float sigma = 1.0f, int seed = 0) + output: Tensor(param_out) + infer_meta: + func: DpsgdInferMeta + kernel: + func: dpsgd + data_type: param + +- op : dropout + args : (Tensor x, Tensor seed_tensor, Scalar p, bool is_test, str mode, int seed, bool fix_seed) + output : Tensor(out), Tensor(mask) + infer_meta : + func : DropoutInferMeta + kernel : + func : dropout + data_type : x + optional : seed_tensor + intermediate : mask + backward : dropout_grad + traits : pir::SideEffectTrait + +- op : edit_distance + args : (Tensor hyps, Tensor refs, Tensor hypslength, Tensor refslength, bool normalized = false) + output : Tensor(sequencenum), Tensor(out) + infer_meta : + func : EditDistanceInferMeta + kernel : + func : edit_distance + data_type : DataType::FLOAT32 + optional : hypslength, refslength + +- op : eig + args: (Tensor x) + output: Tensor(out_w), Tensor(out_v) + infer_meta: + func: EigInferMeta + kernel: + func: eig + backward: eig_grad + +- op : eigh + args : (Tensor x, str UPLO = "L") + output : Tensor(out_w), Tensor(out_v) + infer_meta : + func : EighInferMeta + kernel : + func : eigh + backward : eigh_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : eigvals + args : (Tensor x) + output : Tensor(out) + infer_meta : + func : EigvalsInferMeta + kernel : + func : eigvals + +- op : eigvalsh + args : (Tensor x, str uplo = "L", bool is_test = false) + output : Tensor(eigenvalues), Tensor(eigenvectors) + infer_meta : + func : EigvalshInferMeta + kernel : + func : eigvalsh + data_type : x + backward : eigvalsh_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : elu + args : (Tensor x, float alpha = 1.0f) + output : Tensor(out) + infer_meta : + func : UnchangedInferMeta + param : [x] + kernel : + func : elu + inplace : (x -> out) + backward : elu_grad + +- op : empty + args : (IntArray shape, DataType dtype=DataType::FLOAT32, Place place=CPUPlace()) + output: Tensor(out) + infer_meta : + func : CreateInferMeta + param : [shape, dtype] + kernel : + func : empty + param : [shape, dtype] + data_type : dtype + backend : place + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : empty_like + args : (Tensor x, DataType dtype = DataType::UNDEFINED, Place place = {}) + output: Tensor(out) + infer_meta : + func : CreateLikeInferMeta + param : [x, dtype] + kernel : + func : empty_like + param : [x, dtype] + data_type : dtype > x + backend : place > x + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : enable_check_model_nan_inf + args: (Tensor x, int flag = 1) + output: Tensor(out) + infer_meta: + func: UnchangedInferMeta + param : [x] + kernel: + func: check_model_nan_inf + data_type: x + backward: enable_check_model_nan_inf_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : equal_all + args : (Tensor x, Tensor y) + output : Tensor(out) + infer_meta : + func : CompareAllInferMeta + kernel : + func : equal_all + +- op : erf + 
args : (Tensor x) + output : Tensor(out) + infer_meta : + func : UnchangedInferMeta + kernel : + func : erf + inplace : (x -> out) + backward : erf_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : erfinv + args : (Tensor x) + output : Tensor(out) + infer_meta : + func : UnchangedInferMeta + kernel : + func : erfinv + inplace : (x -> out) + backward : erfinv_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : exp + args : (Tensor x) + output : Tensor(out) + infer_meta : + func : UnchangedInferMeta + spmd_rule : ElementwiseUnaryInferSpmd + kernel : + func : exp + inplace : (x -> out) + backward : exp_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : expand + args : (Tensor x, IntArray shape = {}) + output : Tensor(out) + infer_meta : + func : ExpandInferMeta + kernel : + func : expand + data_type : x + backward : expand_grad + +- op : expand_as + args : (Tensor x, Tensor y, int[] target_shape = {}) + output : Tensor(out) + infer_meta : + func : ExpandAsInferMeta + local_shape: target_shape + kernel : + func : expand_as + data_type : x + optional : y + backward : expand_as_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : expm1 + args : (Tensor x) + output : Tensor(out) + infer_meta : + func : UnchangedInferMeta + param : [x] + kernel : + func : expm1 + inplace: (x -> out) + backward : expm1_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : exponential_ + args : (Tensor x, float lam) + output : Tensor(out) + infer_meta : + func : UnchangedInferMeta + param : [x] + kernel : + func : exponential + inplace : (x -> out) + backward : exponential__grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : eye + args : (Scalar num_rows, Scalar num_columns, DataType dtype=DataType::FLOAT32, Place place={}) + output : Tensor(out) + infer_meta : + func : EyeInferMeta + param : [num_rows, num_columns, dtype] + kernel : + func : eye + param : [num_rows, num_columns, dtype] + data_type : dtype + backend : place + +- op : fake_channel_wise_dequantize_max_abs + args : (Tensor x, Tensor[] scales, int[] quant_bits = {8}, int quant_axis = 0, int x_num_col_dims = 1) + output : Tensor(out) + infer_meta : + func : FakeChannelWiseDequantizeMaxAbsInferMeta + kernel : + func : fake_channel_wise_dequantize_max_abs + data_type : x + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : fake_channel_wise_quantize_abs_max + args : (Tensor x, int bit_length = 8, int round_type = 1, int quant_axis = 0, bool is_test = false) + output : Tensor(out), Tensor(out_scale) + infer_meta : + func : FakeChannelWiseQuantizeAbsMaxInferMeta + kernel : + func : fake_channel_wise_quantize_abs_max + data_type : x + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : fake_channel_wise_quantize_dequantize_abs_max + args : (Tensor x, int bit_length = 8, int round_type = 1, int quant_axis = 0) + output : Tensor(out), Tensor(out_scale) + infer_meta : + func : FakeChannelWiseQuantizeDequantizeAbsMaxInferMeta + kernel : + func : fake_channel_wise_quantize_dequantize_abs_max + data_type : x + backward : fake_channel_wise_quantize_dequantize_abs_max_grad + +- op : fake_dequantize_max_abs + args : (Tensor x, Tensor scale, float max_range) + output : Tensor(out) + infer_meta : + func : FakeDequantizeMaxAbsInferMeta + kernel : + func : fake_dequantize_max_abs + data_type : x + +- op : fake_quantize_abs_max + args : (Tensor x, int bit_length = 8, int round_type = 1) + output : 
Tensor(out), Tensor(out_scale) + infer_meta : + func : FakeQuantizeAbsMaxInferMeta + kernel : + func : fake_quantize_abs_max + data_type : x + +- op : fake_quantize_dequantize_abs_max + args : (Tensor x, int bit_length = 8, int round_type = 1) + output : Tensor(out), Tensor(out_scale) + infer_meta : + func : FakeQuantizeAbsMaxInferMeta + kernel : + func : fake_quantize_dequantize_abs_max + data_type : x + backward : fake_quantize_dequantize_abs_max_grad + +- op : fake_quantize_dequantize_moving_average_abs_max + args : (Tensor x, Tensor in_scale, Tensor in_accum, Tensor in_state, float moving_rate = 0.9, int bit_length = 8, bool is_test = false, int round_type = 1) + output : Tensor(out), Tensor(out_scale), Tensor(out_state), Tensor(out_accum) + infer_meta : + func : FakeQuantOrWithDequantMovingAverageAbsMaxInferMeta + kernel : + func : fake_quantize_dequantize_moving_average_abs_max + data_type : x + optional : in_accum, in_state, out_state, out_accum + backward : fake_quantize_dequantize_moving_average_abs_max_grad + inplace: (in_scale -> out_scale) + +- op : fake_quantize_moving_average_abs_max + args : (Tensor x, Tensor in_scale, Tensor in_accum, Tensor in_state, float moving_rate = 0.9, int bit_length = 8, bool is_test = false, int round_type = 1) + output : Tensor(out), Tensor(out_scale), Tensor(out_state), Tensor(out_accum) + infer_meta : + func : FakeQuantOrWithDequantMovingAverageAbsMaxInferMeta + kernel : + func : fake_quantize_moving_average_abs_max + data_type : x + optional : in_accum, in_state, out_state, out_accum + inplace: (in_scale -> out_scale) + +- op : fake_quantize_range_abs_max + args : (Tensor x, Tensor in_scale, Tensor iter, int window_size = 10000, int bit_length = 8, bool is_test = false, int round_type = 1) + output : Tensor(out), Tensor(out_scale), Tensor(out_scales) + infer_meta : + func : FakeQuantizeRangeAbsMaxInferMeta + kernel : + func : fake_quantize_range_abs_max + data_type : x + optional : iter, out_scales + inplace: (in_scale -> out_scale) + +- op : fft_c2c + args : (Tensor x, int64_t[] axes, str normalization, bool forward) + output : Tensor + infer_meta : + func : FFTC2CInferMeta + kernel : + func : fft_c2c + backward : fft_c2c_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : fft_c2r + args : (Tensor x, int64_t[] axes, str normalization, bool forward, int64_t last_dim_size=0L) + output : Tensor + infer_meta : + func : FFTC2RInferMeta + kernel : + func : fft_c2r + backward : fft_c2r_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : fft_r2c + args : (Tensor x, int64_t[] axes, str normalization, bool forward, bool onesided) + output : Tensor + infer_meta : + func : FFTR2CInferMeta + kernel : + func : fft_r2c + backward : fft_r2c_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : fill + args : (Tensor x, Scalar(double) value=0) + output : Tensor(out) + infer_meta : + func : UnchangedInferMeta + param : [x] + kernel : + func : fill + inplace : (x -> out) + backward: fill_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : fill_diagonal + args : (Tensor x, float value=0, int offset=0, bool wrap=false) + output : Tensor(out) + infer_meta : + func : FillDiagonalInferMeta + kernel : + func : fill_diagonal + data_type : x + inplace : (x -> out) + backward : fill_diagonal_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : fill_diagonal_tensor + args : (Tensor x, Tensor y, int64_t offset = 0, int dim1 = 0, int dim2 = 1) + output : 
Tensor(out) + infer_meta : + func : FillDiagonalTensorInferMeta + kernel : + func : fill_diagonal_tensor + inplace : (x -> out) + backward : fill_diagonal_tensor_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : flash_attn + args : (Tensor q, Tensor k, Tensor v, Tensor fixed_seed_offset, Tensor attn_mask, float dropout = 0.0, bool causal = false, bool return_softmax = false, bool is_test = false, str rng_name = "") + output : Tensor(out), Tensor(softmax), Tensor(softmax_lse), Tensor(seed_offset) + optional : fixed_seed_offset, attn_mask + infer_meta : + func : FlashAttnInferMeta + param : [q, k, v] + spmd_rule : FlashAttInferSpmd + kernel : + func : flash_attn + data_type : q + backward : flash_attn_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : flash_attn_qkvpacked + args : (Tensor qkv, Tensor fixed_seed_offset, Tensor attn_mask, float dropout = 0.0, bool causal = false, bool return_softmax = false, bool is_test = false, str rng_name = "") + output : Tensor(out), Tensor(softmax), Tensor(softmax_lse), Tensor(seed_offset) + optional : fixed_seed_offset, attn_mask + infer_meta : + func : FlashAttnQKVPackedInferMeta + param : [qkv] + kernel : + func : flash_attn_qkvpacked + data_type : qkv + backward : flash_attn_qkvpacked_grad + +- op : flash_attn_unpadded + args : (Tensor q, Tensor k, Tensor v, Tensor cu_seqlens_q, Tensor cu_seqlens_k, Tensor fixed_seed_offset, Tensor attn_mask, int64_t max_seqlen_q, int64_t max_seqlen_k, float scale, float dropout = 0.0, bool causal = false, bool return_softmax = false, bool is_test = false, str rng_name = "") + output : Tensor(out), Tensor(softmax), Tensor(softmax_lse), Tensor(seed_offset) + optional : fixed_seed_offset , attn_mask + infer_meta : + func : FlashAttnInferMeta + param : [q, k, v] + kernel : + func : flash_attn_unpadded + data_type : q + intermediate : softmax_lse, seed_offset + backward : flash_attn_unpadded_grad + +- op : flash_attn_varlen_qkvpacked + args : (Tensor qkv, Tensor cu_seqlens_q, Tensor cu_seqlens_k, Tensor fixed_seed_offset, Tensor attn_mask, int64_t max_seqlen_q, int64_t max_seqlen_k, float scale, float dropout = 0.0, bool causal = false, bool return_softmax = false, bool is_test = false, str rng_name = "", bool varlen_padded = true) + output : Tensor(out), Tensor(softmax), Tensor(softmax_lse), Tensor(seed_offset) + optional : fixed_seed_offset , attn_mask + infer_meta : + func : FlashAttnQKVPackedInferMeta + param : [qkv] + kernel : + func : flash_attn_varlen_qkvpacked + data_type : qkv + intermediate : softmax_lse, seed_offset + backward : flash_attn_varlen_qkvpacked_grad + +- op : flash_attn_with_sparse_mask + args : (Tensor q, Tensor k, Tensor v, Tensor attn_mask_start_row_indices, Tensor fixed_seed_offset, float dropout = 0.0, bool causal = false, int attn_mask_start_row = 0, bool return_softmax = false, bool is_test = false, str rng_name = "") + output : Tensor(out), Tensor(softmax), Tensor(softmax_lse), Tensor(seed_offset) + optional : fixed_seed_offset + infer_meta : + func : FlashAttnInferMeta + param : [q, k, v] + kernel : + func : flash_attn_with_sparse_mask + data_type : q + backward : flash_attn_with_sparse_mask_grad + +- op : flatten + args : (Tensor x, int start_axis = 1, int stop_axis = 1) + output : Tensor(out), Tensor(xshape) + infer_meta : + func : FlattenWithXShapeInferMeta + spmd_rule : FlattenInferSpmd + kernel : + func : flatten + data_type : x + inplace : (x -> out) + view : (x -> out) + intermediate : xshape + backward : flatten_grad + interfaces : 
paddle::dialect::InferSymbolicShapeInterface + +- op : flip + args : (Tensor x, int[] axis) + output : Tensor (out) + infer_meta : + func : FlipInferMeta + kernel : + func : flip + backward : flip_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : floor + args : (Tensor x) + output : Tensor(out) + infer_meta : + func : UnchangedInferMeta + kernel : + func : floor + inplace : (x -> out) + backward : floor_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : fmax + args : (Tensor x, Tensor y) + output : Tensor(out) + infer_meta : + param: [x, y] + func : ElementwiseInferMeta + kernel : + func : fmax + backward : fmax_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : fmin + args : (Tensor x, Tensor y) + output : Tensor(out) + infer_meta : + func : ElementwiseInferMeta + param: [x, y] + kernel : + func : fmin + backward : fmin_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : fold + args: (Tensor x, int[] output_sizes, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations) + output: Tensor(out) + infer_meta: + func: FoldInferMeta + kernel: + func: fold + backward: fold_grad + +- op : fractional_max_pool2d + args : (Tensor x, int[] output_size, int[] kernel_size = {0, 0}, float random_u = 0.0, bool return_mask = true) + output : Tensor(out), Tensor(mask) + infer_meta : + func : FractionalMaxPoolInferMeta + kernel : + func : fractional_max_pool2d + backward : fractional_max_pool2d_grad + +- op : fractional_max_pool3d + args : (Tensor x, int[] output_size, int[] kernel_size = {0, 0, 0}, float random_u = 0.0, bool return_mask = true) + output : Tensor(out), Tensor(mask) + infer_meta : + func : FractionalMaxPoolInferMeta + kernel : + func : fractional_max_pool3d + backward : fractional_max_pool3d_grad + +- op : frame + args : (Tensor x, int frame_length, int hop_length, int axis=-1) + output : Tensor(out) + infer_meta : + func : FrameInferMeta + kernel : + func : frame + backward : frame_grad + +- op : frobenius_norm + args : (Tensor x, IntArray axis, bool keep_dim, bool reduce_all) + output : Tensor(out) + infer_meta : + func : ReduceIntArrayAxisInferMetaBase + kernel : + func : frobenius_norm + backward : frobenius_norm_grad + +- op : ftrl + args: (Tensor param, Tensor squared_accumulator, Tensor linear_accumulator, Tensor grad, Tensor learning_rate, float l1=0.0f, float l2=0.0f, float lr_power=-0.5f) + output: Tensor(param_out), Tensor(squared_accum_out), Tensor(linear_accum_out) + infer_meta: + func: FtrlInferMeta + kernel: + func: ftrl {dense, dense, dense, dense, dense -> dense, dense, dense} + ftrl_sr {dense, dense, dense, selected_rows, dense -> dense, dense, dense} + data_type: param + +- op : full + args : (IntArray shape, Scalar(double) value, DataType dtype=DataType::FLOAT32, Place place=CPUPlace()) + output: Tensor(out) + infer_meta : + func : CreateInferMeta + param : [shape, dtype] + kernel : + func : full + param : [shape, value, dtype] + data_type : dtype + backend : place + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : full_ + args : (Tensor output, IntArray shape, Scalar(double) value, DataType dtype=DataType::FLOAT32, Place place=CPUPlace()) + output : Tensor(out) + inplace : (output -> out) + infer_meta : + func : CreateInferMeta + param : [shape, dtype] + kernel : + func : full + param : [shape, value, dtype] + data_type : dtype + backend : place + +- op : full_batch_size_like + args : (Tensor input, int[] shape, DataType dtype, Scalar(double) 
value, int input_dim_idx, int output_dim_idx, Place place=CPUPlace()) + output: Tensor(out) + infer_meta : + func : FullBatchSizeLikeInferMeta + param : [input, shape, value, dtype, input_dim_idx, output_dim_idx] + kernel : + func : full_batch_size_like + param : [input, shape, value, dtype, input_dim_idx, output_dim_idx] + data_type : dtype + backend : place + +- op : full_int_array + args : (int64_t[] value, DataType dtype=DataType::FLOAT32, Place place=CPUPlace()) + output: Tensor(out) + infer_meta : + func : CreateVecShapeInferMeta + param : [value, dtype] + kernel : + func : full_int_array + param : [value, dtype] + data_type : dtype + backend : place + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : full_like + args : (Tensor x, Scalar value, DataType dtype = DataType::UNDEFINED, Place place = {}) + output: Tensor(out) + infer_meta : + func : CreateLikeInferMeta + param : [x, dtype] + spmd_rule : FullLikeInferSpmd + kernel : + func : full_like + param : [x, value, dtype] + data_type : dtype > x + backend : place > x + data_transform : + skip_transform : x + traits : paddle::dialect::ForwardOnlyTrait + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : full_with_tensor + args : (Tensor value, IntArray shape, DataType dtype=DataType::FLOAT32) + output: Tensor(out) + infer_meta : + func : FullWithTensorInferMeta + param : [shape, dtype] + kernel : + func : full_with_tensor + data_type : dtype + interfaces : paddle::dialect::InferSymbolicShapeInterface + traits : paddle::dialect::ForwardOnlyTrait + +- op : fused_batch_norm_act + args : (Tensor x, Tensor scale, Tensor bias, Tensor mean, Tensor variance, float momentum, float epsilon, str act_type) + output : Tensor(out), Tensor(mean_out), Tensor(variance_out), Tensor(saved_mean), Tensor(saved_variance), Tensor(reserve_space) + infer_meta: + func : FusedBatchNormActInferMeta + param : [x, scale, bias, mean, variance] + kernel : + func : fused_batch_norm_act + data_type : x + view : (mean -> mean_out), (variance -> variance_out) + backward : fused_batch_norm_act_grad + +- op : fused_bn_add_activation + args : (Tensor x, Tensor z, Tensor scale, Tensor bias, Tensor mean, Tensor variance, float momentum, float epsilon, str act_type) + output : Tensor(out), Tensor(mean_out), Tensor(variance_out), Tensor(saved_mean), Tensor(saved_variance), Tensor(reserve_space) + infer_meta: + func : FusedBatchNormActInferMeta + param : [x, scale, bias, mean, variance] + kernel : + func : fused_bn_add_activation + data_type : x + view : (mean -> mean_out), (variance -> variance_out) + backward : fused_bn_add_activation_grad + +- op : fused_multi_transformer + args : (Tensor x, Tensor[] ln_scales, Tensor[] ln_biases, Tensor[] qkv_weights, Tensor[] qkv_biases, Tensor[] cache_kvs, Tensor[] pre_caches, Tensor rotary_tensor, Tensor beam_offset, Tensor time_step, Tensor seq_lengths, Tensor src_mask, Tensor[] out_linear_weights, Tensor[] out_linear_biases, Tensor[] ffn_ln_scales, Tensor[] ffn_ln_biases, Tensor[] ffn1_weights, Tensor[] ffn1_biases, Tensor[] ffn2_weights, Tensor[] ffn2_biases, bool pre_layer_norm = true, float epsilon = 1e-5, float residual_alpha = 1.0f, float dropout_rate = .5f, int rotary_emb_dims = 0, bool is_test = false, str dropout_implementation = "downgrade_in_infer", str act_method = "gelu", bool trans_qkvw = true, int ring_id = -1, str norm_type = "layernorm", bool use_neox_rotary_style=true, int gqa_group_size=-1) + optional : qkv_biases, cache_kvs, pre_caches, rotary_tensor, beam_offset, time_step, 
seq_lengths, src_mask, out_linear_biases, ffn1_biases, ffn2_biases, cache_kv_outs + output : Tensor[](cache_kv_outs){out_linear_weights.size()}, Tensor(out) + infer_meta : + func : FusedMultiTransformerInferMeta + kernel : + func : fused_multi_transformer + data_type : x + +- op : fused_softmax_mask + args : (Tensor x, Tensor mask) + output : Tensor(out) + infer_meta : + func : SoftmaxMaskFuseInferMeta + kernel : + func : fused_softmax_mask + data_type : x + backward: fused_softmax_mask_grad + +- op : fused_softmax_mask_upper_triangle + args : (Tensor X) + output : Tensor(Out) + infer_meta : + func : UnchangedInferMeta + kernel: + func : fused_softmax_mask_upper_triangle + backward: fused_softmax_mask_upper_triangle_grad + +- op : gammaincc + args : (Tensor x, Tensor y) + output : Tensor(out) + infer_meta : + func : ElementwiseInferMeta + param : [x, y] + kernel : + func : gammaincc + inplace: (x -> out) + backward : gammaincc_grad + +- op : gammaln + args : (Tensor x) + output : Tensor(out) + infer_meta : + func : UnchangedInferMeta + kernel : + func : gammaln + inplace: (x -> out) + backward : gammaln_grad + +- op : gather + args : (Tensor x, Tensor index, Scalar axis=0) + output : Tensor(out) + infer_meta : + func : GatherInferMeta + kernel : + func : gather + data_type: x + backward : gather_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : gather_nd + args : (Tensor x, Tensor index) + output : Tensor(out) + infer_meta : + func : GatherNdInferMeta + spmd_rule : GatherNdInferSpmd + kernel : + func : gather_nd + data_type : x + backward : gather_nd_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : gather_tree + args : (Tensor ids, Tensor parents) + output : Tensor(out) + infer_meta : + func : GatherTreeMeta + kernel : + func : gather_tree + data_type : ids + +- op : gaussian + args : (IntArray shape, float mean, float std, int seed, DataType dtype, Place place={}) + output: Tensor(out) + infer_meta : + func : GaussianInferMeta + param : [shape, mean, std, seed, dtype] + kernel : + func : gaussian + param : [shape, mean, std, seed, dtype] + data_type : dtype + backend : place + interfaces : paddle::dialect::InferSymbolicShapeInterface + traits : pir::SideEffectTrait, paddle::dialect::ForwardOnlyTrait + +- op : gaussian_inplace + args: (Tensor x, float mean=0, float std=1.0, int seed=0) + output: Tensor(out) + infer_meta: + func: UnchangedInferMeta + param: [x] + kernel: + func: gaussian_inplace + data_type: x + backend : x + inplace: (x -> out) + backward: gaussian_inplace_grad + +- op : gelu + args : (Tensor x, bool approximate = false) + output : Tensor(out) + infer_meta : + func : UnchangedInferMeta + param: [x] + kernel : + func : gelu + backward : gelu_grad + +- op : generate_proposals + args : (Tensor scores, Tensor bbox_deltas, Tensor im_shape, Tensor anchors, Tensor variances, int pre_nms_top_n, int post_nms_top_n, float nms_thresh, float min_size, float eta, bool pixel_offset=true) + output : Tensor(rpn_rois), Tensor(rpn_roi_probs), Tensor(rpn_rois_num) + infer_meta : + func : GenerateProposalsV2InferMeta + kernel : + func : generate_proposals + data_type : anchors + optional : rpn_rois_num + +- op : graph_khop_sampler + args : (Tensor row, Tensor colptr, Tensor x, Tensor eids, int[] sample_sizes, bool return_eids) + output : Tensor(out_src), Tensor(out_dst), Tensor(sample_index), Tensor(reindex_x), Tensor(out_eids) + infer_meta : + func : GraphKhopSamplerInferMeta + kernel : + func : graph_khop_sampler + data_type : row + 
optional : eids + +- op : graph_sample_neighbors + args : (Tensor row, Tensor colptr, Tensor x, Tensor eids, Tensor perm_buffer, int sample_size, bool return_eids, bool flag_perm_buffer) + output : Tensor(out), Tensor(out_count), Tensor(out_eids) + infer_meta : + func : GraphSampleNeighborsInferMeta + kernel : + func : graph_sample_neighbors + data_type : row + optional : eids, perm_buffer + +- op : grid_sample + args : (Tensor x, Tensor grid, str mode = "bilinear", str padding_mode = "zeros", bool align_corners = true) + output : Tensor(out) + infer_meta : + func : GridSampleBaseInferMeta + param : [x, grid] + kernel: + func : grid_sample + data_type : x + backward : grid_sample_grad + +- op : group_norm + args : (Tensor x, Tensor scale, Tensor bias, float epsilon = 1e-5, int groups = -1, str data_format = "NCHW") + output : Tensor(y), Tensor(mean), Tensor(variance) + infer_meta : + func : GroupNormInferMeta + kernel : + func : group_norm + optional : scale, bias + intermediate : mean, variance + backward : group_norm_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface, paddle::dialect::LayoutTransformationInterface + +- op : gru + args: (Tensor input, Tensor h0, Tensor weight, Tensor bias, str activation = "tanh", + str gate_activation = "sigmoid", bool is_reverse = false, bool origin_mode = false, bool is_test=false) + output: Tensor (batch_gate), Tensor (batch_reset_hidden_prev), Tensor (batch_hidden), + Tensor (hidden) + infer_meta: + func: GruInferMeta + kernel: + func: gru + data_type: input + optional: h0, bias + intermediate: batch_gate, batch_reset_hidden_prev, batch_hidden + backward: gru_grad + +- op : gru_unit + args: (Tensor input, Tensor hidden_prev, Tensor weight, Tensor bias, int activation + = 2, int gate_activation = 1, bool origin_mode = false) + output: Tensor (gate), Tensor (reset_hidden_prev), Tensor (hidden) + infer_meta: + func: GruUnitInferMeta + kernel: + func: gru_unit + optional: bias + intermediate: gate, reset_hidden_prev + backward: gru_unit_grad + +- op : gumbel_softmax + args : (Tensor x, float temperature = 1.0, bool hard = false, int axis = -1) + output : Tensor + infer_meta : + func : GumbelSoftmaxInferMeta + kernel : + func : gumbel_softmax + backward : gumbel_softmax_grad + +- op : hardshrink + args : (Tensor x, float threshold = 0.5) + output : Tensor (out) + infer_meta : + func : UnchangedInferMeta + param : [x] + kernel : + func : hard_shrink + backward : hardshrink_grad + +- op : hardsigmoid + args : (Tensor x, float slope = 0.2, float offset = 0.5) + output : Tensor (out) + infer_meta : + func : UnchangedInferMeta + param : [x] + kernel : + func : hardsigmoid + backward : hardsigmoid_grad + +- op : hardtanh + args : (Tensor x, float t_min=0, float t_max=24) + output : Tensor(out) + infer_meta : + func : UnchangedInferMeta + param : [x] + kernel : + func : hardtanh + inplace: (x -> out) + backward : hardtanh_grad + +- op : heaviside + args : (Tensor x, Tensor y) + output : Tensor(out) + infer_meta : + func : ElementwiseInferMeta + kernel : + func : heaviside + backward : heaviside_grad + +- op : hinge_loss + args: (Tensor logits, Tensor labels) + output: Tensor (loss) + infer_meta: + func: HingeLossInferMeta + kernel: + func: hinge_loss + data_type: logits + backward: hinge_loss_grad + +- op : histogram + args : (Tensor input, Tensor weight, int64_t bins = 100, int min = 0, int max = 0, bool density = false) + output : Tensor(out) + infer_meta : + func : HistogramInferMeta + optional : weight + kernel : + func : histogram + +- op : 
hsigmoid_loss + args : (Tensor x, Tensor label, Tensor w, Tensor bias, Tensor path, Tensor code, int num_classes, bool is_sparse) + output : Tensor(out), Tensor(pre_out), Tensor(w_out) + infer_meta : + func : HSigmoidLossInferMeta + optional: path, code, bias + kernel : + func : hsigmoid_loss + data_type : x + backward : hsigmoid_loss_grad + +- op : huber_loss + args : (Tensor input, Tensor label, float delta) + output : Tensor(out), Tensor(residual) + infer_meta : + func : HuberLossInferMeta + kernel : + func : huber_loss + intermediate : residual + backward : huber_loss_grad + +- op : i0 + args : (Tensor x) + output : Tensor(out) + infer_meta : + func : UnchangedInferMeta + kernel : + func : i0 + inplace: (x -> out) + backward : i0_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : i0e + args : (Tensor x) + output : Tensor(out) + infer_meta : + func : UnchangedInferMeta + kernel : + func : i0e + backward : i0e_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : i1 + args : (Tensor x) + output : Tensor(out) + infer_meta : + func : UnchangedInferMeta + kernel : + func : i1 + backward : i1_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : i1e + args : (Tensor x) + output : Tensor(out) + infer_meta : + func : UnchangedInferMeta + kernel : + func : i1e + backward : i1e_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : identity_loss + args : (Tensor x, int reduction = 1) + output : Tensor(out) + infer_meta : + func : IdentityLossInferMeta + kernel : + func : identity_loss + inplace: (x -> out) + backward : identity_loss_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : im2sequence + args: (Tensor x, Tensor y, int[] kernels, int[] strides = {1, 1}, int[] paddings + = {0, 0, 0, 0}, int[] out_stride = {1, 1}) + output: Tensor (out) + infer_meta: + func: Im2sequenceInferMeta + kernel: + func: im2sequence + optional: y + backward: im2sequence_grad + +- op : imag + args : (Tensor x) + output : Tensor (out) + infer_meta : + func : RealAndImagInferMeta + kernel : + func : imag + backward : imag_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : increment + args : (Tensor x, float value = 1.0) + output : Tensor(out) + infer_meta : + func : IncrementInferMeta + kernel : + func : increment + inplace : (x -> out) + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : index_add + args : (Tensor x, Tensor index, Tensor add_value, int axis = 0) + output : Tensor(out) + infer_meta : + func : IndexAddInferMeta + kernel : + func : index_add + data_type : x + inplace : (x -> out) + backward : index_add_grad + +- op : index_put + args : (Tensor x, Tensor[] indices, Tensor value, bool accumulate=false) + output : Tensor(out) + infer_meta : + func : IndexPutInferMeta + kernel : + func : index_put + data_type : x + inplace : (x -> out) + backward : index_put_grad + +- op : index_sample + args : (Tensor x, Tensor index) + output : Tensor + infer_meta : + func : IndexSampleInferMeta + kernel : + func : index_sample + data_type : x + backward : index_sample_grad + data_transform : + skip_transform : index + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : index_select + args : (Tensor x, Tensor index, int axis = 0) + output : Tensor(out) + infer_meta : + func : IndexSelectInferMeta + kernel : + func : index_select + data_type : x + backward : index_select_grad + data_transform : + skip_transform : index + +- op : index_select_strided + args : 
(Tensor x, int64_t index, int axis = 0) + output : Tensor(out) + infer_meta : + func : IndexSelectStridedInferMeta + kernel : + func : index_select_strided + data_type : x + backward : index_select_strided_grad + +- op : instance_norm + args : (Tensor x, Tensor scale, Tensor bias, float epsilon=1e-5) + output : Tensor(y), Tensor(saved_mean), Tensor(saved_variance) + infer_meta : + func : InstanceNormInferMeta + kernel : + func : instance_norm + data_type : x + optional : scale, bias + intermediate : saved_mean, saved_variance + backward : instance_norm_grad + +- op : inverse + args : (Tensor x) + output : Tensor(out) + infer_meta : + func : InverseInferMeta + kernel : + func : inverse + backward : inverse_grad + +- op : is_empty + args : (Tensor x) + output : Tensor(out) + infer_meta : + func : IsEmptyInferMeta + kernel : + func : is_empty + +- op : isclose + args : (Tensor x, Tensor y, Scalar(double) rtol=1e-5, Scalar(double) atol=1e-8, bool equal_nan=false) + output : Tensor(out) + infer_meta : + func : ValueCompareInferMeta + param: [x, y] + kernel : + func : isclose + data_type : x + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : isfinite + args : (Tensor x) + output : Tensor(out) + infer_meta : + func : IsfiniteInferMeta + kernel : + func : isfinite {dense -> dense}, + isfinite_sr {selected_rows -> selected_rows} + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : isinf + args : (Tensor x) + output : Tensor(out) + infer_meta : + func : IsfiniteInferMeta + kernel : + func : isinf {dense -> dense}, + isinf_sr {selected_rows -> selected_rows} + interfaces : paddle::dialect::InferSymbolicShapeInterface + traits : paddle::dialect::ForwardOnlyTrait + +- op : isnan + args : (Tensor x) + output : Tensor(out) + infer_meta : + func : IsfiniteInferMeta + kernel : + func : isnan {dense -> dense}, + isnan_sr {selected_rows -> selected_rows} + interfaces : paddle::dialect::InferSymbolicShapeInterface + traits : paddle::dialect::ForwardOnlyTrait + +- op : kldiv_loss + args : (Tensor x, Tensor label, str reduction = "mean", bool log_target = false) + output : Tensor(out) + infer_meta : + func : KLDivInferMeta + kernel : + func : kldiv_loss + data_type : x + backward : kldiv_loss_grad + +- op : kron + args : (Tensor x, Tensor y) + output : Tensor + infer_meta : + func : KronInferMeta + kernel : + func : kron + backward : kron_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : kthvalue + args : (Tensor x, int k = 1, int axis = -1, bool keepdim = false) + output : Tensor(out), Tensor(indices) + infer_meta : + func : KthvalueInferMeta + kernel : + func : kthvalue + backward : kthvalue_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : l1_norm + args : (Tensor x) + output : Tensor(out) + infer_meta : + func : L1NormInferMeta + kernel : + func : l1_norm + data_type : x + inplace: (x -> out) + backward : l1_norm_grad + +- op : label_smooth + args : (Tensor label, Tensor prior_dist, float epsilon = 0.0f) + output : Tensor (out) + infer_meta : + func : UnchangedInferMeta + param : [label] + kernel : + func : label_smooth + data_type : label + optional : prior_dist + backward : label_smooth_grad + +- op : lamb_ + args : (Tensor param, Tensor grad, Tensor learning_rate, Tensor moment1, Tensor moment2, Tensor beta1_pow, Tensor beta2_pow, Tensor master_param, Tensor skip_update, float weight_decay, float beta1=0.9, float beta2=0.999, float epsilon=1.0e-6f, bool always_adapt=false, bool multi_precision=false) + output : 
+
+- op : layer_norm
+  args : (Tensor x, Tensor scale, Tensor bias, float epsilon = 1e-5, int begin_norm_axis = 1)
+  output : Tensor(out), Tensor(mean), Tensor(variance)
+  infer_meta :
+    func : LayerNormInferMeta
+    spmd_rule : LayerNormInferSpmd
+  kernel :
+    func : layer_norm
+    data_type : x
+  backward : layer_norm_grad
+  intermediate : mean, variance
+  optional : scale, bias
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : leaky_relu
+  args : (Tensor x, float negative_slope = 0.02f)
+  output : Tensor(out)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : leaky_relu
+  inplace: (x -> out)
+  backward : leaky_relu_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : lerp
+  args : (Tensor x, Tensor y, Tensor weight)
+  output : Tensor(out)
+  infer_meta :
+    func : LerpInferMeta
+  kernel :
+    func : lerp
+  inplace : (x -> out)
+  backward : lerp_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : lgamma
+  args : (Tensor x)
+  output : Tensor(out)
+  infer_meta :
+    func : UnchangedInferMeta
+  kernel :
+    func : lgamma
+  inplace: (x -> out)
+  backward : lgamma_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : limit_by_capacity
+  args : (Tensor expert_count, Tensor capacity, int n_worker)
+  output : Tensor(out)
+  infer_meta :
+    func : LimitByCapacityInferMeta
+  kernel :
+    func : limit_by_capacity
+    data_type : expert_count
+
+- op : linear_interp
+  args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_format="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1)
+  output : Tensor(output)
+  infer_meta :
+    func : InterpolateInferMeta
+  optional: out_size, size_tensor, scale_tensor
+  kernel :
+    func : linear_interp
+    data_type : x
+  backward : linear_interp_grad
+  data_transform :
+    skip_transform : out_size, size_tensor, scale_tensor
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : linspace
+  args : (Tensor start, Tensor stop, Tensor number, DataType dtype, Place place)
+  output : Tensor(out)
+  infer_meta :
+    func : LinspaceInferMeta
+    param: [start, stop, number, dtype]
+  kernel :
+    func : linspace
+    param: [start, stop, number, dtype]
+    data_type : dtype
+    backend : place
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : llm_int8_linear
+  args : (Tensor x, Tensor weight, Tensor bias, Tensor weight_scale, float threshold=6.0)
+  output : Tensor(out)
+  infer_meta :
+    func : LLMInt8LinearInferMeta
+  kernel :
+    func : llm_int8_linear
+    data_type : x
+  optional: bias
+
+- op : log
+  args : (Tensor x)
+  output : Tensor(out)
+  infer_meta :
+    func : UnchangedInferMeta
+  kernel :
+    func : log
+  inplace: (x -> out)
+  backward: log_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
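+
+# Example (editor's sketch): linspace above is exposed as paddle.linspace;
+# start/stop/number may be Python scalars or 0-D tensors:
+#   import paddle
+#   paddle.linspace(0.0, 1.0, 5)  # [0., 0.25, 0.5, 0.75, 1.]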
+
+- op : log10
+  args : (Tensor x)
+  output : Tensor(out)
+  infer_meta :
+    func : UnchangedInferMeta
+  kernel :
+    func : log10
+  inplace: (x -> out)
+  backward: log10_grad
+
+- op : log1p
+  args : (Tensor x)
+  output : Tensor(out)
+  infer_meta :
+    func : UnchangedInferMeta
+  kernel :
+    func : log1p
+  inplace: (x -> out)
+  backward: log1p_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : log2
+  args : (Tensor x)
+  output : Tensor(out)
+  infer_meta :
+    func : UnchangedInferMeta
+  kernel :
+    func : log2
+  inplace: (x -> out)
+  backward: log2_grad
+
+- op : log_loss
+  args : (Tensor input, Tensor label, float epsilon)
+  output : Tensor
+  infer_meta :
+    func : LogLossInferMeta
+  kernel :
+    func : log_loss
+  backward : log_loss_grad
+
+- op : log_softmax
+  args : (Tensor x, int axis = -1)
+  output : Tensor(out)
+  infer_meta :
+    func : UnchangedInferMetaCheckAxis
+  kernel :
+    func : log_softmax
+    data_type : x
+  backward : log_softmax_grad
+
+- op : logcumsumexp
+  args : (Tensor x, int axis=-1, bool flatten=false, bool exclusive=false, bool reverse=false)
+  output : Tensor(out)
+  infer_meta :
+    func : CumInferMeta
+  kernel :
+    func : logcumsumexp
+  backward : logcumsumexp_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : logical_and
+  args : (Tensor x, Tensor y)
+  output : Tensor(out)
+  infer_meta :
+    func : LogicalBinaryInferMeta
+  kernel :
+    func : logical_and
+    data_type : x
+    backend : x
+  inplace: (x -> out)
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+  traits : paddle::dialect::ForwardOnlyTrait
+
+- op : logical_not
+  args : (Tensor x)
+  output : Tensor(out)
+  infer_meta :
+    func : LogicalNotInferMeta
+  kernel :
+    func : logical_not
+    data_type : x
+    backend : x
+  inplace: (x -> out)
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+  traits : paddle::dialect::ForwardOnlyTrait
+
+- op : logical_or
+  args : (Tensor x, Tensor y)
+  output : Tensor(out)
+  infer_meta :
+    func : LogicalBinaryInferMeta
+  kernel :
+    func : logical_or
+    data_type : x
+    backend : x
+  inplace: (x -> out)
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+  traits : paddle::dialect::ForwardOnlyTrait
+
+- op : logical_xor
+  args : (Tensor x, Tensor y)
+  output : Tensor(out)
+  infer_meta :
+    func : LogicalBinaryInferMeta
+  kernel :
+    func : logical_xor
+    data_type : x
+    backend : x
+  inplace: (x -> out)
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+  traits : paddle::dialect::ForwardOnlyTrait
+
+- op : logit
+  args : (Tensor x, float eps = 1e-6f)
+  output : Tensor(out)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : logit
+  inplace: (x -> out)
+  backward : logit_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : logsigmoid
+  args : (Tensor x)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+  kernel :
+    func : logsigmoid
+  backward : logsigmoid_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : logspace
+  args : (Tensor start, Tensor stop, Tensor num, Tensor base, DataType dtype, Place place={})
+  output : Tensor(out)
+  infer_meta:
+    func : LogspaceInferMeta
+    param : [start, stop, num, base, dtype]
+  kernel :
+    func : logspace
+    param : [start, stop, num, base, dtype]
+    data_type : dtype
+    backend : place
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : logsumexp
+  args : (Tensor x, int[] axis={0}, bool keepdim=false, bool reduce_all=false)
+  output : Tensor(out)
+  infer_meta :
+    func : LogsumexpInferMeta
+  kernel :
+    func : logsumexp
+  backward : logsumexp_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
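+
+# Example (editor's sketch): logsumexp reduces over `axis` in a numerically
+# stable way, matching paddle.logsumexp:
+#   import paddle
+#   x = paddle.to_tensor([[0.0, 1.0], [2.0, 3.0]])
+#   paddle.logsumexp(x, axis=-1)  # one value per row, shape [2]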
+
+- op : lookup_table_dequant
+  args: (Tensor w, Tensor ids, int64_t padding_idx = -1)
+  output: Tensor (out)
+  infer_meta:
+    func: LookupTableDequantInferMeta
+  kernel:
+    func: lookup_table_dequant
+    data_type: w
+
+- op : lp_pool2d
+  args : (Tensor x, IntArray kernel_size, int[] strides = {1,1}, int[] paddings = {0,0}, bool ceil_mode = false, bool exclusive = true, str data_format = "NCHW", str pooling_type = "", bool global_pooling = false, bool adaptive = false, str padding_algorithm = "EXPLICIT", float norm_type = 0.0f)
+  output : Tensor(out)
+  infer_meta :
+    func : Pool2DInferMeta
+    param : [x, kernel_size, strides, paddings, ceil_mode, exclusive, data_format, pooling_type, global_pooling, adaptive, padding_algorithm]
+  kernel :
+    func : lp_pool2d
+    param : [x, kernel_size, strides, paddings, ceil_mode, exclusive, data_format, pooling_type, global_pooling, adaptive, padding_algorithm, norm_type]
+  backward : lp_pool2d_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : lstm
+  args: (Tensor input, Tensor h0, Tensor c0, Tensor weight, Tensor bias, bool use_peepholes
+    = true, bool is_reverse = false, bool is_test = false, str gate_activation = "sigmoid",
+    str cell_activation = "tanh", str candidate_activation = "tanh")
+  output: Tensor (hidden), Tensor (cell), Tensor (batch_gate), Tensor (batch_cell_pre_act)
+  infer_meta:
+    func: LSTMInferMeta
+  kernel:
+    func: lstm
+    data_type: input
+  optional: h0, c0
+  intermediate: batch_gate, batch_cell_pre_act
+  backward: lstm_grad
+
+- op : lstsq
+  args : (Tensor x, Tensor y, Scalar rcond=0.0f, str driver="gels")
+  output : Tensor(solution), Tensor(residuals), Tensor(rank), Tensor(singular_values)
+  infer_meta :
+    func : LstsqInferMeta
+  kernel :
+    func : lstsq
+    data_type : x
+  optional : residuals
+
+- op : lu
+  args : (Tensor x, bool pivot = true)
+  output : Tensor(out), Tensor(pivots), Tensor(infos)
+  infer_meta :
+    func : LUInferMeta
+  kernel :
+    func : lu
+    data_type : x
+  inplace : (x -> out)
+  backward : lu_grad
+
+- op : lu_unpack
+  args : (Tensor x, Tensor y, bool unpack_ludata = true, bool unpack_pivots = true)
+  output : Tensor(pmat), Tensor(l), Tensor(u)
+  infer_meta :
+    func : LUUnpackInferMeta
+  kernel :
+    func : lu_unpack
+    data_type : x
+  backward : lu_unpack_grad
+
+- op : margin_cross_entropy
+  args : (Tensor logits, Tensor label, bool return_softmax = false, int ring_id = 0, int rank = 0, int nranks = 1, float margin1 = 1.0f, float margin2 = 0.5f, float margin3 = 0.0f, float scale = 64.0f)
+  output : Tensor(softmax), Tensor(loss)
+  infer_meta :
+    func : MarginCrossEntropyInferMeta
+  kernel :
+    func : margin_cross_entropy
+    data_type : logits
+  backward : margin_cross_entropy_grad
+
+- op : masked_multihead_attention_
+  args : (Tensor x, Tensor cache_kv, Tensor bias, Tensor src_mask, Tensor cum_offsets, Tensor sequence_lengths, Tensor rotary_tensor, Tensor beam_cache_offset, Tensor qkv_out_scale, Tensor out_shift, Tensor out_smooth, int seq_len, int rotary_emb_dims, bool use_neox_rotary_style=false, str compute_dtype = "default", float out_scale=-1, int quant_round_type=1, float quant_max_bound=127.0, float quant_min_bound=-127.0)
+  output : Tensor(out), Tensor(cache_kv_out), Tensor(beam_cache_offset_out)
+  infer_meta :
+    func : MaskedMultiheadAttentionInferMeta
+  kernel :
+    func : masked_multihead_attention
+    data_type : x
+  optional : bias, src_mask, cum_offsets, sequence_lengths, rotary_tensor, beam_cache_offset, qkv_out_scale, out_shift, out_smooth
+  inplace : (cache_kv -> cache_kv_out), (beam_cache_offset -> beam_cache_offset_out)
+
+- op : masked_select
+  args : (Tensor x, Tensor mask)
+  output : Tensor (out)
+  infer_meta :
+    func : MaskedSelectInferMeta
+  kernel :
+    func : masked_select
+    data_type : x
+  backward : masked_select_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : match_matrix_tensor
+  args: (Tensor x, Tensor y, Tensor w, int dim_t = 1)
+  output: Tensor (out), Tensor (tmp)
+  infer_meta:
+    func: MatchMatrixTensorInferMeta
+  kernel:
+    func: match_matrix_tensor
+  backward: match_matrix_tensor_grad
+
+- op : matrix_nms
+  args : (Tensor bboxes, Tensor scores, float score_threshold, int nms_top_k, int keep_top_k, float post_threshold=0., bool use_gaussian = false, float gaussian_sigma = 2., int background_label = 0, bool normalized = true)
+  output : Tensor(out), Tensor(index), Tensor(roisnum)
+  infer_meta :
+    func : MatrixNMSInferMeta
+  optional : roisnum
+  kernel :
+    func : matrix_nms
+
+- op : matrix_power
+  args : (Tensor x, int n)
+  output : Tensor
+  infer_meta :
+    func : MatrixPowerInferMeta
+  kernel :
+    func : matrix_power
+  backward : matrix_power_grad
+
+- op : matrix_rank
+  args : (Tensor x, float tol, bool use_default_tol=true, bool hermitian=false)
+  output : Tensor(out)
+  infer_meta :
+    func : MatrixRankInferMeta
+    param : [x, use_default_tol, hermitian]
+  kernel :
+    func : matrix_rank
+
+- op : matrix_rank_tol
+  args : (Tensor x, Tensor atol_tensor, bool use_default_tol=true, bool hermitian=false)
+  output : Tensor(out)
+  infer_meta :
+    func : MatrixRankTolInferMeta
+  kernel :
+    func : matrix_rank_tol
+
+- op : max
+  args : (Tensor x, IntArray axis={}, bool keepdim=false)
+  output : Tensor(out)
+  infer_meta :
+    func : ReduceIntArrayAxisInferMeta
+    spmd_rule: ReductionMaxInferSpmdDynamic
+  kernel :
+    func : max
+  backward : max_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : max_pool2d_with_index
+  args : (Tensor x, int[] kernel_size, int[] strides= {1, 1}, int[] paddings = {0, 0}, bool global_pooling = false, bool adaptive = false, bool ceil_mode = false)
+  output : Tensor(out), Tensor(mask)
+  infer_meta :
+    func : MaxPoolWithIndexInferMeta
+  kernel :
+    func : max_pool2d_with_index
+  backward : max_pool2d_with_index_grad
+
+- op : max_pool3d_with_index
+  args : (Tensor x, int[] kernel_size, int[] strides = {1, 1, 1}, int[] paddings = {0, 0, 0}, bool global_pooling = false, bool adaptive = false, bool ceil_mode = false)
+  output : Tensor(out), Tensor(mask)
+  infer_meta :
+    func : MaxPoolWithIndexInferMeta
+  kernel :
+    func : max_pool3d_with_index
+  backward : max_pool3d_with_index_grad
+
+- op : maxout
+  args : (Tensor x, int groups, int axis = 1)
+  output : Tensor(out)
+  infer_meta :
+    func : MaxOutInferMeta
+  kernel :
+    func : maxout
+  backward : maxout_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : mean
+  args : (Tensor x, IntArray axis={}, bool keepdim=false)
+  output : Tensor(out)
+  infer_meta :
+    func : ReduceIntArrayAxisInferMeta
+    spmd_rule : ReductionMeanInferSpmdDynamic
+  kernel :
+    func : mean
+  backward : mean_grad
+
+- op : mean_all
+  args : (Tensor x)
+  output : Tensor
+  infer_meta :
+    func : MeanAllInferMeta
+  kernel :
+    func : mean_all
+  backward : mean_all_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : memcpy_d2h
+  args : (Tensor x, int dst_place_type)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : memcpy_d2h
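+
+# Example (editor's sketch): the reduction `max` above is paddle.max; an empty
+# axis list reduces over all dimensions:
+#   import paddle
+#   x = paddle.to_tensor([[1., 5.], [3., 2.]])
+#   paddle.max(x)          # 5.
+#   paddle.max(x, axis=1)  # [5., 3.]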
+
+- op : memcpy_h2d
+  args : (Tensor x, int dst_place_type)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : memcpy_h2d
+
+- op : memory_efficient_attention
+  args : (Tensor query, Tensor key, Tensor value, Tensor bias, Tensor cu_seqlens_q, Tensor cu_seqlens_k, Tensor causal_diagonal, Tensor seqlen_k, Scalar max_seqlen_q, Scalar max_seqlen_k, bool causal, double dropout_p, float scale, bool is_test)
+  output : Tensor(output), Tensor(logsumexp), Tensor(seed_and_offset)
+  infer_meta :
+    func : MemoryEfficientAttentionInferMeta
+  kernel :
+    func : memory_efficient_attention
+    data_type : query
+  optional : bias, cu_seqlens_q, cu_seqlens_k, causal_diagonal, seqlen_k
+  backward : memory_efficient_attention_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : merge_selected_rows
+  args : (Tensor x)
+  output : Tensor(out)
+  infer_meta :
+    func : UnchangedInferMeta
+  kernel :
+    func : merge_selected_rows {selected_rows -> selected_rows}
+
+- op : merged_adam_
+  args : (Tensor[] param, Tensor[] grad, Tensor[] learning_rate, Tensor[] moment1, Tensor[] moment2, Tensor[] beta1_pow, Tensor[] beta2_pow, Tensor[] master_param, Scalar beta1 = 0.9f, Scalar beta2 = 0.999f, Scalar epsilon = 1.0e-8f, bool multi_precision = false, bool use_global_beta_pow = false)
+  output : Tensor[](param_out){param.size()}, Tensor[](moment1_out){param.size()}, Tensor[](moment2_out){param.size()}, Tensor[](beta1_pow_out){param.size()}, Tensor[](beta2_pow_out){param.size()}, Tensor[](master_param_out){param.size()}
+  infer_meta :
+    func : MergedAdamInferMeta
+  kernel :
+    func : merged_adam
+    data_type : param
+  optional: master_param, master_param_out
+  inplace : (param -> param_out), (moment1 -> moment1_out), (moment2 -> moment2_out), (beta1_pow -> beta1_pow_out), (beta2_pow -> beta2_pow_out), (master_param -> master_param_out)
+  traits : pir::SideEffectTrait
+
+- op : merged_momentum_
+  args : (Tensor[] param, Tensor[] grad, Tensor[] velocity, Tensor[] learning_rate, Tensor[] master_param, float mu, bool use_nesterov = false, str[] regularization_method = {}, float[] regularization_coeff = {}, bool multi_precision = false, float rescale_grad = 1.0f)
+  output : Tensor[](param_out){param.size()}, Tensor[](velocity_out){param.size()}, Tensor[](master_param_out){param.size()}
+  infer_meta :
+    func : MergedMomentumInferMeta
+  kernel :
+    func : merged_momentum
+    data_type : param
+  optional: master_param, master_param_out
+  inplace : (param -> param_out), (velocity -> velocity_out), (master_param -> master_param_out)
+  traits : pir::SideEffectTrait
+
+- op : meshgrid
+  args : (Tensor[] inputs)
+  output : Tensor[]{inputs.size()}
+  infer_meta :
+    func : MeshgridInferMeta
+  kernel :
+    func : meshgrid
+    data_type : inputs
+  backward : meshgrid_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : mish
+  args : (Tensor x, float lambda)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : mish
+  backward : mish_grad
+
+- op : mode
+  args : (Tensor x, int axis = -1, bool keepdim = false)
+  output : Tensor(out), Tensor(indices)
+  infer_meta :
+    func : ModeInferMeta
+  kernel :
+    func : mode
+  backward : mode_grad
+
+- op : momentum_
+  args : (Tensor param, Tensor grad, Tensor velocity, Tensor learning_rate, Tensor master_param, float mu, bool use_nesterov = false, str regularization_method = "", float regularization_coeff = 0.0f, bool multi_precision = false, float rescale_grad = 1.0f)
+  output : Tensor(param_out), Tensor(velocity_out), Tensor(master_param_out)
+  infer_meta:
+    func : MomentumInferMeta
+  kernel :
+    func : momentum {dense, dense, dense, dense, dense -> dense, dense, dense},
+           momentum_dense_param_sparse_grad {dense, selected_rows, dense, dense, dense -> dense, dense, dense}
+    data_type : param
+  optional : master_param, master_param_out
+  inplace : (param -> param_out), (velocity -> velocity_out), (master_param -> master_param_out)
+  traits : pir::SideEffectTrait
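+
+# Example (editor's sketch): meshgrid returns one grid tensor per input,
+# as in paddle.meshgrid:
+#   import paddle
+#   xs, ys = paddle.meshgrid(paddle.arange(3), paddle.arange(2))
+#   # xs.shape == ys.shape == [3, 2]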
+
+- op : multi_dot
+  args : (Tensor[] x)
+  output : Tensor
+  infer_meta :
+    func : MultiDotInferMeta
+  kernel :
+    func : multi_dot
+  backward : multi_dot_grad
+
+- op : multiclass_nms3
+  args : (Tensor bboxes, Tensor scores, Tensor rois_num, float score_threshold, int nms_top_k, int keep_top_k, float nms_threshold=0.3, bool normalized=true, float nms_eta=1.0, int background_label=0)
+  output : Tensor(out), Tensor(index), Tensor(nms_rois_num)
+  infer_meta :
+    func : MultiClassNMSInferMeta
+  kernel :
+    func : multiclass_nms3
+    data_type : scores
+  optional : rois_num, nms_rois_num
+
+- op : multinomial
+  args : (Tensor x, Scalar(int) num_samples = 1, bool replacement = false)
+  output : Tensor(out)
+  infer_meta :
+    func : MultinomialInferMeta
+  kernel :
+    func : multinomial
+    data_type : x
+  traits : paddle::dialect::ForwardOnlyTrait
+
+- op : multiplex
+  args : (Tensor[] inputs, Tensor index)
+  output : Tensor
+  infer_meta :
+    func : MultiplexInferMeta
+  kernel :
+    func : multiplex
+    data_type : inputs
+  backward : multiplex_grad
+  data_transform :
+    skip_transform : index
+
+- op : mv
+  args : (Tensor x, Tensor vec)
+  output : Tensor
+  infer_meta :
+    func : MvInferMeta
+  kernel :
+    func : mv
+  backward : mv_grad
+
+- op : nadam_
+  args : (Tensor param, Tensor grad, Tensor learning_rate, Tensor momentum_decay_pow, Tensor beta2_pow, Tensor mu_product, Tensor moment1, Tensor moment2, Tensor master_param, float beta1 = 0.9f, float beta2 = 0.999f, float epsilon = 1.0e-8f, float momentum_decay = 0.004f, bool multi_precision = false)
+  output : Tensor(param_out), Tensor(momentum_decay_pow_out), Tensor(beta2_pow_out), Tensor(mu_product_out), Tensor(moment1_out), Tensor(moment2_out), Tensor(master_param_out)
+  infer_meta :
+    func : NAdamInferMeta
+  kernel :
+    func : nadam
+    data_type : param
+  optional : master_param, master_param_out
+  inplace : (param -> param_out), (momentum_decay_pow -> momentum_decay_pow_out), (beta2_pow -> beta2_pow_out), (mu_product -> mu_product_out), (moment1 -> moment1_out), (moment2 -> moment2_out), (master_param->master_param_out)
+  traits : pir::SideEffectTrait
+
+- op : nanmedian
+  args : (Tensor x, IntArray axis = {}, bool keepdim = true, str mode="avg")
+  output : Tensor(out), Tensor(medians)
+  infer_meta :
+    func : NanmedianInferMeta
+  kernel :
+    func : nanmedian
+  backward : nanmedian_grad
+
+- op : nearest_interp
+  args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_format="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1)
+  output : Tensor(output)
+  infer_meta :
+    func : InterpolateInferMeta
+  optional: out_size, size_tensor, scale_tensor
+  kernel :
+    func : nearest_interp
+    data_type : x
+  backward : nearest_interp_grad
+  data_transform :
+    skip_transform : out_size, size_tensor, scale_tensor
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
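+
+# Example (editor's sketch): mv is the matrix-vector product paddle.mv:
+#   import paddle
+#   m = paddle.to_tensor([[1., 0.], [0., 2.]])
+#   v = paddle.to_tensor([3., 4.])
+#   paddle.mv(m, v)  # [3., 8.]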
+
+- op : nextafter
+  args : (Tensor x, Tensor y)
+  output : Tensor(out)
+  infer_meta :
+    func : ElementwiseInferMeta
+    param: [x, y]
+  kernel :
+    func : nextafter
+    data_type : x
+  traits : paddle::dialect::ForwardOnlyTrait
+
+- op : nll_loss
+  args : (Tensor input, Tensor label, Tensor weight, int64_t ignore_index = -100, str reduction = "mean")
+  output : Tensor(out), Tensor(total_weight)
+  infer_meta :
+    func : NllLossRawInferMeta
+  kernel :
+    func : nll_loss
+    data_type : input
+  optional : weight
+  backward : nll_loss_grad
+
+- op : nms
+  args : (Tensor x, float threshold = 1.0f)
+  output : Tensor(out)
+  infer_meta :
+    func : NMSInferMeta
+  kernel :
+    func : nms
+    data_type : x
+
+- op : nonzero
+  args : (Tensor condition)
+  output : Tensor(out)
+  infer_meta :
+    func : NonZeroInferMeta
+  kernel :
+    func : nonzero
+    data_type: condition
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+  traits : paddle::dialect::ForwardOnlyTrait
+
+- op : norm
+  args : (Tensor x, int axis, float epsilon, bool is_test)
+  output : Tensor(out), Tensor(norm)
+  infer_meta :
+    func : NormInferMeta
+  kernel :
+    func : norm
+  backward : norm_grad
+
+- op : npu_identity
+  args : (Tensor x, int format = -1)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : npu_identity
+
+- op : numel
+  args : (Tensor x)
+  output : Tensor(size)
+  infer_meta :
+    func : NumelInferMeta
+    spmd_rule : NumelInferSpmd
+  kernel :
+    func : numel
+    data_type : x
+  data_transform:
+    skip_transform : x
+  no_need_buffer : x
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : one_hot
+  args : (Tensor x, Scalar(int) num_classes)
+  output : Tensor(out)
+  infer_meta :
+    func : OneHotInferMeta
+  kernel :
+    func : one_hot
+  traits : paddle::dialect::ForwardOnlyTrait
+
+- op : ones
+  args : (IntArray shape, DataType dtype=DataType::FLOAT32, Place place=CPUPlace())
+  output : Tensor(out)
+  invoke : full(shape, 1, dtype, place)
+
+- op : ones_like
+  args : (Tensor x, DataType dtype=DataType::UNDEFINED, Place place={})
+  output : Tensor(out)
+  invoke : full_like(x, 1, dtype, place)
+
+- op : overlap_add
+  args: (Tensor x, int hop_length, int axis=-1)
+  output: Tensor
+  infer_meta:
+    func: OverlapAddInferMeta
+  kernel:
+    func: overlap_add
+    data_type : x
+  backward: overlap_add_grad
+
+- op : p_norm
+  args : (Tensor x, float porder=2, int axis=-1, float epsilon=1.0e-12f, bool keepdim=false, bool asvector=false)
+  output : Tensor(out)
+  infer_meta :
+    func : PNormInferMeta
+  kernel :
+    func : p_norm
+  backward : p_norm_grad
+
+- op : pad
+  args : (Tensor x, int[] paddings, Scalar pad_value)
+  output : Tensor
+  infer_meta :
+    func : PadInferMeta
+  kernel :
+    func : pad
+  backward : pad_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : pad3d
+  args : (Tensor x, IntArray paddings, str mode = "constant", float pad_value = 0.0, str data_format = "NCDHW")
+  output : Tensor(out)
+  infer_meta :
+    func : Pad3dInferMeta
+  kernel :
+    func : pad3d
+  backward : pad3d_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : partial_concat
+  args : (Tensor[] x, int start_index = 0, int length = -1)
+  output : Tensor(out)
+  infer_meta :
+    func : PartialConcatInferMeta
+  kernel :
+    func : partial_concat
+    data_type : x
+  backward : partial_concat_grad
+
+- op : partial_sum
+  args : (Tensor[] x, int start_index = 0, int length = -1)
+  output : Tensor(out)
+  infer_meta :
+    func : PartialSumInferMeta
+  kernel :
+    func : partial_sum
+    data_type : x
+  backward : partial_sum_grad
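+
+# Example (editor's sketch): nonzero returns the coordinates of non-zero
+# elements, one row per hit (paddle.nonzero):
+#   import paddle
+#   x = paddle.to_tensor([[0., 1.], [2., 0.]])
+#   paddle.nonzero(x)  # [[0, 1], [1, 0]]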
+
+- op : pixel_shuffle
+  args : (Tensor x, int upscale_factor=1, str data_format="NCHW")
+  output : Tensor
+  infer_meta :
+    func : PixelShuffleInferMeta
+  kernel :
+    func : pixel_shuffle
+  backward : pixel_shuffle_grad
+
+- op : pixel_unshuffle
+  args : (Tensor x, int downscale_factor=1, str data_format="NCHW")
+  output : Tensor
+  infer_meta :
+    func : PixelUnshuffleInferMeta
+  kernel :
+    func : pixel_unshuffle
+  backward : pixel_unshuffle_grad
+
+- op : poisson
+  args : (Tensor x)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+  kernel :
+    func : poisson
+  backward : poisson_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : polygamma
+  args : (Tensor x, int n)
+  output : Tensor(out)
+  infer_meta :
+    func : UnchangedInferMeta
+    param: [x]
+  kernel :
+    func : polygamma
+  inplace: (x -> out)
+  backward : polygamma_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : pool2d
+  args : (Tensor x, IntArray kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
+  output : Tensor(out)
+  infer_meta :
+    func : Pool2DInferMeta
+    param : [x, kernel_size, strides, paddings, ceil_mode, exclusive, data_format, pooling_type, global_pooling, adaptive, padding_algorithm]
+  kernel :
+    func : pool2d
+    param : [x, kernel_size, strides, paddings, ceil_mode, exclusive, data_format, pooling_type, global_pooling, adaptive, padding_algorithm]
+  backward : pool2d_grad
+  interfaces : paddle::dialect::LayoutTransformationInterface, paddle::dialect::InferSymbolicShapeInterface
+
+- op : pool3d
+  args : (Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
+  output : Tensor(out)
+  infer_meta :
+    func : PoolInferMeta
+    param : [x, kernel_size, strides, paddings, ceil_mode, exclusive, data_format, pooling_type, global_pooling, adaptive, padding_algorithm]
+  kernel :
+    func : pool3d
+    param : [x, kernel_size, strides, paddings, ceil_mode, exclusive, data_format, pooling_type, global_pooling, adaptive, padding_algorithm]
+  backward : pool3d_grad
+
+- op : pow
+  args : (Tensor x, Scalar y=1.0f)
+  output : Tensor(out)
+  infer_meta :
+    func : UnchangedInferMeta
+    param: [x]
+    spmd_rule: PowInferSpmd
+  kernel :
+    func : pow
+    data_type : x
+  inplace: (x -> out)
+  backward : pow_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : prelu
+  args : (Tensor x, Tensor alpha, str data_format="NCHW", str mode="all")
+  output : Tensor(out)
+  infer_meta :
+    func : PReluInferMeta
+  kernel :
+    func : prelu
+    data_type : x
+  backward : prelu_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : prior_box
+  args : (Tensor input, Tensor image, float[] min_sizes, float[] max_sizes = {}, float[] aspect_ratios = {}, float[] variances = {}, bool flip=true, bool clip=true, float step_w=0.0, float step_h=0.0, float offset=0.5, bool min_max_aspect_ratios_order=false)
+  output : Tensor(out), Tensor(var)
+  infer_meta :
+    func : PriorBoxInferMeta
+  kernel :
+    func : prior_box
+    data_type : input
+  traits : paddle::dialect::ForwardOnlyTrait
+
+- op : prod
+  args : (Tensor x, IntArray axis, bool keepdim, bool reduce_all)
+  output : Tensor
+  infer_meta :
+    func : ReduceIntArrayAxisInferMetaBase
+  kernel :
+    func : prod
+  backward : prod_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
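+
+# Example (editor's sketch): prod multiplies elements over the given axes,
+# i.e. paddle.prod:
+#   import paddle
+#   x = paddle.to_tensor([[1., 2.], [3., 4.]])
+#   paddle.prod(x, axis=0)  # [3., 8.]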
+
+- op : prune_gate_by_capacity
+  args : (Tensor gate_idx, Tensor expert_count, int64_t n_expert=0, int64_t n_worker=0)
+  output : Tensor(out_gate_idx)
+  infer_meta :
+    func : PruneGateByCapacityInferMeta
+  kernel :
+    func : prune_gate_by_capacity
+    data_type : gate_idx
+
+- op : psroi_pool
+  args : (Tensor x, Tensor boxes, Tensor boxes_num, int pooled_height=1, int pooled_width=1, int output_channels=1, float spatial_scale=1.0)
+  output : Tensor
+  infer_meta :
+    func : PsroiPoolInferMeta
+  kernel :
+    func : psroi_pool
+    data_type : x
+  optional : boxes_num
+  backward : psroi_pool_grad
+
+- op : put_along_axis
+  args : (Tensor arr, Tensor indices, Tensor values, int axis, str reduce = "assign", bool include_self = true)
+  output : Tensor(out)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [arr]
+  kernel :
+    func : put_along_axis
+    data_type : arr
+  inplace : (arr -> out)
+  backward : put_along_axis_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : pyramid_hash
+  args: (Tensor x, Tensor w, Tensor white_list, Tensor black_list, int num_emb = 0,
+    int space_len = 0, int pyramid_layer = 2, int rand_len = 0, float drop_out_percent
+    = 0, int is_training = 0, bool use_filter = true, int white_list_len = 0, int
+    black_list_len = 0, int seed = 0, float lr = 0.0, str distribute_update_vars =
+    "")
+  output: Tensor (out), Tensor (drop_pos), Tensor (x_temp_out)
+  infer_meta:
+    func: PyramidHashInferMeta
+  kernel:
+    func: pyramid_hash
+    data_type: w
+  intermediate: x_temp_out
+  backward: pyramid_hash_grad
+
+- op : qr
+  args : (Tensor x, str mode = "reduced")
+  output : Tensor(q), Tensor(r)
+  infer_meta :
+    func : QrInferMeta
+  kernel :
+    func : qr
+  backward : qr_grad
+
+- op : radam_
+  args : (Tensor param, Tensor grad, Tensor learning_rate, Tensor beta1_pow, Tensor beta2_pow, Tensor rho, Tensor moment1, Tensor moment2, Tensor master_param, float beta1 = 0.9f, float beta2 = 0.999f, float epsilon = 1.0e-8f, bool multi_precision = false)
+  output : Tensor(param_out), Tensor(beta1_pow_out), Tensor(beta2_pow_out), Tensor(rho_out), Tensor(moment1_out), Tensor(moment2_out), Tensor(master_param_out)
+  infer_meta :
+    func : RAdamInferMeta
+  kernel :
+    func : radam
+    data_type : param
+  optional : master_param, master_param_out
+  inplace : (param -> param_out), (beta1_pow -> beta1_pow_out), (beta2_pow -> beta2_pow_out), (rho -> rho_out), (moment1 -> moment1_out), (moment2 -> moment2_out), (master_param->master_param_out)
+  traits : pir::SideEffectTrait
+
+- op : randint
+  args : (int low, int high, IntArray shape, DataType dtype=DataType::INT64, Place place={})
+  output : Tensor(out)
+  infer_meta :
+    func : RandintInferMeta
+    param : [low, high, shape, dtype]
+  kernel :
+    func : randint
+    param : [low, high, shape, dtype]
+    data_type : dtype
+    backend : place
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+  traits : pir::SideEffectTrait, paddle::dialect::ForwardOnlyTrait
+
+- op : random_routing
+  args : (Tensor prob, Tensor topk_value, Tensor topk_idx)
+  output : Tensor(out)
+  infer_meta :
+    func : RandomRoutingInferMeta
+  kernel :
+    func : random_routing
+    data_type : prob
+  inplace : (topk_idx -> out)
+  traits : pir::SideEffectTrait
+
+- op : randperm
+  args : (int n, DataType dtype, Place place={})
+  output : Tensor(out)
+  infer_meta :
+    func : RandpermInferMeta
+    param : [n, dtype]
+  kernel :
+    func : randperm
+    param : [n, dtype]
+    data_type : dtype
+    backend : place
+  traits : pir::SideEffectTrait
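+
+# Example (editor's sketch): qr maps to paddle.linalg.qr; mode="reduced"
+# returns the economy-size factors:
+#   import paddle
+#   x = paddle.rand([4, 3])
+#   q, r = paddle.linalg.qr(x, mode='reduced')  # q: [4, 3], r: [3, 3]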
+
+- op : rank_attention
+  args : (Tensor x, Tensor rank_offset, Tensor rank_param, int max_rank = 3, int max_size = 0)
+  output : Tensor(input_help), Tensor(out), Tensor(ins_rank)
+  infer_meta :
+    func : RankAttentionInferMeta
+  kernel :
+    func : rank_attention
+    data_type : x
+  backward : rank_attention_grad
+  optional : ins_rank, input_help
+
+- op : read_file
+  args : (str filename = "", DataType dtype=DataType::UINT8, Place place=CPUPlace())
+  output : Tensor(out)
+  infer_meta :
+    func : ReadFileInferMeta
+    param : [filename]
+  kernel :
+    func : read_file
+    param : [filename]
+    data_type : dtype
+    backend : place
+
+- op : real
+  args : (Tensor x)
+  output : Tensor (out)
+  infer_meta :
+    func : RealAndImagInferMeta
+  kernel :
+    func : real
+  backward : real_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : reciprocal
+  args : (Tensor x)
+  output : Tensor(out)
+  infer_meta :
+    func : UnchangedInferMeta
+  kernel :
+    func : reciprocal
+  inplace : (x -> out)
+  backward : reciprocal_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : reduce_as
+  args : (Tensor x, Tensor target)
+  output : Tensor(out)
+  infer_meta :
+    func : ReduceAsInferMeta
+  kernel :
+    func : reduce_as
+    data_type : x
+  backward : reduce_as_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : reduce_scatter
+  args : (Tensor x, int ring_id = 0, int nranks = 1)
+  output : Tensor(out)
+  infer_meta :
+    func : ReduceScatterInferMeta
+    param: [x, nranks]
+  kernel :
+    func : reduce_scatter
+    param: [x, nranks]
+
+- op : reindex_graph
+  args : (Tensor x, Tensor neighbors, Tensor count, Tensor hashtable_value, Tensor hashtable_index)
+  output : Tensor(reindex_src), Tensor(reindex_dst), Tensor(out_nodes)
+  infer_meta :
+    func : GraphReindexInferMeta
+  kernel :
+    func : graph_reindex
+    data_type : x
+  optional : hashtable_value, hashtable_index
+
+- op : relu
+  args : (Tensor x)
+  output : Tensor(out)
+  infer_meta :
+    func : UnchangedInferMeta
+    spmd_rule : ElementwiseUnaryInferSpmd
+  kernel :
+    func : relu
+  inplace : (x -> out)
+  backward : relu_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : relu6
+  args : (Tensor x)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : relu6
+  backward : relu6_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : renorm
+  args : (Tensor x, float p, int axis, float max_norm)
+  output : Tensor(out)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : renorm
+  inplace: (x -> out)
+  backward : renorm_grad
+
+- op : repeat_interleave
+  args : (Tensor x, int repeats, int axis)
+  output : Tensor(out)
+  infer_meta :
+    func : RepeatInterleaveInferMeta
+  kernel :
+    func : repeat_interleave
+    data_type : x
+  backward: repeat_interleave_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : repeat_interleave_with_tensor_index
+  args : (Tensor x, Tensor repeats, int axis)
+  output : Tensor(out)
+  infer_meta :
+    func : RepeatInterleaveWithTensorIndexInferMeta
+  kernel :
+    func : repeat_interleave_with_tensor_index
+    data_type : x
+  backward: repeat_interleave_with_tensor_index_grad
+
+- op : reverse
+  args : (Tensor x, IntArray axis)
+  output : Tensor
+  infer_meta :
+    func : ReverseInferMeta
+  kernel :
+    func : reverse
+    data_type : x
+  backward : reverse_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : rms_norm
+  args : (Tensor x, Tensor bias, Tensor residual, Tensor norm_weight, Tensor norm_bias, float epsilon, int begin_norm_axis, float quant_scale, int quant_round_type, float quant_max_bound, float quant_min_bound)
+  output : Tensor(out), Tensor(residual_out), Tensor(inv_var)
+  infer_meta :
+    func : RmsNormInferMeta
+  kernel :
+    func : rms_norm
+    data_type : x
+  optional : bias, residual, norm_bias, residual_out
+  intermediate : inv_var
+  backward : rms_norm_grad
+
+- op : rmsprop_
+  args : (Tensor param, Tensor mean_square, Tensor grad, Tensor moment, Tensor learning_rate, Tensor mean_grad, Tensor master_param, float epsilon = 1.0e-10f, float decay = 0.9f, float momentum = 0.0f, bool centered = false, bool multi_precision = false)
+  output : Tensor(param_out), Tensor(moment_out), Tensor(mean_square_out), Tensor(mean_grad_out), Tensor(master_param_outs)
+  infer_meta :
+    func : RmspropInferMeta
+  kernel :
+    func : rmsprop {dense, dense, dense, dense, dense, dense, dense -> dense, dense, dense, dense, dense},
+           rmsprop_dense_param_sparse_grad {dense, dense, selected_rows, dense, dense, dense, dense -> dense, dense, dense, dense, dense}
+    data_type : param
+  optional : mean_grad, master_param, master_param_outs
+  inplace : (param -> param_out), (moment -> moment_out), (mean_square -> mean_square_out), (mean_grad -> mean_grad_out), (master_param->master_param_outs)
+  traits : pir::SideEffectTrait
+
+- op : rnn
+  args: (Tensor x, Tensor[] pre_state, Tensor[] weight_list, Tensor sequence_length, Tensor dropout_state_in, float dropout_prob=0.0, bool is_bidirec=false, int input_size=10, int hidden_size=100, int num_layers=1, str mode="RNN_TANH", int seed=0, bool is_test=false)
+  output: Tensor(out), Tensor(dropout_state_out), Tensor[](state){pre_state.size()}, Tensor(reserve)
+  infer_meta:
+    func: RnnInferMeta
+    param : [x, pre_state, weight_list, sequence_length, dropout_prob, is_bidirec, input_size, hidden_size, num_layers, mode, seed, is_test]
+  kernel:
+    func: rnn
+    param : [x, pre_state, weight_list, sequence_length, dropout_prob, is_bidirec, input_size, hidden_size, num_layers, mode, seed, is_test]
+    data_type: x
+  backward: rnn_grad
+  optional : sequence_length
+  intermediate : reserve
+  view : (dropout_state_in -> dropout_state_out)
+
+- op : roi_align
+  args : (Tensor x, Tensor boxes, Tensor boxes_num, int pooled_height=1, int pooled_width=1, float spatial_scale=1.0, int sampling_ratio=-1, bool aligned=false)
+  output : Tensor
+  infer_meta :
+    func : RoiAlignInferMeta
+  kernel :
+    func : roi_align
+    data_type : x
+  optional : boxes_num
+  backward : roi_align_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : roi_pool
+  args : (Tensor x, Tensor boxes, Tensor boxes_num, int pooled_height=1, int pooled_width=1, float spatial_scale=1.0)
+  output : Tensor(out), Tensor(arg_max)
+  infer_meta :
+    func : RoiPoolInferMeta
+  kernel :
+    func : roi_pool
+    data_type : x
+  optional : boxes_num
+  intermediate : arg_max
+  backward : roi_pool_grad
+
+- op : roll
+  args : (Tensor x, IntArray shifts={}, int64_t[] axis={})
+  output : Tensor(out)
+  infer_meta :
+    func : RollInferMeta
+  kernel :
+    func : roll
+    data_type : x
+  backward : roll_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : round
+  args : (Tensor x, int decimals = 0)
+  output : Tensor(out)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : round
+  inplace : (x -> out)
+  backward : round_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : rprop_
+  args : (Tensor param, Tensor grad, Tensor prev, Tensor learning_rate, Tensor master_param, Tensor learning_rate_range, Tensor etas, bool multi_precision=false)
+  output : Tensor(param_out), Tensor(prev_out), Tensor(learning_rate_out), Tensor(master_param_out)
+  infer_meta :
+    func : RpropInferMeta
+  kernel :
+    func : rprop
+    data_type : param
+  data_transform :
+    support_trans_dtype : learning_rate
+  optional : master_param, master_param_out
+  inplace : (param -> param_out), (prev -> prev_out), (learning_rate -> learning_rate_out), (master_param -> master_param_out)
+  traits : pir::SideEffectTrait
+
+- op : rrelu
+  args : (Tensor x, float lower=1.0f/8, float upper=1.0f/3, bool is_test=false)
+  output : Tensor(out), Tensor(noise)
+  infer_meta :
+    func : RReluInferMeta
+  kernel :
+    func : rrelu
+    data_type : x
+  intermediate : noise
+  backward : rrelu_grad
+
+- op : rsqrt
+  args : (Tensor x)
+  output : Tensor(out)
+  infer_meta :
+    func : UnchangedInferMeta
+    spmd_rule : ElementwiseUnaryInferSpmd
+  kernel :
+    func : rsqrt
+  inplace : (x -> out)
+  backward : rsqrt_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : scale
+  args : (Tensor x, Scalar scale=1.0, Scalar bias=0.0, bool bias_after_scale=true)
+  output : Tensor(out)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+    spmd_rule : ScaleInferSpmd
+  kernel :
+    func : scale {dense -> dense},
+           scale_sr {selected_rows -> selected_rows}
+    data_type : x
+  inplace : (x -> out)
+  backward : scale_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : scatter
+  args : (Tensor x, Tensor index, Tensor updates, bool overwrite=true)
+  output : Tensor(out)
+  infer_meta :
+    func : ScatterInferMeta
+  kernel :
+    func : scatter
+    data_type : x
+  inplace : (x -> out)
+  backward : scatter_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : scatter_nd_add
+  args : (Tensor x, Tensor index, Tensor updates)
+  output : Tensor
+  infer_meta :
+    func : ScatterNdAddInferMeta
+  kernel :
+    func : scatter_nd_add
+    data_type : x
+  backward : scatter_nd_add_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : searchsorted
+  args : (Tensor sorted_sequence, Tensor values, bool out_int32 = false, bool right = false)
+  output : Tensor(out)
+  infer_meta :
+    func : SearchsortedInferMeta
+  kernel :
+    func : searchsorted
+    data_type : sorted_sequence
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : segment_pool
+  args : (Tensor x, Tensor segment_ids, str pooltype="SUM")
+  output : Tensor(out), Tensor(summed_ids)
+  infer_meta :
+    func : SegmentPoolInferMeta
+  kernel :
+    func : segment_pool
+    data_type : x
+  intermediate : summed_ids
+  backward : segment_pool_grad
+
+- op : selu
+  args : (Tensor x, float scale=1.0507009873554804934193349852946, float alpha=1.6732632423543772848170429916717)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : selu
+  backward : selu_grad
+
+- op : send_u_recv
+  args : (Tensor x, Tensor src_index, Tensor dst_index, str reduce_op = "SUM", IntArray out_size = {0})
+  output : Tensor(out), Tensor(dst_count)
+  infer_meta :
+    func : SendURecvInferMeta
+  kernel :
+    func : send_u_recv
+    data_type : x
+  intermediate : dst_count
+  backward : send_u_recv_grad
+
+- op : send_ue_recv
+  args : (Tensor x, Tensor y, Tensor src_index, Tensor dst_index, str message_op="ADD", str reduce_op="SUM", IntArray out_size={0})
+  output : Tensor(out), Tensor(dst_count)
+  infer_meta :
+    func : SendUERecvInferMeta
+  kernel :
+    func : send_ue_recv
+    data_type : x
+  intermediate : dst_count
+  backward : send_ue_recv_grad
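+
+# Example (editor's sketch): searchsorted is paddle.searchsorted, returning
+# insertion indices that keep `sorted_sequence` ordered:
+#   import paddle
+#   seq = paddle.to_tensor([1, 3, 5, 7])
+#   vals = paddle.to_tensor([2, 6])
+#   paddle.searchsorted(seq, vals)  # [1, 3]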
+
+- op : send_uv
+  args : (Tensor x, Tensor y, Tensor src_index, Tensor dst_index, str message_op = "ADD")
+  output : Tensor(out)
+  infer_meta :
+    func : SendUVInferMeta
+  kernel :
+    func : send_uv
+    data_type : x
+  backward : send_uv_grad
+
+- op : sequence_conv
+  args: (Tensor x, Tensor padding_data, Tensor filter, int context_length, bool padding_trainable = false,
+    int context_start = 0, int context_stride = 1)
+  output: Tensor (out)
+  infer_meta:
+    func: SequenceConvInferMeta
+  kernel:
+    func: sequence_conv
+    data_type: x
+  optional: padding_data
+  backward: sequence_conv_grad
+
+- op : sequence_mask
+  args: (Tensor x, Scalar(int) max_len, DataType out_dtype)
+  output: Tensor(y)
+  infer_meta:
+    func: SequenceMaskScalarInferMeta
+  kernel:
+    func: sequence_mask_scalar
+    data_type : x
+
+- op : sequence_pool
+  args: (Tensor x, bool is_test=false, str pooltype = "AVERAGE", float pad_value = 0.0)
+  output: Tensor (out), Tensor (max_index)
+  infer_meta:
+    func: SequencePoolInferMeta
+  kernel:
+    func: sequence_pool
+  intermediate: max_index
+  backward: sequence_pool_grad
+
+- op : set_value_with_tensor
+  args : (Tensor x, Tensor values, IntArray starts, IntArray ends, IntArray steps, int64_t[] axes, int64_t[] decrease_axes, int64_t[] none_axes)
+  output : Tensor(out)
+  inplace: (x -> out)
+  infer_meta:
+    func: SetValueInferMeta
+    param: [x]
+  kernel:
+    func: set_value_with_tensor
+  backward: set_value_with_tensor_grad
+
+- op : sgd_
+  args : (Tensor param, Tensor learning_rate, Tensor grad, Tensor master_param, bool multi_precision=false)
+  output : Tensor(param_out), Tensor(master_param_out)
+  infer_meta :
+    func : SgdInferMeta
+    spmd_rule : SgdInferSpmd
+  kernel :
+    func : sgd {dense, dense, dense, dense -> dense, dense},
+           sgd_dense_param_sparse_grad {dense, dense, selected_rows, dense -> dense, dense},
+           sgd_sparse_param_sparse_grad {selected_rows, dense, selected_rows, selected_rows -> selected_rows, selected_rows}
+    data_type : param
+  data_transform :
+    support_trans_dtype : learning_rate
+  optional : master_param, master_param_out
+  inplace : (param -> param_out), (master_param -> master_param_out)
+  traits : pir::SideEffectTrait
+
+- op : shape
+  args : (Tensor input)
+  output : Tensor(out)
+  infer_meta :
+    func : ShapeInferMeta
+  kernel :
+    func : shape {dense -> dense},
+           shape_sr {selected_rows -> dense}
+  data_transform :
+    skip_transform : input
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+  traits : paddle::dialect::ForwardOnlyTrait
+
+- op : shard_index
+  args : (Tensor input, int index_num, int nshards, int shard_id, int ignore_value=-1)
+  output : Tensor(out)
+  infer_meta :
+    func : ShardIndexInferMeta
+  kernel :
+    func : shard_index
+
+- op : share_data
+  args: (Tensor x)
+  output: Tensor (out)
+  infer_meta:
+    func: ShareDataInferMeta
+  kernel:
+    func: share_data {dense -> dense}
+          share_data_sr {selected_rows -> selected_rows}
+
+- op : shuffle_batch
+  args : (Tensor x, Tensor seed, int startup_seed=0)
+  output : Tensor(out), Tensor(shuffle_idx), Tensor(seed_out)
+  infer_meta:
+    func: ShuffleBatchInferMeta
+  kernel:
+    func: shuffle_batch
+    data_type: x
+  backward : shuffle_batch_grad
+  traits : pir::SideEffectTrait
+  data_transform :
+    skip_transform : seed
+
+- op : shuffle_channel
+  args : (Tensor x, int group = 1)
+  output : Tensor(out)
+  infer_meta :
+    func : ShuffleChannelInferMeta
+  kernel :
+    func : shuffle_channel
+  backward : shuffle_channel_grad
+
+- op : sigmoid
+  args : (Tensor x)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+  kernel :
+    func : sigmoid
+  inplace : (x -> out)
+  backward : sigmoid_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : sigmoid_cross_entropy_with_logits
+  args : (Tensor x, Tensor label, Tensor pos_weight, bool normalize=false, int ignore_index=-100)
+  output : Tensor
+  infer_meta :
+    func : SigmoidCrossEntropyWithLogitsInferMeta
+  kernel :
+    func : sigmoid_cross_entropy_with_logits
+  inplace : (x -> out)
+  backward : sigmoid_cross_entropy_with_logits_grad
+  optional : pos_weight
+
+- op : sign
+  args : (Tensor x)
+  output : Tensor(out)
+  infer_meta :
+    func : UnchangedInferMeta
+  kernel :
+    func : sign
+  backward : sign_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : silu
+  args : (Tensor x)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+    spmd_rule : ElementwiseUnaryInferSpmd
+  kernel :
+    func : silu
+  backward : silu_grad
+  interfaces : paddle::dialect::LayoutTransformationInterface
+
+- op : sin
+  args : (Tensor x)
+  output : Tensor(out)
+  infer_meta :
+    func : UnchangedInferMeta
+    spmd_rule : ElementwiseUnaryInferSpmd
+  kernel :
+    func : sin
+  inplace : (x -> out)
+  backward : sin_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : sinh
+  args : (Tensor x)
+  output : Tensor(out)
+  infer_meta :
+    func : UnchangedInferMeta
+  kernel :
+    func : sinh
+  inplace: (x -> out)
+  backward : sinh_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : slice
+  args : (Tensor input, int64_t[] axes, IntArray starts, IntArray ends, int64_t[] infer_flags, int64_t[] decrease_axis)
+  output : Tensor
+  infer_meta :
+    func : SliceRawInferMeta
+    spmd_rule : SliceInferSpmdDynamic
+  kernel :
+    func : slice
+  backward : slice_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : slogdet
+  args : (Tensor x)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+  kernel :
+    func : slogdet
+  backward : slogdet_grad
+
+- op : softplus
+  args : (Tensor x, float beta = 1.0, float threshold = 20.0f)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : softplus
+  backward : softplus_grad
+
+- op : softshrink
+  args : (Tensor x, float threshold = 0.5)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : softshrink
+  backward : softshrink_grad
+
+- op : softsign
+  args : (Tensor x)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : softsign
+  backward : softsign_grad
+
+- op : solve
+  args : (Tensor x, Tensor y)
+  output : Tensor
+  infer_meta :
+    func : SolveInferMeta
+  kernel :
+    func : solve
+    data_type : x
+  backward : solve_grad
+
+- op : sparse_attention
+  args: (Tensor q, Tensor k, Tensor v, Tensor offset, Tensor columns, Tensor key_padding_mask,
+    Tensor attn_mask)
+  output: Tensor (out), Tensor (sparse_dot_sdd), Tensor (softmax)
+  infer_meta:
+    func: SparseAttentionInferMeta
+  kernel:
+    func: sparse_attention
+    data_type: q
+  optional: key_padding_mask, attn_mask
+  intermediate: sparse_dot_sdd, softmax
+  backward: sparse_attention_grad
+
+- op : spectral_norm
+  args : (Tensor weight, Tensor u, Tensor v, int dim = 0, int power_iters = 1, float eps = 1e-12f)
+  output : Tensor
+  infer_meta :
+    func : SpectralNormInferMeta
+  kernel :
+    func : spectral_norm
+    data_type : weight
+  backward : spectral_norm_grad
+
+- op : split
+  args : (Tensor x, IntArray sections, Scalar(int) axis)
+  output : Tensor[]{sections.size()}
+  infer_meta :
+    func : SplitInferMeta
+    spmd_rule : SplitInferSpmdDynamic
+  kernel :
+    func : split
+  backward : split_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
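+
+# Example (editor's sketch): split with IntArray sections corresponds to
+# paddle.split:
+#   import paddle
+#   x = paddle.rand([6, 4])
+#   a, b, c = paddle.split(x, num_or_sections=[1, 2, 3], axis=0)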
+
+- op : split_with_num
+  args : (Tensor x, int num, Scalar(int) axis)
+  output : Tensor[]{num}
+  infer_meta :
+    func : SplitWithNumInferMeta
+    spmd_rule : SplitWithNumInferSpmdDynamic
+  kernel :
+    func : split_with_num
+  backward : split_with_num_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : sqrt
+  args : (Tensor x)
+  output : Tensor(out)
+  infer_meta :
+    func : UnchangedInferMeta
+  kernel :
+    func : sqrt {dense -> dense},
+           sqrt_sr {selected_rows -> selected_rows}
+  inplace : (x -> out)
+  backward : sqrt_grad
+
+- op : square
+  args : (Tensor x)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+    spmd_rule : ElementwiseUnaryInferSpmd
+  kernel :
+    func : square {dense -> dense},
+           square_sr {selected_rows -> selected_rows}
+  backward : square_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : squared_l2_norm
+  args : (Tensor x)
+  output : Tensor(out)
+  infer_meta :
+    func : SquaredL2NormInferMeta
+    spmd_rule : SquaredL2NormInferSpmd
+  kernel :
+    func : squared_l2_norm
+  backward : squared_l2_norm_grad
+
+- op : squeeze
+  args : (Tensor x, IntArray axis={})
+  output : Tensor(out), Tensor(xshape)
+  infer_meta :
+    func : SqueezeWithXShapeInferMeta
+    spmd_rule : SqueezeInferSpmd
+  kernel :
+    func : squeeze
+    data_type : x
+  inplace : (x -> out)
+  view: (x -> out)
+  intermediate : xshape
+  backward : squeeze_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface, paddle::dialect::LayoutTransformationInterface
+
+- op : stack
+  args : (Tensor[] x, int axis = 0)
+  output : Tensor (out)
+  infer_meta :
+    func : StackInferMeta
+    spmd_rule : StackInferSpmd
+  kernel :
+    func : stack
+  backward : stack_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : standard_gamma
+  args : (Tensor x)
+  output : Tensor(out)
+  infer_meta :
+    func : UnchangedInferMeta
+  kernel :
+    func : standard_gamma
+
+- op : stanh
+  args : (Tensor x, float scale_a=0.67f, float scale_b=1.7159f)
+  output : Tensor(out)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : stanh
+  backward : stanh_grad
+
+- op : stft
+  args: (Tensor x, Tensor window, int n_fft, int hop_length, bool normalized, bool onesided)
+  output: Tensor (out)
+  infer_meta:
+    func: StftInferMeta
+  kernel:
+    func: stft
+    data_type: x
+  backward: stft_grad
+
+- op : strided_slice
+  args : (Tensor x, int[] axes, IntArray starts, IntArray ends, IntArray strides)
+  output : Tensor
+  infer_meta :
+    func : StridedSliceInferMeta
+    spmd_rule : StridedSliceInferSpmdDynamic
+  kernel :
+    func : strided_slice
+  backward : strided_slice_grad
+
+- op : sum
+  args : (Tensor x, IntArray axis={}, DataType dtype=DataType::UNDEFINED, bool keepdim=false)
+  output : Tensor(out)
+  infer_meta :
+    func : SumInferMeta
+    spmd_rule : ReductionSumInferSpmdDynamic
+  kernel :
+    func : sum
+    data_type : x
+  backward : sum_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : svd
+  args : (Tensor x, bool full_matrices = false)
+  output : Tensor(u), Tensor(s), Tensor(vh)
+  infer_meta :
+    func : SvdInferMeta
+  kernel :
+    func : svd
+  backward : svd_grad
+
+- op : swiglu
+  args : (Tensor x, Tensor y)
+  output : Tensor(out)
+  infer_meta :
+    func : SwiGLUInferMeta
+    spmd_rule : SwiGLUInferSpmd
+  kernel :
+    func : swiglu
+  optional : y
+  backward: swiglu_grad
+
+- op : swish
+  args : (Tensor x)
+  output : Tensor(out)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : swish
+  backward : swish_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface, paddle::dialect::LayoutTransformationInterface
+
+- op : sync_batch_norm_
+  args : (Tensor x, Tensor mean, Tensor variance, Tensor scale, Tensor bias, bool is_test, float momentum, float epsilon, str data_format, bool use_global_stats, bool trainable_statistics)
+  output : Tensor(out), Tensor(mean_out), Tensor(variance_out), Tensor(saved_mean), Tensor(saved_variance), Tensor(reserve_space)
+  infer_meta :
+    func : BatchNormInferMeta
+  kernel :
+    func : sync_batch_norm
+    data_type : x
+  backward : sync_batch_norm_grad
+  inplace : (mean -> mean_out), (variance -> variance_out)
+  optional : reserve_space
+
+- op : take_along_axis
+  args : (Tensor arr, Tensor indices, int axis)
+  output : Tensor
+  infer_meta :
+    func : TakeAlongAxisInferMeta
+    param : [arr, indices, axis]
+  kernel :
+    func : take_along_axis
+    data_type : arr
+  backward : take_along_axis_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : tan
+  args : (Tensor x)
+  output : Tensor(out)
+  infer_meta :
+    func : UnchangedInferMeta
+  kernel :
+    func : tan
+  inplace : (x -> out)
+  backward : tan_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : tanh
+  args : (Tensor x)
+  output : Tensor(out)
+  infer_meta :
+    func : UnchangedInferMeta
+  kernel :
+    func : tanh
+  inplace : (x -> out)
+  backward : tanh_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : tanh_shrink
+  args : (Tensor x)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+  kernel :
+    func : tanh_shrink
+  backward : tanh_shrink_grad
+
+- op : tdm_child
+  args: (Tensor x, Tensor tree_info, int child_nums, DataType dtype = DataType::INT32)
+  output: Tensor (child), Tensor (leaf_mask)
+  infer_meta:
+    func: TdmChildInferMeta
+  kernel:
+    func: tdm_child
+    data_type: x
+
+- op : tdm_sampler
+  args: (Tensor x, Tensor travel, Tensor layer, bool output_positive=true, int[] neg_samples_num_list={}, int[] layer_offset_lod={}, int seed = 0, int dtype=2)
+  output: Tensor(out), Tensor(labels), Tensor(mask)
+  infer_meta:
+    func : TdmSamplerInferMeta
+  kernel:
+    func : tdm_sampler
+    data_type : x
+  optional : labels
+
+- op : temporal_shift
+  args : (Tensor x, int seg_num, float shift_ratio = 0.25f, str data_format = "NCHW")
+  output : Tensor(out)
+  infer_meta :
+    func : TemporalShiftInferMeta
+  kernel :
+    func : temporal_shift
+    data_type : x
+  backward : temporal_shift_grad
+
+- op : tensor_unfold
+  args : (Tensor input, int64_t axis, int64_t size, int64_t step)
+  output : Tensor
+  infer_meta :
+    func : StridedUnChangedInferMeta
+    param : [input]
+  kernel :
+    func : tensor_unfold
+  backward : tensor_unfold_grad
+  no_need_buffer : input
+
+- op : thresholded_relu
+  args : (Tensor x, float threshold = 1.0, float value = 0.0)
+  output : Tensor(out)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : thresholded_relu
+  inplace: (x -> out)
+  backward : thresholded_relu_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : top_p_sampling
+  args : (Tensor x, Tensor ps, Tensor threshold, Tensor topp_seed, int seed=-1, int k=0, str mode="truncate")
+  output : Tensor (out), Tensor(ids), Tensor(topk_scores), Tensor(topk_ids)
+  infer_meta :
+    func : TopPSamplingInferMeta
+  kernel :
+    func : top_p_sampling
+    data_type : x
+  optional : threshold, topp_seed, topk_scores, topk_ids
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
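+
+# Example (editor's sketch): take_along_axis gathers values at `indices`
+# along one axis (paddle.take_along_axis):
+#   import paddle
+#   arr = paddle.to_tensor([[1., 2.], [3., 4.]])
+#   idx = paddle.to_tensor([[0], [1]])
+#   paddle.take_along_axis(arr, idx, axis=1)  # [[1.], [4.]]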
+
+- op : topk
+  args : (Tensor x, Scalar(int) k = 1, int axis = -1, bool largest = true, bool sorted = true)
+  output : Tensor(out), Tensor(indices)
+  infer_meta :
+    func : TopKInferMeta
+  kernel :
+    func : topk
+    data_type : x
+  backward : topk_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : trace
+  args : (Tensor x, int offset = 0, int axis1 = 0, int axis2 = 1)
+  output : Tensor
+  infer_meta :
+    func : TraceInferMeta
+  kernel :
+    func : trace
+  backward : trace_grad
+
+- op : trans_layout
+  args : (Tensor x, int[] perm)
+  output : Tensor
+  infer_meta :
+    func : TransposeInferMeta
+  kernel :
+    func : transpose
+  backward : trans_layout_grad
+
+- op : transpose
+  args : (Tensor x, int[] perm)
+  output : Tensor(out)
+  infer_meta :
+    func : TransposeInferMeta
+    spmd_rule: TransposeInferSpmd
+  kernel :
+    func : transpose
+  inplace : (x -> out)
+  backward : transpose_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : triangular_solve
+  args : (Tensor x, Tensor y, bool upper=true, bool transpose=false, bool unitriangular=false)
+  output : Tensor
+  infer_meta :
+    func : TriangularSolveInferMeta
+  kernel :
+    func : triangular_solve
+    data_type : x
+  backward : triangular_solve_grad
+
+- op : tril
+  args : (Tensor x, int diagonal)
+  output : Tensor(out)
+  infer_meta :
+    func : TrilInferMeta
+  kernel :
+    func : tril
+  inplace: (x -> out)
+  backward : tril_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : tril_indices
+  args : (int rows, int cols, int offset, DataType dtype, Place place={})
+  output : Tensor(out)
+  infer_meta :
+    func : TrilIndicesInferMeta
+    param : [rows, cols, offset, dtype]
+  kernel :
+    func : tril_indices
+    param : [rows, cols, offset, dtype]
+    data_type : dtype
+    backend : place
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : trilinear_interp
+  args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_format="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1)
+  output : Tensor(output)
+  infer_meta :
+    func : InterpolateInferMeta
+  optional: out_size, size_tensor, scale_tensor
+  kernel :
+    func : trilinear_interp
+    data_type : x
+  backward : trilinear_interp_grad
+  data_transform :
+    skip_transform : out_size, size_tensor, scale_tensor
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : triu
+  args : (Tensor x, int diagonal)
+  output : Tensor(out)
+  infer_meta :
+    func : TriuInferMeta
+    spmd_rule : TriuInferSpmd
+  kernel :
+    func : triu
+  inplace: (x -> out)
+  backward : triu_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : triu_indices
+  args : (int row, int col, int offset, DataType dtype, Place place={})
+  output : Tensor(out)
+  infer_meta :
+    func : TriuIndicesInferMeta
+    param : [row, col, offset, dtype]
+  kernel :
+    func : triu_indices
+    param : [row, col, offset, dtype]
+    data_type : dtype
+    backend : place
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+- op : trunc
+  args : (Tensor input)
+  output : Tensor(out)
+  infer_meta :
+    func : UnchangedInferMeta
+  kernel :
+    func : trunc
+  inplace: (input -> out)
+  backward : trunc_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
+# python API: paddle.nn.initializer.TruncatedNormal
+- op : truncated_gaussian_random
+  args : (int[] shape, float mean, float std, int seed, float a, float b, DataType dtype=DataType::FLOAT32, Place place={})
+  output : Tensor(out)
+  infer_meta :
+    func : TruncatedGaussianRandomInferMeta
+    param : [shape, mean, std, seed, a, b, dtype]
+  kernel :
+    func : truncated_gaussian_random
+    param : [shape, mean, std, seed, a, b, dtype]
+    backend : place
+    data_type : dtype
+  traits : pir::SideEffectTrait
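+
+# Example (editor's sketch): topk returns the k largest (or smallest) values
+# and their indices, as in paddle.topk:
+#   import paddle
+#   x = paddle.to_tensor([1., 4., 5., 7.])
+#   values, indices = paddle.topk(x, k=2)  # values [7., 5.], indices [3, 2]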
float b, DataType dtype=DataType::FLOAT32, Place place={}) + output : Tensor(out) + infer_meta : + func : TruncatedGaussianRandomInferMeta + param : [shape, mean, std, seed, a, b, dtype] + kernel : + func : truncated_gaussian_random + param : [shape, mean, std, seed, a, b, dtype] + backend : place + data_type : dtype + traits : pir::SideEffectTrait + +- op : unbind + args : (Tensor input, int axis = 0) + output : Tensor[] {axis<0 ? input.dims()[input.dims().size()+axis]:input.dims()[axis]} + infer_meta : + func : UnbindInferMeta + spmd_rule : UnbindInferSpmdDynamic + kernel : + func : unbind + backward : unbind_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : unfold + args : (Tensor x, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations) + output : Tensor(out) + infer_meta : + func : UnfoldInferMeta + kernel : + func : unfold + backward : unfold_grad + +- op : uniform + args : (IntArray shape, DataType dtype, Scalar min, Scalar max, int seed, Place place={}) + output : Tensor(out) + infer_meta : + func : UniformRandomInferMeta + param: [shape, dtype] + kernel : + func : uniform + param: [shape, dtype, min, max, seed] + data_type : dtype + backend : place + interfaces : paddle::dialect::InferSymbolicShapeInterface + traits : pir::SideEffectTrait, paddle::dialect::ForwardOnlyTrait + +- op : uniform_inplace + args: (Tensor x, float min = -1.0, float max = 1.0, int seed = 0, int diag_num = 0, int diag_step = 0, float diag_val = 1.0) + output: Tensor(out) + infer_meta: + func: UniformRandomInplaceInferMeta + kernel: + func: uniform_inplace + data_type: x + inplace: (x -> out) + backward: uniform_inplace_grad + traits : pir::SideEffectTrait + +- op : uniform_random_batch_size_like + args: (Tensor input, int[] shape, int input_dim_idx = 0, int output_dim_idx = 0, + float min=-1.0f, float max=1.0f, int seed=0, int diag_num=0, int diag_step=0, float diag_val=1.0f, DataType dtype=DataType::FLOAT32) + output: Tensor (out) + infer_meta: + func: UniformRandomBatchSizeLikeInferMeta + kernel: + func : uniform_random_batch_size_like {dense -> dense}, + uniform_random_batch_size_like_sr {selected_rows -> selected_rows} + data_type: dtype + no_need_buffer: input + traits : pir::SideEffectTrait + +- op : unique_consecutive + args : (Tensor x, bool return_inverse = false, bool return_counts = false, int[] axis = {}, DataType dtype = DataType::FLOAT32) + output : Tensor(out), Tensor(index), Tensor(counts) + infer_meta : + func : UniqueConsecutiveInferMeta + kernel : + func : unique_consecutive + data_type : x + optional : index, counts + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : unpool + args: (Tensor x, Tensor indices, int[] ksize, int[] strides, int[] padding, IntArray output_size, str data_format) + output: Tensor(out) + infer_meta: + func: UnpoolInferMeta + kernel: + func: unpool + data_type: x + backward: unpool_grad + +- op : unpool3d + args: (Tensor x, Tensor indices, int[] ksize, int[] strides={1,1,1}, int[] paddings={0,0,0}, int[] output_size={0,0,0}, str data_format="NCDHW") + output: Tensor(out) + infer_meta: + func: Unpool3dInferMeta + kernel: + func: unpool3d + data_type: x + backward: unpool3d_grad + +- op : unsqueeze + args : (Tensor x, IntArray axis = {}) + output : Tensor(out), Tensor(xshape) + infer_meta : + func : UnsqueezeWithXShapeInferMeta + spmd_rule : UnsqueezeInferSpmd + kernel : + func : unsqueeze + data_type : x + inplace : (x -> out) + view: (x -> out) + intermediate : xshape + backward : unsqueeze_grad + interfaces 
: paddle::dialect::InferSymbolicShapeInterface + +- op : unstack + args : (Tensor x, int axis=0, int num=0) + output : Tensor[](out){num} + infer_meta : + func : UnStackInferMeta + kernel : + func : unstack + backward : unstack_grad + +- op : update_loss_scaling_ + args : (Tensor[] x, Tensor found_infinite, Tensor prev_loss_scaling, Tensor in_good_steps, Tensor in_bad_steps, int incr_every_n_steps, int decr_every_n_nan_or_inf, float incr_ratio, float decr_ratio, Scalar stop_update=false) + output : Tensor[](out){x.size()}, Tensor(loss_scaling), Tensor(out_good_steps), Tensor(out_bad_steps) + infer_meta : + func : UpdateLossScalingInferMeta + param : [x, found_infinite, prev_loss_scaling, in_good_steps, in_bad_steps] + spmd_rule : UpdateLossScalingSpmd + kernel : + func : update_loss_scaling + data_type : x + data_transform : + skip_transform : found_infinite + inplace : (x -> out), (prev_loss_scaling -> loss_scaling), (in_good_steps -> out_good_steps), (in_bad_steps -> out_bad_steps) + +- op : view_dtype + args : (Tensor input, DataType dtype) + output : Tensor(out) + infer_meta : + func : StridedUnChangedInferMeta + param : [input] + kernel : + func : view_dtype + data_type : input + backward : view_dtype_grad + no_need_buffer : input + +- op : view_shape + args : (Tensor input, int64_t[] dims = {}) + output : Tensor(out) + infer_meta : + func : StridedUnChangedInferMeta + param : [input] + kernel : + func : view_shape + backward : view_shape_grad + no_need_buffer : input + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : viterbi_decode + args : (Tensor potentials, Tensor transition_params, Tensor lengths, bool include_bos_eos_tag = true) + output : Tensor(scores), Tensor(path) + infer_meta : + func : ViterbiDecodeInferMeta + kernel : + func : viterbi_decode + data_type : potentials + +- op : warpctc + args : (Tensor logits, Tensor label, Tensor logits_length, Tensor labels_length, int blank = 0, bool norm_by_times = false) + output : Tensor(loss), Tensor(warpctcgrad) + infer_meta : + func : WarpctcInferMeta + kernel : + func : warpctc + data_type : logits + optional : logits_length, labels_length + intermediate : warpctcgrad + backward : warpctc_grad + +- op : warprnnt + args : (Tensor input, Tensor label, Tensor input_lengths, Tensor label_lengths, int blank = 0, float fastemit_lambda = 0.0) + output : Tensor(loss), Tensor(warprnntgrad) + infer_meta : + func : WarprnntInferMeta + kernel : + func : warprnnt + data_type : input + intermediate : warprnntgrad + backward : warprnnt_grad + +- op : weight_dequantize + args : (Tensor x, Tensor scale, str algo = "weight_only_int8", DataType out_dtype = DataType::FLOAT16, int group_size = -1) + output : Tensor(out) + infer_meta : + func : WeightDequantizeInferMeta + kernel : + func : weight_dequantize + data_type : out_dtype + +- op : weight_only_linear + args : (Tensor x, Tensor weight, Tensor bias, Tensor weight_scale, str weight_dtype, int arch = 80, int group_size = -1) + output : Tensor(out) + infer_meta : + func : WeightOnlyLinearInferMeta + kernel : + func : weight_only_linear + data_type : x + optional : bias + backward : weight_only_linear_grad + +- op : weight_quantize + args : (Tensor x, str algo = "weight_only_int8", int arch = 80, int group_size = -1) + output : Tensor(out), Tensor(scale) + infer_meta : + func : WeightQuantizeInferMeta + kernel : + func : weight_quantize + data_type : x + backend : x + +- op : weighted_sample_neighbors + args : (Tensor row, Tensor colptr, Tensor edge_weight, Tensor input_nodes, 
Tensor eids, int sample_size, bool return_eids) + output : Tensor(out_neighbors), Tensor(out_count), Tensor(out_eids) + infer_meta : + func : WeightedSampleNeighborsInferMeta + kernel : + func : weighted_sample_neighbors + optional : eids + +- op : where + args : (Tensor condition, Tensor x, Tensor y) + output : Tensor(out) + infer_meta : + func : WhereInferMeta + spmd_rule: WhereInferSpmd + kernel : + func : where + inplace : (x -> out) + backward : where_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface + +- op : yolo_box + args : (Tensor x, Tensor img_size, int[] anchors={}, int class_num = 1, float conf_thresh = 0.01, int downsample_ratio = 32, bool clip_bbox = true, float scale_x_y=1.0, bool iou_aware=false, float iou_aware_factor=0.5) + output : Tensor(boxes), Tensor(scores) + infer_meta : + func : YoloBoxInferMeta + kernel : + func : yolo_box + data_type : x + +- op : yolo_box_head + args : (Tensor x, int[] anchors, int class_num) + output : Tensor(out) + infer_meta : + func : YoloBoxHeadInferMeta + kernel : + func : yolo_box_head + data_type : x + +- op : yolo_box_post + args : (Tensor boxes0, Tensor boxes1, Tensor boxes2, Tensor image_shape, Tensor image_scale, int[] anchors0, int[] anchors1, int[] anchors2, int class_num, float conf_thresh, int downsample_ratio0, int downsample_ratio1, int downsample_ratio2, bool clip_bbox, float scale_x_y, float nms_threshold) + output : Tensor(out), Tensor(nms_rois_num) + infer_meta : + func : YoloBoxPostInferMeta + kernel : + func : yolo_box_post + data_type : boxes0 + +- op : yolo_loss + args : (Tensor x, Tensor gt_box, Tensor gt_label, Tensor gt_score, int[] anchors={}, int[] anchor_mask={}, int class_num =1 , float ignore_thresh=0.7, int downsample_ratio=32, bool use_label_smooth=true, float scale_x_y=1.0) + output : Tensor(loss), Tensor(objectness_mask), Tensor(gt_match_mask) + infer_meta : + func : YoloLossInferMeta + kernel : + func : yolo_loss + data_type : x + optional : gt_score + intermediate : objectness_mask, gt_match_mask + backward : yolo_loss_grad + +- op : zeros + args : (IntArray shape, DataType dtype=DataType::FLOAT32, Place place=CPUPlace()) + output : Tensor(out) + invoke : full(shape, 0, dtype, place) + +- op : zeros_like + args : (Tensor x, DataType dtype=DataType::UNDEFINED, Place place = {}) + output : Tensor(out) + invoke : full_like(x, 0, dtype, place) + +- op: chunk_eval + args: (Tensor inference, Tensor label, Tensor seq_length, int num_chunk_types, str + chunk_scheme = "IOB", int[] excluded_chunk_types = {}) + output: Tensor (precision), Tensor (recall), Tensor (f1_score), Tensor (num_infer_chunks), + Tensor (num_label_chunks), Tensor (num_correct_chunks) + infer_meta: + func: ChunkEvalInferMeta + kernel: + func: chunk_eval + data_type: DataType::FLOAT32 + optional: seq_length + +- op: moe + args: (Tensor x, Tensor gate, Tensor bmm0, Tensor bias0, Tensor bmm1, Tensor bias1, + str act_type = "gelu") + output: Tensor (out) + infer_meta: + func: MoeInferMeta + kernel: + func: moe + +- op: number_count + args: (Tensor numbers, int upper_range) + output: Tensor(out) + infer_meta: + func: NumberCountInferMeta + kernel: + func: number_count + data_type: numbers From 7f66ddce144729d857e0e9ca9dbd9e6df0b561b2 Mon Sep 17 00:00:00 2001 From: Fripping <15010770306@163.com> Date: Mon, 5 Aug 2024 15:46:55 +0800 Subject: [PATCH 2/8] update cinn api --- .../multiary_infer_sym-checkpoint.cc | 74 + .../multiary_infer_sym-checkpoint.h | 54 + .../same_operands_result-checkpoint.cc | 215 ++ 
 .../same_operands_result-checkpoint.h         |  165 ++
 .../unary_infer_sym-checkpoint.cc             | 2138 +++++++++++++++++
 .../unary_infer_sym-checkpoint.h              |   95 +
 .../multiary_infer_sym.cc                     |   74 +
 .../infer_symbolic_shape/multiary_infer_sym.h |    1 +
 .../same_operands_result.cc                   |    1 +
 .../same_operands_result.h                    |    1 +
 .../infer_symbolic_shape/unary_infer_sym.cc   |   18 +
 .../infer_symbolic_shape/unary_infer_sym.h    |    1 +
 .../.ipynb_checkpoints/ops-checkpoint.yaml    |    3 +
 paddle/phi/ops/yaml/ops.yaml                  |    3 +
 14 files changed, 2843 insertions(+)
 create mode 100644 paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/multiary_infer_sym-checkpoint.h
 create mode 100644 paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/same_operands_result-checkpoint.cc
 create mode 100644 paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/same_operands_result-checkpoint.h
 create mode 100644 paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/unary_infer_sym-checkpoint.cc
 create mode 100644 paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/unary_infer_sym-checkpoint.h

diff --git a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/multiary_infer_sym-checkpoint.cc b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/multiary_infer_sym-checkpoint.cc
index 5fb88771a0490..a84326612f4b8 100644
--- a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/multiary_infer_sym-checkpoint.cc
+++ b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/multiary_infer_sym-checkpoint.cc
@@ -1201,6 +1201,80 @@ bool Where_OpInferSymbolicShape(pir::Operation *op,
   return WhereOpInferSymbolicShape(op, infer_context);
 }
 
+bool YoloLossOpInferSymbolicShape(
+    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
+  const auto &dim_x =
+      infer_context->GetShapeOrDataForValue(op->operand_source(0)).shape();
+  const auto &dim_gtbox =
+      infer_context->GetShapeOrDataForValue(op->operand_source(1)).shape();
+  const auto &dim_gtlabel =
+      infer_context->GetShapeOrDataForValue(op->operand_source(2)).shape();
+  std::vector<int> anchors_mask =
+      paddle::dialect::details::GetVectorAttr<int>(op, "anchor_mask");
+  int mask_num = static_cast<int>(anchors_mask.size());
+  int class_num = op->attribute<pir::Int32Attribute>("class_num").data();
+
+  PADDLE_ENFORCE_EQ(dim_x.size(),
+                    4,
+                    phi::errors::InvalidArgument(
+                        "Input(X) should be a 4-D tensor. But received "
+                        "X dimension size(%s)",
+                        dim_x.size()));
+  PADDLE_ENFORCE_EQ(
+      dim_gtbox.size(),
+      3,
+      phi::errors::InvalidArgument("Input(GTBox) should be a 3-D tensor, but "
+                                   "received gtbox dimension size(%s)",
+                                   dim_gtbox.size()));
+  PADDLE_ENFORCE_EQ(
+      dim_gtbox[2],
+      4,
+      phi::errors::InvalidArgument("Input(GTBox) dim[2] should be 4. "
+                                   "But received dim[2](%s) != 4.",
+                                   dim_gtbox[2]));
+  PADDLE_ENFORCE_EQ(dim_gtlabel.size(),
+                    2,
+                    phi::errors::InvalidArgument(
+                        "Input(GTLabel) should be a 2-D tensor. "
+                        "But received Input(GTLabel) dimension size(%s) != 2.",
+                        dim_gtlabel.size()));
+  infer_context->AddEqualCstr(dim_x[2], dim_x[3]);
+  infer_context->AddEqualCstr(dim_x[1], mask_num * (5 + class_num));
+  infer_context->AddEqualCstr(dim_gtlabel[0], dim_gtbox[0]);
+  infer_context->AddEqualCstr(dim_gtlabel[1], dim_gtbox[1]);
+
+  const auto &dim_gtscore =
+      infer_context->GetShapeOrDataForValue(op->operand_source(3)).shape();
+  PADDLE_ENFORCE_EQ(
+      dim_gtscore.size(),
+      2,
+      phi::errors::InvalidArgument("Input(GTScore) should be a 2-D tensor. "
+                                   "But received GTScore dimension(%s)",
+                                   dim_gtscore.size()));
+  infer_context->AddEqualCstr(dim_gtscore[0], dim_gtbox[0]);
+  infer_context->AddEqualCstr(dim_gtscore[1], dim_gtbox[1]);
+
+  std::vector<symbol::DimExpr> dim_out = {dim_x[0]};
+  infer_context->SetShapeOrDataForValue(
+      op->result(0),
+      symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs(dim_out)});
+
+  std::vector<symbol::DimExpr> dim_obj_mask = {
+      dim_x[0], mask_num, dim_x[2], dim_x[3]};
+  infer_context->SetShapeOrDataForValue(
+      op->result(1),
+      symbol::ShapeOrDataDimExprs{
+          symbol::TensorShapeOrDataDimExprs(dim_obj_mask)});
+
+  std::vector<symbol::DimExpr> dim_gt_match_mask = {dim_gtbox[0],
+                                                    dim_gtbox[1]};
+  infer_context->SetShapeOrDataForValue(
+      op->result(2),
+      symbol::ShapeOrDataDimExprs{
+          symbol::TensorShapeOrDataDimExprs(dim_gt_match_mask)});
+
+  return true;
+}
+
 bool FakeChannelWiseDequantizeMaxAbsOpInferSymbolicShape(
     pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
   const auto &x_shape_or_data =
diff --git a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/multiary_infer_sym-checkpoint.h b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/multiary_infer_sym-checkpoint.h
new file mode 100644
index 0000000000000..2c347e4cd9ca4
--- /dev/null
+++ b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/multiary_infer_sym-checkpoint.h
@@ -0,0 +1,54 @@
+// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +#pragma once + +#include "paddle/pir/include/dialect/shape/utils/shape_analysis.h" + +namespace paddle::dialect { + +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Accuracy) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Addmm) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Addmm_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(AddN) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Auc) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(BatchNorm) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(BatchNorm_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(BicubicInterp) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Bilinear) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(BilinearInterp) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Concat) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(CrossEntropyWithSoftmax) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(CrossEntropyWithSoftmax_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(FullWithTensor) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(FlashAttn) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(GroupNorm) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Lerp) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Lerp_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(LayerNorm) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Linspace) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(LinearInterp) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Logspace) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(MemoryEfficientAttention) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Meshgrid) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(NearestInterp) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(RoiAlign) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Stack) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(TrilinearInterp) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Where) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Where_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(YoloLoss) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(FakeChannelWiseDequantizeMaxAbs) + +} // namespace paddle::dialect diff --git a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/same_operands_result-checkpoint.cc b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/same_operands_result-checkpoint.cc new file mode 100644 index 0000000000000..22d202775eb17 --- /dev/null +++ b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/same_operands_result-checkpoint.cc @@ -0,0 +1,215 @@ +// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
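+//
+// The OP_SAME_OPERANDS_AND_RESULT(name) macro defined below stamps out one
+// trivial infer function per op. For example,
+// OP_SAME_OPERANDS_AND_RESULT(Relu) expands to a function that copies the
+// symbolic shape (not any constant data) of operand 0 onto result 0:
+//
+//   bool ReluOpInferSymbolicShape(
+//       pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
+//     const auto &operand_shape =
+//         infer_context->GetShapeOrDataForValue(op->operand_source(0))
+//             .shape();
+//     infer_context->SetShapeOrDataForValue(
+//         op->result(0),
+//         symbol::ShapeOrDataDimExprs{
+//             symbol::TensorShapeOrDataDimExprs(operand_shape)});
+//     return true;
+//   }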
+ +#include "paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/same_operands_result.h" + +#define OP_SAME_OPERANDS_AND_RESULT(name) \ + bool name##OpInferSymbolicShape( \ + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { \ + const auto &operand_shape = \ + infer_context->GetShapeOrDataForValue(op->operand_source(0)).shape(); \ + infer_context->SetShapeOrDataForValue( \ + op->result(0), \ + symbol::ShapeOrDataDimExprs{ \ + symbol::TensorShapeOrDataDimExprs(operand_shape)}); \ + return true; \ + } + +namespace paddle::dialect { + +OP_SAME_OPERANDS_AND_RESULT(Abs) +OP_SAME_OPERANDS_AND_RESULT(Abs_) +OP_SAME_OPERANDS_AND_RESULT(Acos) +OP_SAME_OPERANDS_AND_RESULT(Acos_) +OP_SAME_OPERANDS_AND_RESULT(Acosh) +OP_SAME_OPERANDS_AND_RESULT(Acosh_) +OP_SAME_OPERANDS_AND_RESULT(Angle) +OP_SAME_OPERANDS_AND_RESULT(Asin) +OP_SAME_OPERANDS_AND_RESULT(Asin_) +OP_SAME_OPERANDS_AND_RESULT(Asinh) +OP_SAME_OPERANDS_AND_RESULT(Asinh_) +OP_SAME_OPERANDS_AND_RESULT(Atan) +OP_SAME_OPERANDS_AND_RESULT(Atan_) +OP_SAME_OPERANDS_AND_RESULT(Atanh) +OP_SAME_OPERANDS_AND_RESULT(Atanh_) +OP_SAME_OPERANDS_AND_RESULT(AsStrided) +OP_SAME_OPERANDS_AND_RESULT(Bernoulli) +OP_SAME_OPERANDS_AND_RESULT(BitwiseNot) +OP_SAME_OPERANDS_AND_RESULT(BitwiseNot_) +OP_SAME_OPERANDS_AND_RESULT(Ceil) +OP_SAME_OPERANDS_AND_RESULT(Ceil_) +OP_SAME_OPERANDS_AND_RESULT(Celu) +OP_SAME_OPERANDS_AND_RESULT(Clip) +OP_SAME_OPERANDS_AND_RESULT(Clip_) +OP_SAME_OPERANDS_AND_RESULT(Conj) +OP_SAME_OPERANDS_AND_RESULT(CopyTo) +OP_SAME_OPERANDS_AND_RESULT(Cos) +OP_SAME_OPERANDS_AND_RESULT(Cos_) +OP_SAME_OPERANDS_AND_RESULT(Cosh) +OP_SAME_OPERANDS_AND_RESULT(Cosh_) +OP_SAME_OPERANDS_AND_RESULT(DequantizeLog) +OP_SAME_OPERANDS_AND_RESULT(Digamma) +OP_SAME_OPERANDS_AND_RESULT(Digamma_) +OP_SAME_OPERANDS_AND_RESULT(Dirichlet) +OP_SAME_OPERANDS_AND_RESULT(EmptyLike) +OP_SAME_OPERANDS_AND_RESULT(Erf) +OP_SAME_OPERANDS_AND_RESULT(Erf_) +OP_SAME_OPERANDS_AND_RESULT(Erfinv) +OP_SAME_OPERANDS_AND_RESULT(Erfinv_) +OP_SAME_OPERANDS_AND_RESULT(Exp) +OP_SAME_OPERANDS_AND_RESULT(Exp_) +OP_SAME_OPERANDS_AND_RESULT(Expm1) +OP_SAME_OPERANDS_AND_RESULT(Expm1_) +OP_SAME_OPERANDS_AND_RESULT(Exponential_) +OP_SAME_OPERANDS_AND_RESULT(Fill) +OP_SAME_OPERANDS_AND_RESULT(Fill_) +OP_SAME_OPERANDS_AND_RESULT(Fetch) +OP_SAME_OPERANDS_AND_RESULT(Flip) +OP_SAME_OPERANDS_AND_RESULT(Floor) +OP_SAME_OPERANDS_AND_RESULT(Floor_) +OP_SAME_OPERANDS_AND_RESULT(FullLike) +OP_SAME_OPERANDS_AND_RESULT(Imag) +OP_SAME_OPERANDS_AND_RESULT(Increment) +OP_SAME_OPERANDS_AND_RESULT(Increment_) +OP_SAME_OPERANDS_AND_RESULT(Isfinite) +OP_SAME_OPERANDS_AND_RESULT(IsfiniteSr) +OP_SAME_OPERANDS_AND_RESULT(Isinf) +OP_SAME_OPERANDS_AND_RESULT(IsinfSr) +OP_SAME_OPERANDS_AND_RESULT(Isnan) +OP_SAME_OPERANDS_AND_RESULT(IsnanSr) +OP_SAME_OPERANDS_AND_RESULT(I0) +OP_SAME_OPERANDS_AND_RESULT(I0_) +OP_SAME_OPERANDS_AND_RESULT(I0e) +OP_SAME_OPERANDS_AND_RESULT(I1) +OP_SAME_OPERANDS_AND_RESULT(I1e) +OP_SAME_OPERANDS_AND_RESULT(Lgamma) +OP_SAME_OPERANDS_AND_RESULT(Lgamma_) +OP_SAME_OPERANDS_AND_RESULT(Log1p) +OP_SAME_OPERANDS_AND_RESULT(Log1p_) +OP_SAME_OPERANDS_AND_RESULT(Log) +OP_SAME_OPERANDS_AND_RESULT(Log_) +OP_SAME_OPERANDS_AND_RESULT(LogicalNot) +OP_SAME_OPERANDS_AND_RESULT(LogicalNot_) +OP_SAME_OPERANDS_AND_RESULT(Logit) +OP_SAME_OPERANDS_AND_RESULT(Logit_) +OP_SAME_OPERANDS_AND_RESULT(Logsigmoid) +OP_SAME_OPERANDS_AND_RESULT(Logsigmoid_) +OP_SAME_OPERANDS_AND_RESULT(Pow) +OP_SAME_OPERANDS_AND_RESULT(Poisson) +OP_SAME_OPERANDS_AND_RESULT(Pow_) +OP_SAME_OPERANDS_AND_RESULT(Prelu) 
+OP_SAME_OPERANDS_AND_RESULT(Print) +OP_SAME_OPERANDS_AND_RESULT(PutAlongAxis) +OP_SAME_OPERANDS_AND_RESULT(PutAlongAxis_) +OP_SAME_OPERANDS_AND_RESULT(Real) +OP_SAME_OPERANDS_AND_RESULT(Reciprocal) +OP_SAME_OPERANDS_AND_RESULT(Reciprocal_) +OP_SAME_OPERANDS_AND_RESULT(Relu) +OP_SAME_OPERANDS_AND_RESULT(Relu6) +OP_SAME_OPERANDS_AND_RESULT(Relu_) +OP_SAME_OPERANDS_AND_RESULT(Reverse) +OP_SAME_OPERANDS_AND_RESULT(Roll) +OP_SAME_OPERANDS_AND_RESULT(Round) +OP_SAME_OPERANDS_AND_RESULT(Round_) +OP_SAME_OPERANDS_AND_RESULT(RowConv) +OP_SAME_OPERANDS_AND_RESULT(Rsqrt) +OP_SAME_OPERANDS_AND_RESULT(Rsqrt_) +OP_SAME_OPERANDS_AND_RESULT(ScaleSr) +OP_SAME_OPERANDS_AND_RESULT(ScaleSr_) +OP_SAME_OPERANDS_AND_RESULT(Scale_) +OP_SAME_OPERANDS_AND_RESULT(ScatterNdAdd) +OP_SAME_OPERANDS_AND_RESULT(Scatter) +OP_SAME_OPERANDS_AND_RESULT(Scatter_) +OP_SAME_OPERANDS_AND_RESULT(Select) +OP_SAME_OPERANDS_AND_RESULT(Sign) +OP_SAME_OPERANDS_AND_RESULT(Sin) +OP_SAME_OPERANDS_AND_RESULT(Sin_) +OP_SAME_OPERANDS_AND_RESULT(Sinh) +OP_SAME_OPERANDS_AND_RESULT(Sinh_) +OP_SAME_OPERANDS_AND_RESULT(Softmax) +OP_SAME_OPERANDS_AND_RESULT(Softmax_) +OP_SAME_OPERANDS_AND_RESULT(Swish) +OP_SAME_OPERANDS_AND_RESULT(Tan) +OP_SAME_OPERANDS_AND_RESULT(Tan_) +OP_SAME_OPERANDS_AND_RESULT(Tanh) +OP_SAME_OPERANDS_AND_RESULT(Tanh_) +OP_SAME_OPERANDS_AND_RESULT(Tril) +OP_SAME_OPERANDS_AND_RESULT(Tril_) +OP_SAME_OPERANDS_AND_RESULT(Triu) +OP_SAME_OPERANDS_AND_RESULT(Triu_) +OP_SAME_OPERANDS_AND_RESULT(Trunc) +OP_SAME_OPERANDS_AND_RESULT(Trunc_) +OP_SAME_OPERANDS_AND_RESULT(Sigmoid) +OP_SAME_OPERANDS_AND_RESULT(Sigmoid_) +OP_SAME_OPERANDS_AND_RESULT(LeakyRelu) +OP_SAME_OPERANDS_AND_RESULT(LeakyRelu_) +OP_SAME_OPERANDS_AND_RESULT(ThresholdedRelu) +OP_SAME_OPERANDS_AND_RESULT(ThresholdedRelu_) +OP_SAME_OPERANDS_AND_RESULT(SquareSr) +OP_SAME_OPERANDS_AND_RESULT(Square) +OP_SAME_OPERANDS_AND_RESULT(Polygamma) +OP_SAME_OPERANDS_AND_RESULT(Polygamma_) +OP_SAME_OPERANDS_AND_RESULT(EnableCheckModelNanInf) +OP_SAME_OPERANDS_AND_RESULT(ViewShape) + +bool ScaleOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + pir::Value operand_source = op->operand_source(0); + const symbol::ShapeOrDataDimExprs &operand_shape_or_data = + infer_context->GetShapeOrDataForValue(operand_source); + std::vector shape(operand_shape_or_data.shape()); + + if (operand_shape_or_data.data()) { + const std::vector data = [&] { + const symbol::DimExpr scale = [&]() -> symbol::DimExpr { + if (op->num_operands() == 2) { + return infer_context->GetShapeOrDataForValue(op->operand_source(1)) + .data() + ->at(0); + } + return static_cast( + op->attribute("scale").dyn_cast().data()); + }(); + int bias = op->attribute("bias").dyn_cast().data(); + + std::vector data; + for (auto &val : *(operand_shape_or_data.data())) { + data.push_back(val * scale + bias); + } + return data; + }(); + + infer_context->SetShapeOrDataForValue( + op->result(0), symbol::TensorShapeOrDataDimExprs(shape, data)); + } else { + infer_context->SetShapeOrDataForValue(op->result(0), operand_shape_or_data); + } + + return true; +} + +bool ArgsortOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const symbol::ShapeOrDataDimExprs &operand_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + infer_context->SetShapeOrDataForValue(op->result(0), operand_shape_or_data); + infer_context->SetShapeOrDataForValue(op->result(1), operand_shape_or_data); + return true; +} + +} // namespace paddle::dialect + +namespace 
cinn::dialect {} // namespace cinn::dialect + +#undef OP_SAME_OPERANDS_AND_RESULT diff --git a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/same_operands_result-checkpoint.h b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/same_operands_result-checkpoint.h new file mode 100644 index 0000000000000..ed3565456c841 --- /dev/null +++ b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/same_operands_result-checkpoint.h @@ -0,0 +1,165 @@ +// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "paddle/pir/include/dialect/shape/utils/shape_analysis.h" + +namespace paddle::dialect { +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Abs) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Abs_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Acos) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Acos_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Acosh) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Acosh_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Angle) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Argsort) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Asin) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Asin_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Asinh) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Asinh_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Assign) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Assign_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(AsStrided) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Atan) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Atan_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Atanh) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Atanh_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Bernoulli) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(BitwiseNot) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(BitwiseNot_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Ceil) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Ceil_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Celu) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Clip) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Clip_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Conj) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(CopyTo) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cos) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cos_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cosh) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cosh_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(DequantizeLog) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Digamma) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Digamma_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Dirichlet) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(EmptyLike) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Erf) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Erf_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Erfinv) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Erfinv_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Exp) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Exp_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Expm1) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Expm1_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Exponential_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Fetch) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Fill) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Fill_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Flip) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Floor) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Floor_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(FullLike) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Imag) 
+OP_DECLARE_INFER_SYMBOLIC_SHAPE(Increment) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Increment_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Isfinite) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(IsfiniteSr) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Isinf) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(IsinfSr) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Isnan) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(IsnanSr) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(I0) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(I0_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(I0e) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(I1) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(I1e) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Lgamma) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Lgamma_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Log1p) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Log1p_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Log) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Log_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(LogicalNot) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(LogicalNot_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Logit) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Logit_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Logsigmoid) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Logsigmoid_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Poisson) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Pow) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Pow_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Prelu) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Print) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(PutAlongAxis) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(PutAlongAxis_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Real) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Reciprocal) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Reciprocal_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Relu) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Relu6) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Relu_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Reverse) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Roll) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Round) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Round_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(RowConv) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Rsqrt) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Rsqrt_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Scale) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(ScaleSr) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(ScaleSr_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Scale_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(ScatterNdAdd) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Scatter) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Scatter_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Select) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Sign) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Sin) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Sin_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Sinh) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Sinh_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Softmax) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Softmax_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Swish) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Tan) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Tan_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Tanh) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Tanh_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Tril) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Tril_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Triu) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Triu_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Trunc) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Trunc_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Sigmoid) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Sigmoid_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(LeakyRelu) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(LeakyRelu_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(ThresholdedRelu) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(ThresholdedRelu_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(SquareSr) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Square) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Polygamma) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Polygamma_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(EnableCheckModelNanInf) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(ViewShape) + +} // namespace paddle::dialect + +namespace cinn::dialect { +using 
paddle::dialect::ReverseOpInferSymbolicShape; +using paddle::dialect::ScaleOpInferSymbolicShape; +using paddle::dialect::SelectOpInferSymbolicShape; +} // namespace cinn::dialect diff --git a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/unary_infer_sym-checkpoint.cc b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/unary_infer_sym-checkpoint.cc new file mode 100644 index 0000000000000..8781161564eeb --- /dev/null +++ b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/unary_infer_sym-checkpoint.cc @@ -0,0 +1,2138 @@ +// Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/infer_sym_slice_utils.h" +#include "paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/infer_sym_utils.h" +#include "paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/unary_infer_sym.h" + +namespace { +std::vector GetRealPadding( + const std::vector &origin_paddings, + const bool global_pooling, + const bool adaptive, + const std::string padding_algorithm, + const std::vector data_dims, + const std::vector &strides, + const std::vector &kernel_size) { + const auto &GetInitPadding = [&]() -> std::vector { + std::vector res; + // set padding size == data_dims.size() * 2 + if (origin_paddings.size() == data_dims.size()) { + for (std::size_t i = 0; i < origin_paddings.size(); ++i) { + res.emplace_back(symbol::DimExpr{origin_paddings.at(i)}); + res.emplace_back(symbol::DimExpr{origin_paddings.at(i)}); + } + } else { + PADDLE_ENFORCE_EQ( + data_dims.size() * 2, + origin_paddings.size(), + phi::errors::InvalidArgument( + "Paddings size %d should be the same or twice as the " + "pooling size %d.", + origin_paddings.size(), + data_dims.size() * 2)); + for (std::size_t i = 0; i < origin_paddings.size(); ++i) { + res.emplace_back(symbol::DimExpr{origin_paddings.at(i)}); + } + } + return res; + }; + + std::vector real_padding = GetInitPadding(); + + const auto &UpdataPadding = [&]() { + symbol::DimExpr one_dimexpr{1}; + symbol::DimExpr zero_dimexpr{0}; + // when padding_algorithm is "VALID" or "SAME" + if (padding_algorithm == "SAME") { + for (std::size_t i = 0; i < data_dims.size(); ++i) { + symbol::DimExpr stride_dimexpr = symbol::DimExpr{strides[i]}; + + symbol::DimExpr out_size = + (data_dims[i] + stride_dimexpr - one_dimexpr) / stride_dimexpr; + symbol::DimExprBuilder builder; + symbol::DimExpr pad_sum = + builder.Max((out_size - one_dimexpr) * stride_dimexpr + + kernel_size[i] - data_dims[i], + zero_dimexpr); + symbol::DimExpr pad_0 = pad_sum / symbol::DimExpr{2}; + symbol::DimExpr pad_1 = pad_sum - pad_0; + real_padding[i * 2] = pad_0; + real_padding[i * 2 + 1] = pad_1; + } + } else if (padding_algorithm == "VALID") { + real_padding.assign(real_padding.size(), zero_dimexpr); + } + + // if global_pooling == true or adaptive == true, padding will be 
ignore + if (global_pooling || adaptive) { + real_padding.assign(real_padding.size(), zero_dimexpr); + } + }; + + UpdataPadding(); + return real_padding; +} + +symbol::ShapeOrDataDimExprs Pool2dRawInferSymbolicShape( + pir::Operation *op, + const std::vector &kernel_size, + pir::InferSymbolicShapeContext *infer_context) { + const auto &x_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + + const auto &x_dims = x_shape_or_data.shape(); + PADDLE_ENFORCE_EQ( + x_dims.size() == 4 || x_dims.size() == 5, + true, + phi::errors::InvalidArgument( + "the input of Op(pool) should be 4-D or 5-D Tensor. But " + "received: %u-D Tensor.", + x_dims.size())); + + PADDLE_ENFORCE_EQ(x_dims.size() - kernel_size.size(), + 2U, + phi::errors::InvalidArgument( + "the rank of input minus the size of kernel_size " + "must be equal to 2 in Op(pool). " + "But received: the rank of input is %d and the " + "rank of kernel_size is %d.", + x_dims.size(), + kernel_size.size())); + + std::vector strides = [&]() { + std::vector res; + const auto &stride_attr = + op->attributes().at("strides").dyn_cast(); + for (size_t i = 0; i < stride_attr.size(); i++) { + res.emplace_back( + stride_attr.at(i).dyn_cast().data()); + } + return res; + }(); + + PADDLE_ENFORCE_EQ( + kernel_size.size(), + strides.size(), + phi::errors::InvalidArgument( + "the rank of kernel_size and strides in Op(pool) must be equal. " + "But received: the rank of kernel_size is %d and the rank of stride " + "is %d.", + kernel_size.size(), + strides.size())); + + const std::string &data_format = + op->attribute("data_format").AsString(); + const bool channel_last = data_format == "NHWC" || data_format == "NDHWC"; + + const auto &data_dims = [&]() -> std::vector { + if (channel_last) { + return std::vector(x_dims.begin() + 1, x_dims.end() - 1); + } else { + return std::vector(x_dims.begin() + 2, x_dims.end()); + } + }(); + + bool global_pooling = + op->attribute("global_pooling").data(); + bool adaptive = op->attribute("adaptive").data(); + std::string padding_algorithm = + op->attribute("padding_algorithm").AsString(); + + const auto &real_paddings = [&]() -> std::vector { + std::vector paddings; + const auto &padding_attr = + op->attributes().at("paddings").dyn_cast(); + for (size_t i = 0; i < padding_attr.size(); i++) { + paddings.emplace_back( + padding_attr.at(i).dyn_cast().data()); + } + return GetRealPadding(paddings, + global_pooling, + adaptive, + padding_algorithm, + data_dims, + strides, + kernel_size + + ); + }(); + + const auto &real_kernel_size = [&]() -> std::vector { + if (global_pooling) { + return data_dims; + } + return kernel_size; + }(); + + const auto &output_shape_or_data = [&]() -> symbol::ShapeOrDataDimExprs { + std::vector output_shape; + bool ceil_mode = op->attribute("ceil_mode").data(); + if (adaptive) { + output_shape.insert( + output_shape.end(), real_kernel_size.begin(), real_kernel_size.end()); + } else { + for (size_t i = 0; i < data_dims.size(); ++i) { + symbol::DimExpr stride_dimexpr{strides[i]}; + symbol::DimExpr one_dimexpr{1}; + if (!ceil_mode) { + output_shape.emplace_back((data_dims[i] - real_kernel_size[i] + + real_paddings[2 * i] + + real_paddings[2 * i + 1]) / + stride_dimexpr + + one_dimexpr); + } else { + output_shape.emplace_back( + (data_dims[i] - real_kernel_size[i] + real_paddings[2 * i] + + real_paddings[2 * i + 1] + stride_dimexpr - one_dimexpr) / + stride_dimexpr + + one_dimexpr); + } + } + } + + // output_N = input_N + output_shape.insert(output_shape.begin(), x_dims[0]); + // 
output_C = input_C + if (channel_last) { + output_shape.push_back(x_dims[x_dims.size() - 1]); + } else { + output_shape.insert(output_shape.begin() + 1, x_dims[1]); + } + return symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(output_shape)}; + }(); + + return output_shape_or_data; +} +} // namespace + +namespace paddle::dialect { +using paddle::dialect::details::CreateShapeOrDataForXShape; + +bool AllOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + const auto &axis = details::GetVectorAttr(op, "axis"); + return details::ReduceInferDim(op, + infer_context, + axis, + GetBoolAttr(op, "keepdim"), /*keepdim*/ + axis.size() == 0 /*reduce_all*/); +} + +bool AmaxOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + const auto &axis = details::GetVectorAttr(op, "axis"); + return details::ReduceInferDim(op, + infer_context, + axis, + GetBoolAttr(op, "keepdim"), /*keepdim*/ + axis.size() == 0 /*reduce_all*/); +} + +bool AminOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + const auto &axis = details::GetVectorAttr(op, "axis"); + return details::ReduceInferDim(op, + infer_context, + axis, + GetBoolAttr(op, "keepdim"), /*keepdim*/ + axis.size() == 0 /*reduce_all*/); +} + +bool AnyOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + const auto &axis = details::GetVectorAttr(op, "axis"); + return details::ReduceInferDim(op, + infer_context, + axis, + GetBoolAttr(op, "keepdim"), /*keepdim*/ + axis.size() == 0 /*reduce_all*/); +} + +bool ArgmaxOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + bool flatten = GetBoolAttr(op, "flatten"); + bool keepdims = GetBoolAttr(op, "keepdims"); + + const auto &input_sym_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(0)).shape(); + int rank = input_sym_shape.size(); + + const auto &axis_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(1)); + int axis = + static_cast(axis_shape_or_data.data().value().at(0).Get()); + if (axis < 0) axis += rank; + + const auto &out_sym_shape = [&] { + std::vector out_sym_shape; + if (flatten) { + if (keepdims) { + out_sym_shape.emplace_back(std::int64_t(rank)); + } else { + out_sym_shape.emplace_back(std::int64_t(0)); + } + } else { + for (int i = 0; i < axis; i++) { + out_sym_shape.emplace_back(input_sym_shape.at(i)); + } + if (keepdims) { + out_sym_shape.emplace_back(std::int64_t(1)); + } + + for (int i = axis + 1; i < rank; i++) { + out_sym_shape.emplace_back(input_sym_shape.at(i)); + } + } + return out_sym_shape; + }(); + + symbol::ShapeOrDataDimExprs shape_data{ + symbol::TensorShapeOrDataDimExprs(out_sym_shape)}; + + infer_context->SetShapeOrDataForValue(op->result(0), shape_data); + return true; +} + +bool ArgminOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + return ArgmaxOpInferSymbolicShape(op, infer_context); +} + +bool AsComplexOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + pir::Value operand_source = op->operand_source(0); + const symbol::ShapeOrDataDimExprs &operand_shape_or_data = + infer_context->GetShapeOrDataForValue(operand_source); + + const std::vector out_dims = [&] { + std::vector out_dims = operand_shape_or_data.shape(); + out_dims.pop_back(); + return out_dims; + }(); + + symbol::ShapeOrDataDimExprs shape_data{ + 
symbol::TensorShapeOrDataDimExprs(out_dims)}; + + infer_context->SetShapeOrDataForValue(op->result(0), shape_data); + return true; +} +bool AsRealOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + pir::Value operand_source = op->operand_source(0); + const symbol::ShapeOrDataDimExprs &operand_shape_or_data = + infer_context->GetShapeOrDataForValue(operand_source); + + const std::vector out_dims = [&] { + std::vector out_dims = operand_shape_or_data.shape(); + out_dims.push_back(symbol::DimExpr(2)); + return out_dims; + }(); + + symbol::ShapeOrDataDimExprs shape_data{ + symbol::TensorShapeOrDataDimExprs(out_dims)}; + + infer_context->SetShapeOrDataForValue(op->result(0), shape_data); + return true; +} + +bool AssignOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + infer_context->SetShapeOrDataForValue( + op->result(0), + infer_context->GetShapeOrDataForValue(op->operand_source(0))); + return true; +} + +bool Assign_OpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + return AssignOpInferSymbolicShape(op, infer_context); +} + +bool BipartiteMatchOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const auto &dist_mat_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + const auto &dims = dist_mat_shape_or_data.shape(); + + PADDLE_ENFORCE_EQ( + dims.size(), + 2, + phi::errors::InvalidArgument("The rank of Input(DistMat) must be 2.")); + + infer_context->SetShapeOrDataForValue(op->result(0), dist_mat_shape_or_data); + + infer_context->SetShapeOrDataForValue(op->result(1), dist_mat_shape_or_data); + + return true; +} + +bool CastOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + infer_context->SetShapeOrDataForValue( + op->result(0), + infer_context->GetShapeOrDataForValue(op->operand_source(0))); + return true; +} + +bool Cast_OpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + return CastOpInferSymbolicShape(op, infer_context); +} + +bool CholeskyOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const auto &x_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + + auto rank = x_shape.shape().size(); + PADDLE_ENFORCE_GE(rank, + 2, + common::errors::InvalidArgument( + "The Input(X) should have at least 2 dimensions. But " + "received a %d dimension tensor.", + rank)); + + infer_context->AddEqualCstr(x_shape.shape()[rank - 2], + x_shape.shape()[rank - 1]); + + infer_context->SetShapeOrDataForValue(op->result(0), x_shape); + + return true; +} + +bool ClipByNormOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const auto &input_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + float max_norm = op->attribute("max_norm").data(); + PADDLE_ENFORCE_GT( + max_norm, + 0, + phi::errors::InvalidArgument("max_norm should be greater than 0. 
" + "Received max_norm is %f.", + max_norm)); + + infer_context->SetShapeOrDataForValue(op->result(0), input_shape); + return true; +} + +bool ClipByNormSrOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + return ClipByNormOpInferSymbolicShape(op, infer_context); +} + +bool CummaxOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + pir::Value operand_source = op->operand_source(0); + const symbol::ShapeOrDataDimExprs &operand_shape_or_data = + infer_context->GetShapeOrDataForValue(operand_source); + + infer_context->SetShapeOrDataForValue(op->result(0), operand_shape_or_data); + infer_context->SetShapeOrDataForValue(op->result(1), operand_shape_or_data); + return true; +} +bool CumminOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + return CummaxOpInferSymbolicShape(op, infer_context); +} +bool CumprodOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + pir::Value operand_source = op->operand_source(0); + const symbol::ShapeOrDataDimExprs &operand_shape_or_data = + infer_context->GetShapeOrDataForValue(operand_source); + infer_context->SetShapeOrDataForValue(op->result(0), operand_shape_or_data); + return true; +} +bool Cumprod_OpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + return CumprodOpInferSymbolicShape(op, infer_context); +} +bool CumsumOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + pir::Value operand_source = op->operand_source(0); + + const symbol::ShapeOrDataDimExprs &operand_shape_or_data = + infer_context->GetShapeOrDataForValue(operand_source); + + bool flatten = GetBoolAttr(op, "flatten"); + if (flatten) { + symbol::DimExpr product{1}; + const auto &dim_exprs = operand_shape_or_data.shape(); + for (const auto &dim_expr : dim_exprs) { + product = product * dim_expr; + } + const std::vector out_dims = {product}; + symbol::ShapeOrDataDimExprs shape_data{ + symbol::TensorShapeOrDataDimExprs(out_dims)}; + infer_context->SetShapeOrDataForValue(op->result(0), shape_data); + + } else { + infer_context->SetShapeOrDataForValue(op->result(0), operand_shape_or_data); + } + return true; +} +bool Cumsum_OpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + return CumsumOpInferSymbolicShape(op, infer_context); +} +bool ChannelShuffleOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const auto &x_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + const std::vector &input_dims = x_shape_or_data.shape(); + + int groups = op->attribute("groups").data(); + std::string data_format = + op->attribute("data_format").AsString(); + + PADDLE_ENFORCE_EQ( + input_dims.size(), + 4, + phi::errors::InvalidArgument("Input should be a 4-D tensor of format [N, " + "C, H, W] or [N, H, W, C], but got %u.", + input_dims.size())); + PADDLE_ENFORCE_GE( + groups, + 1, + phi::errors::InvalidArgument("groups should be larger than 0.")); + PADDLE_ENFORCE_EQ( + data_format == "NCHW" || data_format == "NHWC", + true, + phi::errors::InvalidArgument("data_format must be one of NCHW and NHWC. 
" + "But received data_format: %s", + data_format)); + + const bool channel_last = (data_format == "NHWC"); + + symbol::DimExpr channels; + if (!channel_last) { + channels = input_dims[1]; + } else { + channels = input_dims[3]; + } + + symbol::DimExpr groups_expr = symbol::DimExpr(groups); + symbol::DimExpr expected_channels = groups_expr * (channels / groups_expr); + + infer_context->AddEqualCstr(channels, expected_channels); + + infer_context->SetShapeOrDataForValue(op->result(0), x_shape_or_data); + + return true; +} + +bool DiagEmbedOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + pir::Value operand_source = op->operand_source(0); + const symbol::ShapeOrDataDimExprs &operand_shape_or_data = + infer_context->GetShapeOrDataForValue(operand_source); + const auto &attributes = op->attributes(); + int dim1 = attributes.at("dim1").dyn_cast().data(); + int dim2 = attributes.at("dim2").dyn_cast().data(); + int offset = attributes.at("offset").dyn_cast().data(); + + const auto &x_dims = operand_shape_or_data.shape(); + int dim1_ = dim1 < 0 ? x_dims.size() + dim1 + 1 : dim1; + int dim2_ = dim2 < 0 ? x_dims.size() + dim2 + 1 : dim2; + int64_t offset_ = static_cast(std::abs(offset)); + symbol::DimExpr new_dim_len = + symbol::DimExpr(offset_) + x_dims.at(x_dims.size() - 1); + + const auto &out_dims = [&] { + std::vector out_dims = x_dims; + out_dims.pop_back(); + out_dims.insert(out_dims.begin() + std::min(dim1_, dim2_), new_dim_len); + out_dims.insert(out_dims.begin() + std::max(dim1_, dim2_), new_dim_len); + return out_dims; + }(); + symbol::ShapeOrDataDimExprs shape_data{ + symbol::TensorShapeOrDataDimExprs(out_dims)}; + infer_context->SetShapeOrDataForValue(op->result(0), shape_data); + return true; +} +bool DiagonalOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + pir::Value operand_source = op->operand_source(0); + const symbol::ShapeOrDataDimExprs &operand_shape_or_data = + infer_context->GetShapeOrDataForValue(operand_source); + const auto &attributes = op->attributes(); + int axis1 = attributes.at("axis1").dyn_cast().data(); + int axis2 = attributes.at("axis2").dyn_cast().data(); + int offset = attributes.at("offset").dyn_cast().data(); + + const auto &x_dims = operand_shape_or_data.shape(); + int axis1_ = axis1 < 0 ? x_dims.size() + axis1 : axis1; + int axis2_ = axis2 < 0 ? x_dims.size() + axis2 : axis2; + + auto out_dims = x_dims; + auto axis1_size = out_dims.at(axis1_); + auto axis2_size = out_dims.at(axis2_); + out_dims.erase(out_dims.begin() + std::max(axis1_, axis2_)); + out_dims.erase(out_dims.begin() + std::min(axis1_, axis2_)); + + symbol::DimExprBuilder builder; + symbol::DimExpr zero{0}; + symbol::DimExpr res_shape; + symbol::DimExpr offset_sym{offset}; + if (offset == 0) { + res_shape = builder.Min(axis1_size, axis2_size); + } else if (offset > 0) { + if (axis2_size.isa()) { + res_shape = (axis2_size.dyn_cast() - offset) > 0 + ? builder.Min(axis1_size, axis2_size - offset_sym) + : zero; + } else { + res_shape = infer_context->GetNextSymName(); + } + } else { + if (axis1_size.isa()) { + res_shape = (axis1_size.dyn_cast() + offset) > 0 + ? 
builder.Min(axis1_size + offset_sym, axis2_size) + : zero; + } else { + res_shape = infer_context->GetNextSymName(); + } + } + out_dims.push_back(symbol::SimplifyDimExpr(res_shape)); + + symbol::ShapeOrDataDimExprs shape_data{ + symbol::TensorShapeOrDataDimExprs(out_dims)}; + infer_context->SetShapeOrDataForValue(op->result(0), shape_data); + return true; +} + +bool DistributeFpnProposalsOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const auto &attributes = op->attributes(); + int32_t min_level = + attributes.at("min_level").dyn_cast().data(); + int32_t max_level = + attributes.at("max_level").dyn_cast().data(); + int32_t num_levels = max_level - min_level + 1; + int64_t batch_size = 1; + + symbol::DimExpr num_rois = + infer_context->GetShapeOrDataForValue(op->operand_source(0)) + .shape() + .at(0); + + const auto &multi_rois_out_shape = [&]() { + symbol::TensorListShapeOrDataDimExprs multi_rois_out_shape; + if (num_levels == 1) { + multi_rois_out_shape.emplace_back( + symbol::TensorShapeOrDataDimExprs({num_rois, 4})); + } else { + symbol::DimExpr last_dim = num_rois; + for (int i = 0; i < num_levels - 1; i++) { + const auto &next_sym_name = infer_context->GetNextSymName(); + std::vector level_dim = {next_sym_name, 4}; + multi_rois_out_shape.emplace_back( + symbol::TensorShapeOrDataDimExprs(level_dim)); + last_dim = last_dim - level_dim.at(0); + } + multi_rois_out_shape.emplace_back(symbol::TensorShapeOrDataDimExprs( + {infer_context->GetNextSymName(), 4})); + } + + return multi_rois_out_shape; + }(); + + const auto &rois_num_per_level_out_shape = [&]() { + symbol::TensorListShapeOrDataDimExprs rois_num_per_level_out_shape; + rois_num_per_level_out_shape.resize( + num_levels, symbol::TensorShapeOrDataDimExprs({batch_size})); + return rois_num_per_level_out_shape; + }(); + + const auto &restore_ind = [&]() { + if (op->operand_source(1)) { + return symbol::TensorShapeOrDataDimExprs( + {infer_context->GetNextSymName(), 1}); + } + return symbol::TensorShapeOrDataDimExprs({num_rois, 1}); + }(); + + infer_context->SetShapeOrDataForValue(op->result(0), multi_rois_out_shape); + infer_context->SetShapeOrDataForValue(op->result(1), + rois_num_per_level_out_shape); + infer_context->SetShapeOrDataForValue(op->result(2), restore_ind); + return true; +} + +bool EighOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + const auto &x_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(0)).shape(); + std::vector out_shape; + for (size_t i = 0; i < x_shape.size() - 1; ++i) { + out_shape.push_back(x_shape.at(i)); + } + infer_context->SetShapeOrDataForValue( + op->result(0), symbol::TensorShapeOrDataDimExprs(out_shape)); + infer_context->SetShapeOrDataForValue( + op->result(1), symbol::TensorShapeOrDataDimExprs(x_shape)); + return true; +} + +bool EigvalshOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + return EighOpInferSymbolicShape(op, infer_context); +} + +bool FakeChannelWiseQuantizeAbsMaxOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const auto &x_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + + int bit_length = op->attribute("bit_length").data(); + int quant_axis = op->attribute("quant_axis").data(); + + PADDLE_ENFORCE_EQ(bit_length >= 1 && bit_length <= 16, + true, + common::errors::InvalidArgument( + "'bit_length' should be between 1 and 16, but " + "the received is %d", 
+ bit_length)); + PADDLE_ENFORCE_EQ( + quant_axis == 0 || quant_axis == 1, + true, + common::errors::InvalidArgument("'quant_axis' should be 0 or 1, but " + "the received is %d", + quant_axis)); + + infer_context->SetShapeOrDataForValue( + op->result(0), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(x_shape_or_data.shape())}); + + std::vector out_scale_shape = { + x_shape_or_data.shape()[quant_axis]}; + infer_context->SetShapeOrDataForValue( + op->result(1), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(out_scale_shape)}); + + return true; +} + +bool FftC2cOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + const auto &x_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + std::vector x_dims = x_shape_or_data.shape(); + + // Set the output shape to be the same as the input shape + infer_context->SetShapeOrDataForValue( + op->result(0), + symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs(x_dims)}); + + return true; +} + +bool FftC2rOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + const auto &x_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + std::vector x_dims = x_shape_or_data.shape(); + + auto axes = paddle::dialect::details::GetVectorAttr(op, "axes"); + int64_t last_dim_size = + op->attribute("last_dim_size").data(); + int last_fft_axis = static_cast(axes.back()); + + std::vector out_dims = x_dims; + + if (last_dim_size > 0) { + out_dims[last_fft_axis] = symbol::DimExpr(last_dim_size); + } else { + symbol::DimExprBuilder builder; + out_dims[last_fft_axis] = + builder.Mul(x_dims[last_fft_axis], 2) - symbol::DimExpr{1}; + } + + infer_context->SetShapeOrDataForValue( + op->result(0), + symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs(out_dims)}); + + return true; +} + +bool FftR2cOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + const auto &x_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + std::vector x_dims = x_shape_or_data.shape(); + + auto axes = paddle::dialect::details::GetVectorAttr(op, "axes"); + bool onesided = op->attribute("onesided").data(); + + std::vector out_dims = x_dims; + + if (onesided) { + int last_fft_axis = static_cast(axes.back()); + symbol::DimExprBuilder builder; + out_dims[last_fft_axis] = + builder.Add(builder.Div(x_dims[last_fft_axis], 2), 1); + } + + infer_context->SetShapeOrDataForValue( + op->result(0), + symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs(out_dims)}); + + return true; +} + +bool FillDiagonalOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const auto &x_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + std::vector x_dims = x_shape_or_data.shape(); + + infer_context->SetShapeOrDataForValue( + op->result(0), + symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs(x_dims)}); + + return true; +} + +bool FillDiagonal_OpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + return FillDiagonalOpInferSymbolicShape(op, infer_context); +} + +bool FlattenOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const auto &attributes = op->attributes(); + int start_axis = + attributes.at("start_axis").dyn_cast().data(); + int stop_axis = + attributes.at("stop_axis").dyn_cast().data(); + + const 
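+  // Worked example: x of shape [2, 3, 4, 5] with start_axis = 1 and
+  // stop_axis = 2 flattens to [2, 12, 5]; result(1) below is the "xshape"
+  // [0, 2, 3, 4, 5] consumed by the backward pass.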
auto &x_shape =
+      infer_context->GetShapeOrDataForValue(op->operand_source(0)).shape();
+  int in_dims_size = x_shape.size();
+
+  if (in_dims_size == 0) {
+    PADDLE_ENFORCE_EQ(
+        start_axis == 0 || start_axis == -1,
+        true,
+        common::errors::InvalidArgument("The start_axis should be 0 or -1 when "
+                                        "the input tensor is a 0D-Tensor"));
+    PADDLE_ENFORCE_EQ(stop_axis == 0 || stop_axis == -1,
+                      true,
+                      common::errors::InvalidArgument(
+                          "The stop_axis should be 0 or -1 when the "
+                          "input tensor is a 0D-Tensor"));
+    // this ensures the output shape is {1}
+    start_axis = 0;
+    stop_axis = -1;
+  }
+
+  if (start_axis < 0) {
+    start_axis = start_axis + in_dims_size;
+  }
+  if (stop_axis < 0) {
+    stop_axis = stop_axis + in_dims_size;
+  }
+  if (in_dims_size > 0) {
+    PADDLE_ENFORCE_GE(
+        stop_axis,
+        start_axis,
+        common::errors::InvalidArgument("The stop_axis should be greater "
+                                        "than or equal to start_axis."));
+  }
+
+  symbol::DimExpr outer{1};
+  std::vector<symbol::DimExpr> out_shape;
+  out_shape.reserve(in_dims_size - stop_axis + start_axis + 1);
+  for (int i = 0; i < start_axis; ++i) {
+    out_shape.push_back(x_shape.at(i));
+  }
+  for (int i = start_axis; i <= stop_axis; i++) {
+    outer = outer * x_shape.at(i);
+  }
+  out_shape.push_back(outer);
+  for (int i = stop_axis + 1; i < in_dims_size; i++) {
+    out_shape.push_back(x_shape.at(i));
+  }
+
+  symbol::ShapeOrDataDimExprs out_shape_data{
+      symbol::TensorShapeOrDataDimExprs(out_shape)};
+  infer_context->SetShapeOrDataForValue(op->result(0), out_shape_data);
+
+  std::vector<symbol::DimExpr> xshape_shape = x_shape;
+  xshape_shape.insert(xshape_shape.begin(), symbol::DimExpr{0});
+  symbol::ShapeOrDataDimExprs xshape_shape_data{
+      symbol::TensorShapeOrDataDimExprs(xshape_shape)};
+  infer_context->SetShapeOrDataForValue(op->result(1), xshape_shape_data);
+  return true;
+}
+
+bool Flatten_OpInferSymbolicShape(
+    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
+  return FlattenOpInferSymbolicShape(op, infer_context);
+}
+
+bool FoldOpInferSymbolicShape(pir::Operation *op,
+                              pir::InferSymbolicShapeContext *infer_context) {
+  const auto &in_dims =
+      infer_context->GetShapeOrDataForValue(op->operand_source(0)).shape();
+
+  std::vector<symbol::DimExpr> out_dims;
+  out_dims.push_back(in_dims[0]);
+  std::vector<int32_t> kernel_sizes =
+      paddle::dialect::details::GetVectorAttr<int32_t>(op, "kernel_sizes");
+  out_dims.push_back(in_dims[1] / (kernel_sizes[0] * kernel_sizes[1]));
+
+  infer_context->SetShapeOrDataForValue(
+      op->result(0),
+      symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs(out_dims)});
+
+  return true;
+}
+
+bool IdentityLossOpInferSymbolicShape(
+    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
+  const auto &input_shape =
+      infer_context->GetShapeOrDataForValue(op->operand_source(0));
+  int reduction = op->attribute<pir::Int32Attribute>("reduction").data();
+  if (reduction == 2) {
+    infer_context->SetShapeOrDataForValue(op->result(0), input_shape);
+  } else {
+    std::vector<symbol::DimExpr> out_shape = {};
+    infer_context->SetShapeOrDataForValue(
+        op->result(0),
+        symbol::ShapeOrDataDimExprs{
+            symbol::TensorShapeOrDataDimExprs(out_shape)});
+  }
+
+  return true;
+}
+
+bool IdentityLoss_OpInferSymbolicShape(
+    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
+  return IdentityLossOpInferSymbolicShape(op, infer_context);
+}
+
+bool KthvalueOpInferSymbolicShape(
+    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
+  pir::Value operand_source = op->operand_source(0);
+  const symbol::ShapeOrDataDimExprs &operand_shape_or_data =
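+      // kthvalue emits two results with identical shapes: axis is dropped,
+      // or kept as 1 when keepdim is true (e.g. x [2, 3, 4], axis = 1,
+      // keepdim = true -> out and indices both [2, 1, 4]).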
infer_context->GetShapeOrDataForValue(operand_source); + const auto &attributes = op->attributes(); + int axis = attributes.at("axis").dyn_cast().data(); + bool keepdim = GetBoolAttr(op, "keepdim"); + + const auto &input_dims = operand_shape_or_data.shape(); + const int &dim_size = input_dims.size(); + if (axis < 0) axis += dim_size; + std::vector out_dims; + for (int i = 0; i < axis; i++) { + out_dims.emplace_back(input_dims.at(i)); + } + if (keepdim && dim_size > 0) { + out_dims.emplace_back(symbol::DimExpr(1)); + } + for (int i = axis + 1; i < dim_size; i++) { + out_dims.emplace_back(input_dims.at(i)); + } + symbol::ShapeOrDataDimExprs shape_data{ + symbol::TensorShapeOrDataDimExprs(out_dims)}; + infer_context->SetShapeOrDataForValue(op->result(0), shape_data); + infer_context->SetShapeOrDataForValue(op->result(1), shape_data); + return true; +} + +bool LpPool2dOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const auto &kernel_size = [&]() -> std::vector { + std::vector kernel_size_int_vec = + op->attribute("kernel_size") + .data() + .GetData(); + return details::VecInt642Expr(kernel_size_int_vec); + }(); + infer_context->SetShapeOrDataForValue( + op->result(0), + Pool2dRawInferSymbolicShape(op, kernel_size, infer_context)); + return true; +} + +bool LogcumsumexpOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + // same as CumsumOpInferSymbolicShape + return CumsumOpInferSymbolicShape(op, infer_context); +} + +bool LogsumexpOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + bool keepdim = GetBoolAttr(op, "keepdim"); + std::vector axis_in = details::GetVectorAttr(op, "axis"); + std::vector axis; + axis.reserve(axis_in.size()); + std::for_each(axis_in.begin(), axis_in.end(), [&axis](const int &t) { + axis.push_back(static_cast(t)); + }); + bool reduce_all = axis.size() == 0 ? true : false; + return details::ReduceInferDim(op, infer_context, axis, keepdim, reduce_all); +} + +bool MaxOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + bool keepdim = GetBoolAttr(op, "keepdim"); + + const std::vector axis = [&] { + pir::Operation *axis_gen_op = op->operand_source(1).defining_op(); + std::vector axis_vec; + if (axis_gen_op->isa()) { + axis_vec = details::GetVectorAttr( + axis_gen_op->dyn_cast(), "value"); + } else { + // TODO(lanxianghit): there's other source: pir::VectorType, + // paddle::dialect::DenseTensorType, but after PRIM, maybe always + // FullIntArrayOp, to be confirmed + PADDLE_THROW(common::errors::Unimplemented( + "MaxOpInferSymbolicShape: 'axis' only " + "support FullIntArrayOp's result now.")); + } + return axis_vec; + }(); + + bool reduce_all = axis.size() == 0 ? 
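+      // An empty axis list means "reduce over all dimensions", the same
+      // reduce_all convention ReduceInferDim expects.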
true : false; + + return details::ReduceInferDim(op, infer_context, axis, keepdim, reduce_all); +} + +bool MaxoutOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + const auto &x_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + const std::vector &in_x_dims = x_shape_or_data.shape(); + + int groups = op->attribute("groups").data(); + int axis = op->attribute("axis").data(); + + if (axis < 0) { + axis += in_x_dims.size(); + } + + std::vector output_shape = in_x_dims; + output_shape[axis] = in_x_dims[axis] / groups; + infer_context->SetShapeOrDataForValue( + op->result(0), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(output_shape)}); + + return true; +} + +bool MinOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + return MaxOpInferSymbolicShape(op, infer_context); +} + +bool MeanAllOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const auto &x_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + const std::vector &x_dims = x_shape_or_data.shape(); + + PADDLE_ENFORCE_GT( + x_dims.size(), + 0, + phi::errors::InvalidArgument("Input(x) of MeanAllOp must have rank " + "greater than 0, but received rank 0.")); + + std::vector output_shape = {}; + + infer_context->SetShapeOrDataForValue( + op->result(0), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(output_shape)}); + + return true; +} + +bool NonzeroOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const auto &x_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + const auto &x_shape = x_shape_or_data.shape(); + int rank = x_shape.size(); + + PADDLE_ENFORCE_GE( + rank, + 1UL, + common::errors::InvalidArgument( + "Input(x) should have number of dimension at least 1.")); + + std::string sym_name = infer_context->GetNextSymName(); + std::vector out_shape{symbol::DimExpr{sym_name}, + symbol::DimExpr{rank}}; + + symbol::ShapeOrDataDimExprs shape_data{ + symbol::TensorShapeOrDataDimExprs(out_shape)}; + infer_context->SetShapeOrDataForValue(op->result(0), shape_data); + return true; +} + +bool NumelOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + std::vector out_shape = {}; + infer_context->SetShapeOrDataForValue( + op->result(0), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(out_shape)}); + + return true; +} + +bool PadOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + // input(0): Tensor x + const auto &x_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + PADDLE_ENFORCE_EQ(x_shape_or_data.data().has_value(), + false, + common::errors::InvalidArgument( + "InferSymbolicShape of PadOp only support input with " + "value now.")); + const auto &x_dims_sym = x_shape_or_data.shape(); + const size_t rank = x_dims_sym.size(); + + // input(1): int[] paddings + std::vector paddings = + paddle::dialect::details::GetVectorAttr(op, "paddings"); + PADDLE_ENFORCE_EQ(rank * 2, + paddings.size(), + common::errors::InvalidArgument( + "The size of paddings should be 2 * input's rank. 
But " + "got paddings.size() = %d, input's rank = %d.", + paddings.size(), + rank)); + + // output + const auto &out_dims = [&] { + std::vector out_dims; + out_dims.reserve(rank); + for (size_t i = 0; i < rank; ++i) { + out_dims.push_back(x_dims_sym.at(i) + paddings.at(2 * i) + + paddings.at(2 * i + 1)); + } + return out_dims; + }(); + + infer_context->SetShapeOrDataForValue( + op->result(0), symbol::TensorShapeOrDataDimExprs(out_dims)); + + return true; +} + +bool Pad3dOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + const auto &x_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(0)).shape(); + PADDLE_ENFORCE_EQ(x_shape.size(), + 5, + common::errors::InvalidArgument( + "The size of Input(X)'s dimension should be equal to " + "5, but received %d. ", + x_shape.size())); + const auto &paddings_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(1)); + if (!paddings_shape.data().has_value()) { + std::stringstream ss; + ss << paddings_shape; + PADDLE_THROW( + common::errors::InvalidArgument("The data of paddings's symbol shape " + "should have value, but now got [%s].", + ss.str())); + } + const std::string &data_format = + op->attribute("data_format").AsString(); + + const std::vector &out_dims = [&] { + std::vector out_dims = x_shape; + const auto &paddings = paddings_shape.data().value(); + PADDLE_ENFORCE_EQ(paddings.size(), + 6, + common::errors::InvalidArgument( + "Shape of Input(Paddings) should be equal to " + "[6], but received [%d].", + paddings.size())); + if (data_format == "NCDHW") { + out_dims.at(1) = x_shape.at(1); + out_dims.at(2) = x_shape.at(2) + paddings.at(4) + paddings.at(5); + out_dims.at(3) = x_shape.at(3) + paddings.at(2) + paddings.at(3); + out_dims.at(4) = x_shape.at(4) + paddings.at(0) + paddings.at(1); + } else { + out_dims.at(1) = x_shape.at(1) + paddings.at(4) + paddings.at(5); + out_dims.at(2) = x_shape.at(2) + paddings.at(2) + paddings.at(3); + out_dims.at(3) = x_shape.at(3) + paddings.at(0) + paddings.at(1); + out_dims.at(4) = x_shape.at(4); + } + return out_dims; + }(); + + infer_context->SetShapeOrDataForValue( + op->result(0), symbol::TensorShapeOrDataDimExprs(out_dims)); + + return true; +} + +bool Pool2dOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + const auto &kernel_size_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(1)); + const auto &kernel_size = + details::GetExprVecFromData(kernel_size_shape_or_data); + infer_context->SetShapeOrDataForValue( + op->result(0), + Pool2dRawInferSymbolicShape(op, kernel_size, infer_context)); + return true; +} + +bool ProdOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + bool keepdim = GetBoolAttr(op, "keepdim"); + bool reduce_all = GetBoolAttr(op, "reduce_all"); + + auto axis_gen_op = op->operand_source(1).defining_op(); + if (axis_gen_op->isa()) { + std::vector axis = details::GetVectorAttr( + axis_gen_op->dyn_cast(), "value"); + return details::ReduceInferDim( + op, infer_context, axis, keepdim, reduce_all); + } else { + // TODO(lanxianghit): deal with other source: pir::VectorType, + // paddle::dialect::DenseTensorType + PADDLE_THROW( + common::errors::Unimplemented("ProdOpInferSymbolicShape: 'axis' only " + "support FullIntArrayOp's result now.")); + } + + return true; +} + +bool RepeatInterleaveOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + pir::Value operand_source = 
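+      // repeat_interleave only scales the chosen axis, e.g. x [S0, 4] with
+      // repeats = 3 and axis = 1 -> out [S0, 12].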
op->operand_source(0); + const symbol::ShapeOrDataDimExprs &operand_shape_or_data = + infer_context->GetShapeOrDataForValue(operand_source); + + const auto &attributes = op->attributes(); + int repeats = attributes.at("repeats").dyn_cast().data(); + // what should I do if axis is null + int axis = attributes.at("axis").dyn_cast().data(); + + const std::vector &in_dims_sym = [&] { + std::vector dims; + if (operand_shape_or_data.data().has_value()) { + dims = operand_shape_or_data.data().value(); + } else { + dims = operand_shape_or_data.shape(); + } + return dims; + }(); + + int x_rank = in_dims_sym.size(); + if (axis < 0) axis += x_rank; + + const auto &out_sym_shape = [&] { + std::vector out_sym_shape; + for (int i = 0; i < x_rank; i++) { + if (i == axis) { + out_sym_shape.push_back(in_dims_sym.at(i) * repeats); + } else { + out_sym_shape.push_back(in_dims_sym.at(i)); + } + } + return out_sym_shape; + }(); + + infer_context->SetShapeOrDataForValue( + op->result(0), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(out_sym_shape)}); + + return true; +} + +bool ReshapeOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const symbol::ShapeOrDataDimExprs &x_dim_expr = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + const symbol::ShapeOrDataDimExprs &shape_dim_expr = + infer_context->GetShapeOrDataForValue(op->operand_source(1)); + + const auto &GetProduct = [&](const auto &dim_exprs, const auto &Filter) { + symbol::DimExpr product{1}; + for (const auto &dim_expr : dim_exprs) { + if (Filter(dim_expr)) { + product = product * dim_expr; + } + } + return product; + }; + + const auto &IsNotMinusOne = [&](const symbol::DimExpr &dim_expr) { + if (dim_expr.isa()) { + return dim_expr.dyn_cast() != static_cast(-1); + } + return true; + }; + + const auto &IsPositiveInteger = [&](const symbol::DimExpr &dim_expr) { + if (dim_expr.isa()) { + return dim_expr.dyn_cast() > static_cast(0); + } + return true; + }; + + const auto &IsZero = [&](const symbol::DimExpr &dim_expr) { + if (dim_expr.isa()) { + return dim_expr.dyn_cast() == static_cast(0); + } + return false; + }; + + const std::vector out_dims = [&] { + const auto &original_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(0)).shape(); + ExprVec target_shape = details::GetExprVecFromData(shape_dim_expr); + + // replace '0' with original shape + for (size_t i = 0; i < target_shape.size(); i++) { + if (i < original_shape.size() && IsZero(target_shape.at(i))) { + target_shape.at(i) = original_shape.at(i); + } + } + + // replace '-1' with infered shape + const auto &numel = + GetProduct(original_shape, [](const auto &) { return true; }); + const auto &product_exclude_minus_one = + GetProduct(target_shape, IsPositiveInteger); + const auto &input_dims = target_shape; + + std::vector out_dims; + out_dims.reserve(input_dims.size()); + for (size_t i = 0; i < input_dims.size(); ++i) { + auto out_dim_expr = IsNotMinusOne(input_dims.at(i)) + ? 
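+                              // A -1 entry becomes numel / product(other
+                              // target dims), e.g. numel = 6 * S0 with target
+                              // [-1, 3] -> out [2 * S0, 3].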
input_dims.at(i) + : (numel / product_exclude_minus_one); + out_dims.emplace_back(out_dim_expr); + } + return out_dims; + }(); + + symbol::ShapeOrDataDimExprs shape_data = [&] { + if (x_dim_expr.data().has_value()) { + return symbol::TensorShapeOrDataDimExprs(out_dims, + x_dim_expr.data().value()); + } + return symbol::TensorShapeOrDataDimExprs(out_dims); + }(); + + infer_context->SetShapeOrDataForValue(op->result(0), shape_data); + return true; +} + +bool Reshape_OpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + return ReshapeOpInferSymbolicShape(op, infer_context); +} + +bool ShapeOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + const symbol::ShapeOrDataDimExprs &operand_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + const auto &out_data = operand_shape_or_data.shape(); + const std::vector shape{std::int64_t(out_data.size())}; + symbol::ShapeOrDataDimExprs shape_or_data{ + symbol::TensorShapeOrDataDimExprs(shape, out_data)}; + + infer_context->SetShapeOrDataForValue(op->result(0), shape_or_data); + return true; +} + +bool ShapeSrOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + return ShapeOpInferSymbolicShape(op, infer_context); +} + +bool SliceOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + pir::Value operand_source = op->operand_source(0); + pir::Value operand_starts = op->operand_source(1); + pir::Value operand_ends = op->operand_source(2); + pir::Value res = op->result(0); + + const symbol::ShapeOrDataDimExprs &starts_shape_data = + infer_context->GetShapeOrDataForValue(operand_starts); + const symbol::ShapeOrDataDimExprs &ends_shape_data = + infer_context->GetShapeOrDataForValue(operand_ends); + + std::vector axes_vec = details::GetVectorAttr(op, "axes"); + + ExprVec starts = slice_utils::GetExprVecFromData(starts_shape_data); + ExprVec ends = slice_utils::GetExprVecFromData(ends_shape_data); + + std::vector infer_flags = details::GetVectorAttr(op, "infer_flags"); + const std::vector decrease_axis = + details::GetVectorAttr(op, "decrease_axis"); + + infer_context->SetShapeOrDataForValue( + res, + slice_utils::SliceRawInferSymbolicShape(operand_source, + res, + starts, + ends, + axes_vec, + infer_flags, + decrease_axis, + infer_context)); + + return true; +} + +bool SplitOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + // input + const auto &x_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + PADDLE_ENFORCE_EQ(x_shape_or_data.data().has_value(), + false, + common::errors::InvalidArgument( + "InferSymbolicShape of SplitOp only support input with " + "value now.")); + const auto &x_dims_sym = x_shape_or_data.shape(); + + // axis + CHECK(op->operand_source(2).defining_op()->isa()); + + int64_t axis = op->operand_source(2) + .defining_op() + .attributes() + .at("value") + .dyn_cast() + .data() + .to(); + size_t rank = x_dims_sym.size(); + axis = axis >= 0 ? 
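+             // Negative axes count from the back, e.g. axis = -1 on a rank-3
+             // input selects dimension 2.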
axis : std::max(int64_t(0), int64_t(axis + rank)); + + // sections + const std::vector §ions_sym = + details::GetExprVecFromData( + infer_context->GetShapeOrDataForValue(op->operand_source(1))); + + // output + const symbol::TensorListShapeOrDataDimExprs &output_shape_data_list = [&] { + const auto &GetSum = [&](const auto &dim_exprs, const auto &Filter) { + symbol::DimExpr sum{0}; + for (const auto &dim_expr : dim_exprs) { + if (Filter(dim_expr)) { + sum = sum + dim_expr; + } + } + return sum; + }; + const auto &All = [&](const auto &dim_exprs, const auto &Cond) { + for (const auto &dim_expr : dim_exprs) { + if (!Cond(dim_expr)) { + return false; + } + } + return true; + }; + const auto &IsNotMinusOne = [&](const symbol::DimExpr &dim_expr) { + if (dim_expr.isa()) { + return dim_expr.dyn_cast() != static_cast(-1); + } + return true; + }; + const auto &sum_exclude_minus_one = GetSum(sections_sym, IsNotMinusOne); + + const bool &all_sections_sym_not_minus_one = + All(sections_sym, IsNotMinusOne); + if (all_sections_sym_not_minus_one) { + infer_context->AddEqualCstr(x_dims_sym.at(axis), sum_exclude_minus_one); + } + + symbol::TensorListShapeOrDataDimExprs shape_data_list; + std::vector output_dims_sym = x_dims_sym; + if (!all_sections_sym_not_minus_one && sections_sym.size() == 1) { + VLOG(3) << "[SplitOp]-1 is the only split section. The output shape is " + "identical to the input shape."; + shape_data_list.push_back( + symbol::TensorShapeOrDataDimExprs(output_dims_sym)); + return shape_data_list; + } + for (uint32_t idx = 0; idx < sections_sym.size(); idx++) { + const auto §ion_sym = sections_sym.at(idx); + output_dims_sym.at(axis) = + IsNotMinusOne(section_sym) + ? section_sym + : x_dims_sym.at(axis) - sum_exclude_minus_one; + + shape_data_list.push_back( + symbol::TensorShapeOrDataDimExprs(output_dims_sym)); + } + return shape_data_list; + }(); + + infer_context->SetShapeOrDataForValue( + op->result(0), symbol::ShapeOrDataDimExprs{output_shape_data_list}); + + return true; +} + +bool SplitWithNumOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const symbol::ShapeOrDataDimExprs &axis_shape_data = + infer_context->GetShapeOrDataForValue(op->operand_source(1)); + PADDLE_ENFORCE_EQ( + axis_shape_data.data().has_value(), + true, + common::errors::InvalidArgument( + "In InferSymbolicShape, axis of SplitWithNumOp is null")); + const std::vector &axis_data = + axis_shape_data.data().value(); + PADDLE_ENFORCE_EQ( + axis_data.size() == 1, + true, + common::errors::InvalidArgument( + "In SplitWithNumOp, data of axis should be one dimension")); + + const auto &attributes = op->attributes(); + int num = attributes.at("num").dyn_cast().data(); + + const auto &x_s_or_d = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + int rank = x_s_or_d.shape().size(); + + const auto &out_s_d = [&](int64_t split_axis, int64_t res_num) { + symbol::DimExpr input_axis_dim = x_s_or_d.shape().at(split_axis); + symbol::DimExpr axis_shape = input_axis_dim / symbol::DimExpr{res_num}; + + std::vector res_s_d; + for (size_t i = 0; i < x_s_or_d.shape().size(); ++i) { + const auto &sym_dim = split_axis == static_cast(i) + ? axis_shape + : x_s_or_d.shape().at(i); + res_s_d.push_back(sym_dim); + } + return symbol::TensorShapeOrDataDimExprs(res_s_d); + }; + + if (axis_data.at(0).isa()) { + // case 1: DimExpr of axis is int. 
axis_shape_or_data: {shape:{1},
+    // data:{3}}, e.g. the axis generator op is full_op or assign_op
+    int64_t axis = axis_data[0].dyn_cast<int64_t>();
+    axis = axis < 0 ? axis + rank : axis;
+    symbol::TensorListShapeOrDataDimExprs res_list_s_d(num, out_s_d(axis, num));
+    infer_context->SetShapeOrDataForValue(
+        op->result(0), symbol::ShapeOrDataDimExprs{res_list_s_d});
+  } else if (axis_data.at(0).isa<std::string>()) {
+    // case 2: DimExpr of axis is a symbol(string). axis_shape_or_data:
+    // {shape:{1}, data:{s0}}, e.g. the axis generator op is data_op
+    int candidate_axis = -1;
+    int count = 0;
+    for (size_t i = 0; i < x_s_or_d.shape().size(); ++i) {
+      if (x_s_or_d.shape().at(i).isa<int64_t>()) {
+        if (x_s_or_d.shape().at(i).dyn_cast<int64_t>() % num == 0) {
+          count++;
+          candidate_axis = i;
+        }
+      } else {
+        PADDLE_THROW(common::errors::InvalidArgument(
+            "Each dimension of X must be a constant int64_t here."));
+      }
+    }
+    if (count == 1) {
+      // calculate the axis of split_with_num_op
+      symbol::TensorListShapeOrDataDimExprs res_list_s_d(
+          num, out_s_d(candidate_axis, num));
+      infer_context->SetShapeOrDataForValue(
+          op->result(0), symbol::ShapeOrDataDimExprs{res_list_s_d});
+    } else {
+      // create new Symbol
+      std::vector<symbol::DimExpr> res_s;
+      for (size_t i = 0; i < x_s_or_d.shape().size(); ++i) {
+        const auto &s_dim =
+            x_s_or_d.shape().at(i).dyn_cast<int64_t>() % num == 0
+                ? symbol::DimExpr(infer_context->GetNextSymName())
+                : x_s_or_d.shape().at(i);
+        res_s.emplace_back(s_dim);
+      }
+      const symbol::TensorShapeOrDataDimExprs &res_s_d =
+          symbol::TensorShapeOrDataDimExprs(res_s);
+      symbol::TensorListShapeOrDataDimExprs res_list_s_d(num, res_s_d);
+      infer_context->SetShapeOrDataForValue(
+          op->result(0), symbol::ShapeOrDataDimExprs{res_list_s_d});
+    }
+  } else {
+    PADDLE_THROW(common::errors::InvalidArgument(
+        "The type of axis must be int64_t or string."));
+  }
+  return true;
+}
+
+bool SumOpInferSymbolicShape(pir::Operation *op,
+                             pir::InferSymbolicShapeContext *infer_context) {
+  bool keepdim = GetBoolAttr(op, "keepdim");
+  bool reduce_all = false;
+
+  auto axis_gen_op = op->operand_source(1).defining_op();
+  if (axis_gen_op->isa<paddle::dialect::FullIntArrayOp>()) {
+    std::vector<int64_t> axis = details::GetVectorAttr(
+        axis_gen_op->dyn_cast<paddle::dialect::FullIntArrayOp>(), "value");
+    if (axis.size() == 0) {
+      reduce_all = true;
+    }
+    return details::ReduceInferDim(
+        op, infer_context, axis, keepdim, reduce_all);
+  } else {
+    // TODO(lanxianghit): deal with other source: pir::VectorType,
+    // paddle::dialect::DenseTensorType
+    PADDLE_THROW(
+        common::errors::Unimplemented("SumOpInferSymbolicShape: 'axis' only "
+                                      "support FullIntArrayOp's result now."));
+  }
+
+  return true;
+}
+
+bool TileOpInferSymbolicShape(pir::Operation *op,
+                              pir::InferSymbolicShapeContext *infer_context) {
+  pir::Value operand_x = op->operand_source(0);
+  symbol::ShapeOrDataDimExprs x_shape_or_data =
+      infer_context->GetShapeOrDataForValue(operand_x);
+  pir::Value operand_repeat_times = op->operand_source(1);
+  symbol::ShapeOrDataDimExprs repeat_times_shape_or_data =
+      infer_context->GetShapeOrDataForValue(operand_repeat_times);
+
+  std::vector<symbol::DimExpr> x_dimexpr = x_shape_or_data.shape();
+  std::vector<symbol::DimExpr> repeat_times_dimexpr =
+      details::GetExprVecFromData(repeat_times_shape_or_data);
+  if (repeat_times_dimexpr.empty()) {
+    repeat_times_dimexpr = std::vector<symbol::DimExpr>(x_dimexpr.size(), 1);
+  }
+
+  auto out_rank = std::max(static_cast<size_t>(x_dimexpr.size()),
+                           repeat_times_dimexpr.size());
+  std::vector<symbol::DimExpr> out_shape(out_rank);
+  if (x_dimexpr.size() > repeat_times_dimexpr.size()) {
+    auto diff = x_dimexpr.size() - repeat_times_dimexpr.size();
+    repeat_times_dimexpr.insert(repeat_times_dimexpr.begin(),
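+                                // Left-pad the shorter vector with 1s so both
+                                // reach out_rank, e.g. x [3, 4] with
+                                // repeat_times [2, 1, 5] acts like x [1, 3, 4]
+                                // and yields out [2, 3, 20].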
diff, 1); + } else { + auto diff = repeat_times_dimexpr.size() - x_dimexpr.size(); + x_dimexpr.insert(x_dimexpr.begin(), diff, 1); + } + + for (size_t i = 0; i < repeat_times_dimexpr.size(); ++i) { + out_shape.at(i) = x_dimexpr.at(i) * repeat_times_dimexpr.at(i); + } + + symbol::ShapeOrDataDimExprs shape_data{ + symbol::TensorShapeOrDataDimExprs(out_shape)}; + + pir::Value res = op->result(0); + infer_context->SetShapeOrDataForValue(res, shape_data); + + return true; +} + +bool TopkOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + symbol::ShapeOrDataDimExprs x_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + symbol::ShapeOrDataDimExprs k_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(1)); + const auto &attributes = op->attributes(); + int axis = attributes.at("axis").dyn_cast().data(); + const std::vector &in_dims_sym = [&] { + std::vector dims; + if (x_shape_or_data.data().has_value()) { + dims = x_shape_or_data.data().value(); + } else { + dims = x_shape_or_data.shape(); + } + return dims; + }(); + + int x_rank = in_dims_sym.size(); + + int k = k_shape_or_data.data().value().at(0).Get(); + + if (axis < 0) axis += x_rank; + const auto &out_sym_shape = [&] { + std::vector out_sym_shape; + for (int i = 0; i < x_rank; ++i) { + if (i == axis) { + out_sym_shape.push_back(symbol::DimExpr(k)); + } else { + out_sym_shape.push_back(in_dims_sym.at(i)); + } + } + return out_sym_shape; + }(); + + symbol::ShapeOrDataDimExprs shape_data{ + symbol::TensorShapeOrDataDimExprs(out_sym_shape)}; + + infer_context->SetShapeOrDataForValue(op->result(0), shape_data); + infer_context->SetShapeOrDataForValue(op->result(1), shape_data); + + return true; +} + +bool TopkV1OpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + return TopkOpInferSymbolicShape(op, infer_context); +} + +bool TransposeOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + std::vector perm = + op->attributes().at("perm").dyn_cast().AsVector(); + if (perm.size() == 1) { + // perm must be [0], which means nothing to do with input, just copy the + // info from input + infer_context->SetShapeOrDataForValue( + op->result(0), + infer_context->GetShapeOrDataForValue(op->operand_source(0))); + return true; + } + const std::vector &x_dims = [&] { + std::vector dims; + const auto &x_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + if (x_shape_or_data.data().has_value()) { + dims = x_shape_or_data.data().value(); + } else { + dims = x_shape_or_data.shape(); + } + return dims; + }(); + + int x_rank = x_dims.size(); + + const std::vector formatted_axis = [x_rank, &perm] { + std::vector out(perm.size(), 0); + std::transform(perm.begin(), + perm.end(), + out.begin(), + [](pir::Attribute &p) -> int32_t { + return p.dyn_cast().data(); + }); + + // format the negative axis + std::for_each(out.begin(), out.end(), [x_rank](int32_t &v) { + if (v < 0) { + v += x_rank; + } + }); + return out; + }(); + + int axis_size = static_cast(formatted_axis.size()); + + std::vector out_dims(x_dims); + for (int i = 0; i < axis_size; ++i) { + out_dims.at(i) = x_dims.at(formatted_axis.at(i)); + } + + infer_context->SetShapeOrDataForValue(op->result(0), + ShapeOrData{TensorExprs(out_dims)}); + + return true; +} + +bool Transpose_OpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + return TransposeOpInferSymbolicShape(op, 
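+                                    // The inplace variant reuses the
+                                    // out-of-place shape inference unchanged.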
infer_context); +} + +bool SqueezeOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + PADDLE_ENFORCE_EQ( + op->num_operands(), + 2, + common::errors::InvalidArgument( + "SqueezeOpInferSymbolicShape ONLY support num_operands() == 2 " + "now, but got %d operands", + op->num_operands())); + + auto x_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + auto axes_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(1)); + + std::vector in_dims_sym; + if (x_shape_or_data.data().has_value()) { + in_dims_sym = x_shape_or_data.data().value(); + } else { + in_dims_sym = x_shape_or_data.shape(); + } + + std::vector squeeze_dims_sym; + if (axes_shape_or_data.data().has_value()) { + squeeze_dims_sym = axes_shape_or_data.data().value(); + } else { + squeeze_dims_sym = axes_shape_or_data.shape(); + } + + std::vector squeeze_dims; + for (auto squeeze_dim : squeeze_dims_sym) { + PADDLE_ENFORCE_EQ( + squeeze_dim.Has(), + true, + common::errors::InvalidArgument( + "in SqueezeOpInferSymbolicShape, axes must be known int type, " + "but got: %s", + symbol::ToString(squeeze_dim))); + squeeze_dims.emplace_back( + static_cast(squeeze_dim.Get())); + } + + // GetOutputSqueezeShape + size_t num_squeeze_dims = squeeze_dims.size(); + std::vector should_squeeze(in_dims_sym.size(), false); + // Mark dimensions need to be squeezed. + if (num_squeeze_dims == 0) { + for (size_t i = 0; i < in_dims_sym.size(); ++i) { + // TODO(lanxianghit): if symbol here, maybe we need the result of dim expr + // simplification + if (in_dims_sym.at(i) == 1) { + should_squeeze.at(i) = true; + } + } + } else { + for (size_t i = 0; i < num_squeeze_dims; ++i) { + if (in_dims_sym.size() == 0) { + continue; + } + int current = squeeze_dims.at(i) < 0 + ? squeeze_dims.at(i) + in_dims_sym.size() + : squeeze_dims.at(i); + + if (!should_squeeze.at(current)) { + // At compile time, dim of SYMBOL is allowed to squeeze? + if (in_dims_sym.at(current) == 1) { + should_squeeze.at(current) = true; + } else if (!in_dims_sym.at(current).Has()) { + should_squeeze.at(current) = true; + } else { + should_squeeze.at(current) = true; + } + } + } + } + + // Make output dimensions + std::vector output_shape_sym; + for (size_t i = 0; i < in_dims_sym.size(); ++i) { + if (!should_squeeze.at(i)) { + output_shape_sym.emplace_back(in_dims_sym.at(i)); + } + } + + symbol::ShapeOrDataDimExprs shape_data{ + symbol::TensorShapeOrDataDimExprs(output_shape_sym)}; + + pir::Value res = op->result(0); + infer_context->SetShapeOrDataForValue(res, shape_data); + infer_context->SetShapeOrDataForValue( + op->result(1), CreateShapeOrDataForXShape(x_shape_or_data)); + + return true; +} +bool Squeeze_OpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + return SqueezeOpInferSymbolicShape(op, infer_context); +} + +bool UnbindOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + // input + const auto &x_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + PADDLE_ENFORCE_EQ( + x_shape_or_data.data().has_value(), + false, + common::errors::InvalidArgument( + "InferSymbolicShape of UnbindOp only support input with " + "value now.")); + const auto &x_dims_sym = x_shape_or_data.shape(); + + // axis + int axis = op->attributes().at("axis").dyn_cast().data(); + int rank = x_dims_sym.size(); + axis = axis >= 0 ? 
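+             // e.g. unbinding x [S0, 3] along axis 1 yields three outputs of
+             // shape [S0]; the unbound axis must be a compile-time constant.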
axis : axis + rank; + + // output + const symbol::TensorListShapeOrDataDimExprs &output_shape_data_list = [&] { + symbol::TensorListShapeOrDataDimExprs shape_data_list; + std::vector output_dims_sym = x_dims_sym; + + const symbol::DimExpr &unbound_dim = x_dims_sym.at(axis); + PADDLE_ENFORCE_EQ(unbound_dim.isa(), + true, + common::errors::InvalidArgument( + "InferSymbolicShape of UnbindOp only support unbound " + "dim with constant length!")); + output_dims_sym.erase(output_dims_sym.begin() + axis); + const int64_t unbound_dim_length = unbound_dim.dyn_cast(); + + for (uint32_t idx = 0; idx < unbound_dim_length; idx++) { + shape_data_list.push_back( + symbol::TensorShapeOrDataDimExprs(output_dims_sym)); + } + return shape_data_list; + }(); + + infer_context->SetShapeOrDataForValue( + op->result(0), symbol::ShapeOrDataDimExprs{output_shape_data_list}); + + return true; +} + +bool UniqueOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + const auto &x_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + PADDLE_ENFORCE_EQ( + x_shape_or_data.data().has_value(), + false, + common::errors::InvalidArgument( + "InferSymbolicShape of UniqueOp only support input with " + "value now.")); + const auto &x_dims_sym = x_shape_or_data.shape(); + const size_t rank = x_dims_sym.size(); + std::vector axes = + paddle::dialect::details::GetVectorAttr(op, "axis"); + + symbol::DimExpr unique_dim_sym = + infer_context->GetNextSymName(); // unknown until runtime + + const std::vector &counts_dims = [&] { + std::vector out_dims; + out_dims.push_back(unique_dim_sym); + return out_dims; + }(); + + const std::vector &index_dims = counts_dims; + + const std::vector &out_dims = [&] { + if (axes.empty()) { + return counts_dims; + } + std::vector out_dims = x_dims_sym; + int axis = axes.at(0); + axis = axis >= 0 ? axis : axis + rank; + out_dims.at(axis) = unique_dim_sym; + return out_dims; + }(); + + const std::vector &inverse_dims = [&] { + std::vector inverse_dims; + if (axes.empty()) { + // flatten before unique + symbol::DimExpr product{1}; + for (const auto &x_dim : x_dims_sym) { + product = product * x_dim; + } + inverse_dims.push_back(product); + } else { + int axis = axes.at(0); + axis = axis >= 0 ? axis : axis + rank; + inverse_dims.push_back(x_dims_sym.at(axis)); + } + return inverse_dims; + }(); + + bool return_index = GetBoolAttr(op, "return_index"); + bool return_inverse = GetBoolAttr(op, "return_inverse"); + bool return_counts = GetBoolAttr(op, "return_counts"); + + symbol::ShapeOrDataDimExprs empty{symbol::TensorShapeOrDataDimExprs{}}; + infer_context->SetShapeOrDataForValue( + op->result(0), symbol::TensorShapeOrDataDimExprs{out_dims}); + infer_context->SetShapeOrDataForValue( + op->result(1), + return_index ? symbol::TensorShapeOrDataDimExprs{index_dims} : empty); + infer_context->SetShapeOrDataForValue( + op->result(2), + return_inverse ? symbol::TensorShapeOrDataDimExprs{inverse_dims} : empty); + infer_context->SetShapeOrDataForValue( + op->result(3), + return_counts ? 
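+          // Optional outputs get an empty shape when their flag is off; the
+          // unique-element count is a fresh symbol, known only at runtime.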
symbol::TensorShapeOrDataDimExprs{counts_dims} : empty); + + return true; +} + +bool UniqueConsecutiveOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const auto &x_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + PADDLE_ENFORCE_EQ( + x_shape_or_data.data().has_value(), + false, + common::errors::InvalidArgument( + "InferSymbolicShape of UniqueConsecutiveOp only support input with " + "value now.")); + const auto &x_dims_sym = x_shape_or_data.shape(); + const size_t rank = x_dims_sym.size(); + std::vector axes = + paddle::dialect::details::GetVectorAttr(op, "axis"); + + symbol::DimExpr unique_dim_sym = + infer_context->GetNextSymName(); // unknown until runtime + + const std::vector &counts_dims = [&] { + std::vector out_dims; + out_dims.push_back(unique_dim_sym); + return out_dims; + }(); + + const std::vector &out_dims = [&] { + if (axes.empty()) { + return counts_dims; + } + std::vector out_dims = x_dims_sym; + int axis = axes.at(0); + axis = axis >= 0 ? axis : axis + rank; + out_dims.at(axis) = unique_dim_sym; + return out_dims; + }(); + + const std::vector &inverse_dims = [&] { + std::vector inverse_dims; + if (axes.empty()) { + // flatten before unique + symbol::DimExpr product{1}; + for (const auto &x_dim : x_dims_sym) { + product = product * x_dim; + } + inverse_dims.push_back(product); + } else { + int axis = axes.at(0); + axis = axis >= 0 ? axis : axis + rank; + inverse_dims.push_back(x_dims_sym.at(axis)); + } + return inverse_dims; + }(); + + bool return_inverse = GetBoolAttr(op, "return_inverse"); + bool return_counts = GetBoolAttr(op, "return_counts"); + + symbol::ShapeOrDataDimExprs empty{symbol::TensorShapeOrDataDimExprs{}}; + infer_context->SetShapeOrDataForValue( + op->result(0), symbol::TensorShapeOrDataDimExprs{out_dims}); + infer_context->SetShapeOrDataForValue( + op->result(1), + return_inverse ? symbol::TensorShapeOrDataDimExprs{inverse_dims} : empty); + infer_context->SetShapeOrDataForValue( + op->result(2), + return_counts ? symbol::TensorShapeOrDataDimExprs{counts_dims} : empty); + + return true; +} + +bool UnsqueezeOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + PADDLE_ENFORCE_EQ( + op->num_operands(), + 2, + common::errors::InvalidArgument( + "UnsqueezeOp InferSymbolicShape ONLY support num_operands() == 2 " + "now, but got %d operands", + op->num_operands())); + + auto x_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + auto axes_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(1)); + + std::vector x_sym_shape; + if (x_shape_or_data.data().has_value()) { + x_sym_shape = x_shape_or_data.data().value(); + } else { + x_sym_shape = x_shape_or_data.shape(); + } + int x_dims_size = x_sym_shape.size(); + + std::vector axes_sym; + if (axes_shape_or_data.data().has_value()) { + axes_sym = axes_shape_or_data.data().value(); + } else { + axes_sym = axes_shape_or_data.shape(); + } + int axes_sym_size = axes_sym.size(); + + // GetUnsqueezeShape + int output_rank = x_dims_size + axes_sym_size; + std::vector result_sym_dims(output_rank, 0); + + int cur_output_rank = x_dims_size; + for (auto axis_expr : axes_sym) { + PADDLE_ENFORCE_EQ( + axis_expr.Has(), + true, + common::errors::InvalidArgument( + "in UnsqueezeOpInferSymbolicShape, axes must be known int type, " + "but got: %s", + symbol::ToString(axis_expr))); + int axis = static_cast(axis_expr.Get()); + int cur = axis < 0 ? 
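+                      // Negative axes are resolved against the partially
+                      // expanded rank, e.g. axis = -1 with cur_output_rank = 2
+                      // maps to insertion position 2.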
axis + cur_output_rank + 1 : axis; + + // Move old axis, and insert new axis + for (int i = cur_output_rank; i >= cur; --i) { + if (result_sym_dims.at(i) == 1) { + // Move axis + result_sym_dims.at(i + 1) = 1; + result_sym_dims.at(i) = 0; + } + } + result_sym_dims.at(cur) = 1; + // Add the output size. + cur_output_rank++; + } + + // Make output shape + for (int in_idx = 0, out_idx = 0; out_idx < output_rank; ++out_idx) { + if (result_sym_dims.at(out_idx) == 0) { + result_sym_dims.at(out_idx) = x_sym_shape.at(in_idx++); + } + } + + symbol::ShapeOrDataDimExprs shape_data{ + symbol::TensorShapeOrDataDimExprs(result_sym_dims)}; + + pir::Value res = op->result(0); + infer_context->SetShapeOrDataForValue(res, shape_data); + infer_context->SetShapeOrDataForValue( + op->result(1), CreateShapeOrDataForXShape(x_shape_or_data)); + + return true; +} +bool Unsqueeze_OpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + return UnsqueezeOpInferSymbolicShape(op, infer_context); +} + +} // namespace paddle::dialect diff --git a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/unary_infer_sym-checkpoint.h b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/unary_infer_sym-checkpoint.h new file mode 100644 index 0000000000000..4390c63f99ec4 --- /dev/null +++ b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/unary_infer_sym-checkpoint.h @@ -0,0 +1,95 @@ +// Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
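+
+// Each OP_DECLARE_INFER_SYMBOLIC_SHAPE(Name) below declares the interface
+// hook implemented in the matching .cc file, i.e. (sketch inferred from the
+// definitions in this patch):
+//
+//   bool NameOpInferSymbolicShape(pir::Operation *op,
+//                                 pir::InferSymbolicShapeContext *infer_context);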
+ +#pragma once + +#include "paddle/pir/include/dialect/shape/utils/shape_analysis.h" + +namespace paddle::dialect { +OP_DECLARE_INFER_SYMBOLIC_SHAPE(All) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Amax) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Amin) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Any) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Argmax) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Argmin) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(AsComplex) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(AsReal) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Assign) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Assign_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(BipartiteMatch) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cast) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cast_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cholesky) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(ClipByNorm) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(ClipByNormSr) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cummax) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cummin) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cumprod) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cumprod_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cumsum) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cumsum_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(ChannelShuffle) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(DiagEmbed) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Diagonal) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(DistributeFpnProposals) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Eigh) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Eigvalsh) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(FakeChannelWiseQuantizeAbsMax) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(FftC2c) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(FftC2r) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(FftR2c) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(FillDiagonal) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(FillDiagonal_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Flatten) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Flatten_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Fold) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(IdentityLoss) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(IdentityLoss_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Kthvalue) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(LpPool2d) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Logcumsumexp) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Logsumexp) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Max) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Maxout) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Min) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(MeanAll) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Nonzero) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Numel) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Pad) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Pad3d) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Pool2d) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Prod) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(RepeatInterleave) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Reshape) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Reshape_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Shape) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(ShapeSr) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Slice) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Split) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(SplitWithNum) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Squeeze) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Squeeze_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Sum) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Tile) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Topk) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(TopkV1) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Transpose) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Transpose_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Unbind) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Unique) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(UniqueConsecutive) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Unsqueeze) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Unsqueeze_) + +} // namespace paddle::dialect diff --git a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/multiary_infer_sym.cc b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/multiary_infer_sym.cc index fe36253d3f9d7..1c5d3ae07be03 100644 --- 
a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/multiary_infer_sym.cc
+++ b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/multiary_infer_sym.cc
@@ -1201,6 +1201,80 @@ bool Where_OpInferSymbolicShape(pir::Operation *op,
   return WhereOpInferSymbolicShape(op, infer_context);
 }
+bool YoloLossOpInferSymbolicShape(
+    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
+  const auto &dim_x =
+      infer_context->GetShapeOrDataForValue(op->operand_source(0)).shape();
+  const auto &dim_gtbox =
+      infer_context->GetShapeOrDataForValue(op->operand_source(1)).shape();
+  const auto &dim_gtlabel =
+      infer_context->GetShapeOrDataForValue(op->operand_source(2)).shape();
+  std::vector<int> anchors_mask =
+      paddle::dialect::details::GetVectorAttr<int>(op, "anchors_mask");
+  int mask_num = static_cast<int>(anchors_mask.size());
+  int class_num = op->attribute<pir::Int32Attribute>("class_num").data();
+
+  PADDLE_ENFORCE_EQ(dim_x.size(),
+                    4,
+                    phi::errors::InvalidArgument(
+                        "Input(X) should be a 4-D tensor. But received "
+                        "X dimension size(%s).",
+                        dim_x.size()));
+  PADDLE_ENFORCE_EQ(
+      dim_gtbox.size(),
+      3,
+      phi::errors::InvalidArgument("Input(GTBox) should be a 3-D tensor, but "
+                                   "received gtbox dimension size(%s).",
+                                   dim_gtbox.size()));
+  PADDLE_ENFORCE_EQ(
+      dim_gtbox[2],
+      4,
+      phi::errors::InvalidArgument("Input(GTBox) dim[2] should be 4. "
+                                   "But received dim[2](%s) != 4.",
+                                   dim_gtbox[2]));
+  PADDLE_ENFORCE_EQ(dim_gtlabel.size(),
+                    2,
+                    phi::errors::InvalidArgument(
+                        "Input(GTLabel) should be a 2-D tensor. "
+                        "But received Input(GTLabel) dimension size(%s) != 2.",
+                        dim_gtlabel.size()));
+  infer_context->AddEqualCstr(dim_x[2], dim_x[3]);
+  infer_context->AddEqualCstr(dim_x[1], mask_num * (5 + class_num));
+  infer_context->AddEqualCstr(dim_gtlabel[0], dim_gtbox[0]);
+  infer_context->AddEqualCstr(dim_gtlabel[1], dim_gtbox[1]);
+
+  const auto &dim_gtscore =
+      infer_context->GetShapeOrDataForValue(op->operand_source(3)).shape();
+  PADDLE_ENFORCE_EQ(
+      dim_gtscore.size(),
+      2,
+      phi::errors::InvalidArgument("Input(GTScore) should be a 2-D tensor. "
+                                   "But received GTScore dimension size(%s).",
+                                   dim_gtscore.size()));
+  infer_context->AddEqualCstr(dim_gtscore[0], dim_gtbox[0]);
+  infer_context->AddEqualCstr(dim_gtscore[1], dim_gtbox[1]);
+
+  std::vector<symbol::DimExpr> dim_out = {dim_x[0]};
+  infer_context->SetShapeOrDataForValue(
+      op->result(0),
+      symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs(dim_out)});
+
+  std::vector<symbol::DimExpr> dim_obj_mask = {
+      dim_x[0], mask_num, dim_x[2], dim_x[3]};
+  infer_context->SetShapeOrDataForValue(
+      op->result(1),
+      symbol::ShapeOrDataDimExprs{
+          symbol::TensorShapeOrDataDimExprs(dim_obj_mask)});
+
+  std::vector<symbol::DimExpr> dim_gt_match_mask = {dim_gtbox[0], dim_gtbox[1]};
+  infer_context->SetShapeOrDataForValue(
+      op->result(2),
+      symbol::ShapeOrDataDimExprs{
+          symbol::TensorShapeOrDataDimExprs(dim_gt_match_mask)});
+
+  return true;
+}
+
 bool FakeChannelWiseDequantizeMaxAbsOpInferSymbolicShape(
     pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
   const auto &x_shape_or_data =
diff --git a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/multiary_infer_sym.h b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/multiary_infer_sym.h
index 6c21b3b69000a..2c347e4cd9ca4 100644
--- a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/multiary_infer_sym.h
+++ b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/multiary_infer_sym.h
@@ -48,6 +48,7 @@ OP_DECLARE_INFER_SYMBOLIC_SHAPE(Stack)
OP_DECLARE_INFER_SYMBOLIC_SHAPE(TrilinearInterp) OP_DECLARE_INFER_SYMBOLIC_SHAPE(Where) OP_DECLARE_INFER_SYMBOLIC_SHAPE(Where_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(YoloLoss) OP_DECLARE_INFER_SYMBOLIC_SHAPE(FakeChannelWiseDequantizeMaxAbs) } // namespace paddle::dialect diff --git a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/same_operands_result.cc b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/same_operands_result.cc index 3eb6b62bdc1fd..22d202775eb17 100644 --- a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/same_operands_result.cc +++ b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/same_operands_result.cc @@ -43,6 +43,7 @@ OP_SAME_OPERANDS_AND_RESULT(Atan) OP_SAME_OPERANDS_AND_RESULT(Atan_) OP_SAME_OPERANDS_AND_RESULT(Atanh) OP_SAME_OPERANDS_AND_RESULT(Atanh_) +OP_SAME_OPERANDS_AND_RESULT(AsStrided) OP_SAME_OPERANDS_AND_RESULT(Bernoulli) OP_SAME_OPERANDS_AND_RESULT(BitwiseNot) OP_SAME_OPERANDS_AND_RESULT(BitwiseNot_) diff --git a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/same_operands_result.h b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/same_operands_result.h index 2e84c7297643f..ed3565456c841 100644 --- a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/same_operands_result.h +++ b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/same_operands_result.h @@ -31,6 +31,7 @@ OP_DECLARE_INFER_SYMBOLIC_SHAPE(Asinh) OP_DECLARE_INFER_SYMBOLIC_SHAPE(Asinh_) OP_DECLARE_INFER_SYMBOLIC_SHAPE(Assign) OP_DECLARE_INFER_SYMBOLIC_SHAPE(Assign_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(AsStrided) OP_DECLARE_INFER_SYMBOLIC_SHAPE(Atan) OP_DECLARE_INFER_SYMBOLIC_SHAPE(Atan_) OP_DECLARE_INFER_SYMBOLIC_SHAPE(Atanh) diff --git a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/unary_infer_sym.cc b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/unary_infer_sym.cc index 36aecf07fcc73..56df979e6cd44 100644 --- a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/unary_infer_sym.cc +++ b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/unary_infer_sym.cc @@ -886,6 +886,24 @@ bool Flatten_OpInferSymbolicShape( return FlattenOpInferSymbolicShape(op, infer_context); } +bool FoldOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + const auto &in_dims = + infer_context->GetShapeOrDataForValue(op->operand_source(0)).shape(); + + std::vector out_dims; + out_dims.push_back(in_dims[0]); + std::vector kernel_sizes = + paddle::dialect::details::GetVectorAttr(op, "kernel_sizes"); + out_dims.push_back(in_dims[1] / (kernel_sizes[0] * kernel_sizes[1])); + + infer_context->SetShapeOrDataForValue( + op->result(0), + symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs(out_dims)}); + + return true; +} + bool IdentityLossOpInferSymbolicShape( pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { const auto &input_shape = diff --git a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/unary_infer_sym.h b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/unary_infer_sym.h index 8437418587353..4390c63f99ec4 100644 --- a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/unary_infer_sym.h +++ b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/unary_infer_sym.h @@ -53,6 +53,7 @@ OP_DECLARE_INFER_SYMBOLIC_SHAPE(FillDiagonal) OP_DECLARE_INFER_SYMBOLIC_SHAPE(FillDiagonal_) OP_DECLARE_INFER_SYMBOLIC_SHAPE(Flatten) 
OP_DECLARE_INFER_SYMBOLIC_SHAPE(Flatten_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Fold) OP_DECLARE_INFER_SYMBOLIC_SHAPE(IdentityLoss) OP_DECLARE_INFER_SYMBOLIC_SHAPE(IdentityLoss_) OP_DECLARE_INFER_SYMBOLIC_SHAPE(Kthvalue) diff --git a/paddle/phi/ops/yaml/.ipynb_checkpoints/ops-checkpoint.yaml b/paddle/phi/ops/yaml/.ipynb_checkpoints/ops-checkpoint.yaml index d37c56117f3d5..0a6da0f668217 100755 --- a/paddle/phi/ops/yaml/.ipynb_checkpoints/ops-checkpoint.yaml +++ b/paddle/phi/ops/yaml/.ipynb_checkpoints/ops-checkpoint.yaml @@ -314,6 +314,7 @@ func : as_strided backward : as_strided_grad no_need_buffer : input + interfaces : paddle::dialect::InferSymbolicShapeInterface - op : asgd_ args : (Tensor param, Tensor grad, Tensor learning_rate, Tensor d, Tensor y, Tensor n, Tensor master_param, bool multi_precision=false) @@ -1931,6 +1932,7 @@ kernel: func: fold backward: fold_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface - op : fractional_max_pool2d args : (Tensor x, int[] output_size, int[] kernel_size = {0, 0}, float random_u = 0.0, bool return_mask = true) @@ -4985,6 +4987,7 @@ optional : gt_score intermediate : objectness_mask, gt_match_mask backward : yolo_loss_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface - op : zeros args : (IntArray shape, DataType dtype=DataType::FLOAT32, Place place=CPUPlace()) diff --git a/paddle/phi/ops/yaml/ops.yaml b/paddle/phi/ops/yaml/ops.yaml index d37c56117f3d5..0a6da0f668217 100755 --- a/paddle/phi/ops/yaml/ops.yaml +++ b/paddle/phi/ops/yaml/ops.yaml @@ -314,6 +314,7 @@ func : as_strided backward : as_strided_grad no_need_buffer : input + interfaces : paddle::dialect::InferSymbolicShapeInterface - op : asgd_ args : (Tensor param, Tensor grad, Tensor learning_rate, Tensor d, Tensor y, Tensor n, Tensor master_param, bool multi_precision=false) @@ -1931,6 +1932,7 @@ kernel: func: fold backward: fold_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface - op : fractional_max_pool2d args : (Tensor x, int[] output_size, int[] kernel_size = {0, 0}, float random_u = 0.0, bool return_mask = true) @@ -4985,6 +4987,7 @@ optional : gt_score intermediate : objectness_mask, gt_match_mask backward : yolo_loss_grad + interfaces : paddle::dialect::InferSymbolicShapeInterface - op : zeros args : (IntArray shape, DataType dtype=DataType::FLOAT32, Place place=CPUPlace()) From 243febb6eb369d1bb128aad0f11932cb060d60c1 Mon Sep 17 00:00:00 2001 From: Fripping <124574028+Fripping@users.noreply.github.com> Date: Mon, 5 Aug 2024 15:49:17 +0800 Subject: [PATCH 3/8] Delete paddle/phi/ops/yaml/.ipynb_checkpoints directory --- .../.ipynb_checkpoints/ops-checkpoint.yaml | 5030 ----------------- 1 file changed, 5030 deletions(-) delete mode 100755 paddle/phi/ops/yaml/.ipynb_checkpoints/ops-checkpoint.yaml diff --git a/paddle/phi/ops/yaml/.ipynb_checkpoints/ops-checkpoint.yaml b/paddle/phi/ops/yaml/.ipynb_checkpoints/ops-checkpoint.yaml deleted file mode 100755 index 0a6da0f668217..0000000000000 --- a/paddle/phi/ops/yaml/.ipynb_checkpoints/ops-checkpoint.yaml +++ /dev/null @@ -1,5030 +0,0 @@ -# This file is designed for C++ operators, which manages the -# generated code for dynamic mode and static mode. If you want -# to add the new operator configuration, make sure an operator's -# Python API, dynamic graph API, and static graph Operator parameters -# are consistent and correspond one-to-one. It's forbidden that the -# operator configured in this yaml file does not have Python API. 
-
-- op : abs
-  args : (Tensor x)
-  output : Tensor(out)
-  infer_meta :
-    func : RealAndImagInferMeta
-  kernel :
-    func : abs
-    data_type : x
-  inplace: (x -> out)
-  backward : abs_grad
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : accuracy
-  args : (Tensor x, Tensor indices, Tensor label)
-  output : Tensor(accuracy), Tensor(correct), Tensor(total)
-  infer_meta :
-    func : AccuracyInferMeta
-  kernel :
-    func : accuracy
-    data_type : x
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-  traits : paddle::dialect::ForwardOnlyTrait
-
-- op : accuracy_check
-  args : (Tensor x, Tensor y, str fn_name, double rtol=1e-5, double atol=1e-8, bool equal_nan=false)
-  output : Tensor(out)
-  infer_meta :
-    func : ValueCompareInferMeta
-    param: [x, y]
-  kernel :
-    func : accuracy_check
-    data_type : x
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : acos
-  args : (Tensor x)
-  output : Tensor(out)
-  infer_meta :
-    func : UnchangedInferMeta
-  kernel :
-    func : acos
-  inplace: (x -> out)
-  backward : acos_grad
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : acosh
-  args : (Tensor x)
-  output : Tensor(out)
-  infer_meta :
-    func : UnchangedInferMeta
-  kernel :
-    func : acosh
-  inplace: (x -> out)
-  backward : acosh_grad
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : adadelta_
-  args : (Tensor param, Tensor grad, Tensor avg_squared_grad, Tensor avg_squared_update,
-    Tensor learning_rate, Tensor master_param, float rho = 0.95f, float epsilon =
-    1.0e-6f, bool multi_precision = false)
-  output : Tensor(param_out), Tensor(moment_out), Tensor(inf_norm_out), Tensor(master_param_out)
-  infer_meta :
-    func : AdadeltaInferMeta
-  kernel :
-    func : adadelta
-    data_type : param
-  optional : master_param, master_param_out
-  inplace : (param -> param_out), (avg_squared_grad -> moment_out), (avg_squared_update -> inf_norm_out), (master_param -> master_param_out)
-
-- op : adagrad_
-  args : (Tensor param, Tensor grad, Tensor moment, Tensor learning_rate, Tensor master_param, float epsilon = 1.0e-6f, bool multi_precision = false)
-  output : Tensor(param_out), Tensor(moment_out), Tensor(master_param_out)
-  infer_meta :
-    func : AdagradInferMeta
-  kernel :
-    func : adagrad {dense, dense, dense, dense, dense -> dense, dense, dense}
-           adagrad_dense_param_sparse_grad {dense, selected_rows, dense, dense, dense -> dense, dense, dense}
-    data_type : param
-  optional : master_param, master_param_out
-  inplace : (param -> param_out), (moment -> moment_out), (master_param -> master_param_out)
-  traits : pir::SideEffectTrait
-
-- op : adam_
-  args : (Tensor param, Tensor grad, Tensor learning_rate, Tensor moment1, Tensor moment2, Tensor beta1_pow, Tensor beta2_pow, Tensor master_param, Tensor skip_update, Scalar beta1 = 0.9f, Scalar beta2 = 0.999f, Scalar epsilon = 1.0e-8f, bool lazy_mode = false, int64_t min_row_size_to_use_multithread = 1000, bool multi_precision = false, bool use_global_beta_pow = false)
-  output : Tensor(param_out), Tensor(moment1_out), Tensor(moment2_out), Tensor(beta1_pow_out), Tensor(beta2_pow_out), Tensor(master_param_out)
-  infer_meta :
-    func : AdamInferMeta
-    spmd_rule : AdamInferSpmdDynamic
-  kernel :
-    func : adam {dense, dense, dense, dense, dense, dense, dense, dense, dense -> dense, dense, dense, dense, dense, dense},
-           adam_dense_param_sparse_grad {dense, selected_rows, dense, dense, dense, dense, dense, dense, dense -> dense, dense, dense, dense, dense, dense}
-    data_type : param
-  optional : master_param, skip_update, master_param_out
-  inplace : (param -> param_out), (moment1 -> moment1_out), (moment2 -> moment2_out), (beta1_pow -> beta1_pow_out), (beta2_pow -> beta2_pow_out), (master_param -> master_param_out)
-  traits : pir::SideEffectTrait
-
-- op : adamax_
-  args : (Tensor param, Tensor grad, Tensor learning_rate, Tensor moment, Tensor inf_norm, Tensor beta1_pow, Tensor master_param, float beta1 = 0.9f, float beta2 = 0.999f, float epsilon = 1.0e-8f, bool multi_precision = false)
-  output : Tensor(param_out), Tensor(moment_out), Tensor(inf_norm_out), Tensor(master_param_out)
-  infer_meta :
-    func : AdamaxInferMeta
-  kernel :
-    func : adamax
-    data_type : param
-  optional : master_param, master_param_out
-  inplace : (param -> param_out), (moment -> moment_out), (inf_norm -> inf_norm_out), (master_param ->master_param_out)
-  traits : pir::SideEffectTrait
-
-- op : adamw_
-  args : (Tensor param, Tensor grad, Tensor learning_rate, Tensor moment1, Tensor moment2, Tensor beta1_pow, Tensor beta2_pow, Tensor master_param, Tensor skip_update, Scalar beta1 = 0.9f, Scalar beta2 = 0.999f, Scalar epsilon = 1.0e-8f, float lr_ratio = 1.0f, float coeff = 0.01f, bool with_decay = false, bool lazy_mode = false, int64_t min_row_size_to_use_multithread = 1000, bool multi_precision = false, bool use_global_beta_pow = false)
-  output : Tensor(param_out), Tensor(moment1_out), Tensor(moment2_out), Tensor(beta1_pow_out), Tensor(beta2_pow_out), Tensor(master_param_out)
-  infer_meta :
-    func : AdamwInferMeta
-    spmd_rule : AdamwInferSpmdDynamic
-  kernel :
-    func : adamw
-    data_type : param
-  optional : master_param, skip_update, master_param_out
-  inplace : (param -> param_out), (moment1 -> moment1_out), (moment2 -> moment2_out), (beta1_pow -> beta1_pow_out), (beta2_pow -> beta2_pow_out), (master_param -> master_param_out)
-  traits : pir::SideEffectTrait
-
-- op : add_position_encoding
-  args: (Tensor x, float alpha = 1.0f, float beta = 1.0f)
-  output: Tensor (out)
-  infer_meta:
-    func: AddPositionEncodingInferMeta
-  kernel:
-    func: add_position_encoding
-    data_type: x
-  backward: add_position_encoding_grad
-
-- op : addmm
-  args : (Tensor input, Tensor x, Tensor y, float beta=1.0, float alpha=1.0)
-  output : Tensor(out)
-  infer_meta :
-    func : AddmmInferMeta
-  kernel :
-    func : addmm
-    data_type : x
-  inplace: (input -> out)
-  backward : addmm_grad
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : affine_channel
-  args: (Tensor x, Tensor scale, Tensor bias, str data_layout = "AnyLayout")
-  output: Tensor (out)
-  infer_meta:
-    func: AffineChannelInferMeta
-  kernel:
-    func: affine_channel
-  backward: affine_channel_grad
-  inplace : (x -> out)
-
-- op : affine_grid
-  args : (Tensor input, IntArray output_shape={}, bool align_corners=true)
-  output : Tensor(output)
-  infer_meta :
-    func : AffineGridInferMeta
-    param : [input, output_shape, align_corners]
-  kernel :
-    func : affine_grid
-    param : [input, output_shape, align_corners]
-    data_type : input
-  backward : affine_grid_grad
-
-- op : all
-  args : (Tensor x, int64_t[] axis={}, bool keepdim=false)
-  output : Tensor(out)
-  infer_meta :
-    func : ReduceInferMeta
-    spmd_rule : ReductionAllInferSpmdDynamic
-  kernel :
-    func : all
-  traits : paddle::dialect::ForwardOnlyTrait
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : all_gather
-  args : (Tensor x, int ring_id = 0, int nranks=0)
-  output : Tensor(out)
-  infer_meta :
-    func : AllGatherInferMeta
-    param: [x, nranks]
-  kernel :
-    func : all_gather
-    param: [x, nranks]
-
-- op : allclose
-  args : (Tensor x, Tensor y, Scalar(double) rtol=1e-5, Scalar(double) atol=1e-8, bool equal_nan=false)
-  output : Tensor(out)
-  infer_meta :
-    func : AllValueCompareInferMeta
-    param: [x, y]
-  kernel :
-    func : allclose
-    data_type : x
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : amax
-  args : (Tensor x, int64_t[] axis={}, bool keepdim=false)
-  output : Tensor(out)
-  infer_meta :
-    func : ReduceInferMeta
-  kernel :
-    func : amax
-  backward : amax_grad
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : amin
-  args : (Tensor x, int64_t[] axis={}, bool keepdim=false)
-  output : Tensor(out)
-  infer_meta :
-    func : ReduceInferMeta
-  kernel :
-    func : amin
-  backward : amin_grad
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : angle
-  args : (Tensor x)
-  output : Tensor
-  infer_meta :
-    func : RealAndImagInferMeta
-  kernel :
-    func : angle
-  backward : angle_grad
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : any
-  args : (Tensor x, int64_t[] axis={}, bool keepdim=false)
-  output : Tensor(out)
-  infer_meta :
-    func : ReduceInferMeta
-  kernel :
-    func : any
-  traits : paddle::dialect::ForwardOnlyTrait
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : apply_per_channel_scale
-  args: (Tensor x, Tensor scales)
-  output: Tensor(out)
-  infer_meta :
-    func : ApplyPerChannelScaleInferMeta
-  kernel :
-    func : apply_per_channel_scale
-    data_type : x
-
-- op : argmax
-  args : (Tensor x, Scalar(int64_t) axis, bool keepdims = false, bool flatten = false, DataType dtype = DataType::INT64)
-  output : Tensor(out)
-  infer_meta :
-    func : ArgMinMaxInferMeta
-    spmd_rule : ArgMaxInferSpmdDynamic
-  kernel :
-    func : argmax
-    data_type : x
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-  traits : paddle::dialect::ForwardOnlyTrait
-
-- op : argmin
-  args : (Tensor x, Scalar(int64_t) axis, bool keepdims = false, bool flatten = false, DataType dtype = DataType::INT64)
-  output : Tensor(out)
-  infer_meta :
-    func : ArgMinMaxInferMeta
-  kernel :
-    func : argmin
-    data_type : x
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-  traits : paddle::dialect::ForwardOnlyTrait
-
-- op : argsort
-  args : (Tensor x, int axis=-1, bool descending=false, bool stable=false)
-  output : Tensor(out), Tensor(indices)
-  infer_meta :
-    func : ArgsortInferMeta
-  kernel :
-    func : argsort
-  backward : argsort_grad
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : as_complex
-  args : (Tensor x)
-  output : Tensor
-  infer_meta :
-    func : AsComplexInferMeta
-  kernel :
-    func : as_complex
-  backward : as_complex_grad
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : as_real
-  args : (Tensor x)
-  output : Tensor
-  infer_meta :
-    func : AsRealInferMeta
-  kernel :
-    func : as_real
-  backward : as_real_grad
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : as_strided
-  args : (Tensor input, int64_t[] dims = {}, int64_t[] stride = {}, int64_t offset = 0)
-  output : Tensor
-  infer_meta :
-    func : StridedUnChangedInferMeta
-    param : [input]
-  kernel :
-    func : as_strided
-  backward : as_strided_grad
-  no_need_buffer : input
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : asgd_
-  args : (Tensor param, Tensor grad, Tensor learning_rate, Tensor d, Tensor y, Tensor n, Tensor master_param, bool multi_precision=false)
-  output : Tensor(param_out), Tensor(d_out), Tensor(y_out), Tensor(master_param_out)
-  infer_meta :
-    func : ASGDInferMeta
-  kernel :
-    func : asgd
-    data_type : param
-  data_transform :
-    support_trans_dtype : learning_rate, n
-  optional : master_param, master_param_out
-  inplace : (param -> param_out), (d -> d_out), (y -> y_out), (master_param -> master_param_out)
-  traits : pir::SideEffectTrait
-
-- op : asin
-  args : (Tensor x)
-  output : Tensor(out)
-  infer_meta :
-    func : UnchangedInferMeta
-  kernel :
-    func : asin
-  inplace: (x -> out)
-  backward : asin_grad
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : asinh
-  args : (Tensor x)
-  output : Tensor(out)
-  infer_meta :
-    func : UnchangedInferMeta
-  kernel :
-    func : asinh
-  inplace: (x -> out)
-  backward : asinh_grad
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : assign_out_
-  args : (Tensor x, Tensor output)
-  output : Tensor(out)
-  infer_meta :
-    func : UnchangedInferMeta
-    param : [x]
-  kernel :
-    func : assign
-    param : [x]
-  inplace : (output -> out)
-  backward : assign_out__grad
-  traits : pir::SideEffectTrait
-
-- op : assign_pos
-  args : (Tensor x, Tensor cum_count, Tensor eff_num_len)
-  output : Tensor(out)
-  infer_meta :
-    func : AssignPosInferMeta
-  kernel :
-    func : assign_pos
-
-- op : assign_value_
-  args : (Tensor output, int[] shape, DataType dtype, Scalar[] values, Place place = {})
-  output : Tensor(out)
-  inplace: (output -> out)
-  infer_meta :
-    func : AssignValueInferMeta
-    param : [shape, dtype]
-  kernel :
-    func : assign_value
-    param : [shape, dtype, values]
-    data_type : dtype
-    backend : place > output
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-  traits : paddle::dialect::ForwardOnlyTrait
-
-- op : atan
-  args : (Tensor x)
-  output : Tensor(out)
-  infer_meta :
-    func : UnchangedInferMeta
-  kernel :
-    func : atan
-  inplace: (x -> out)
-  backward : atan_grad
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : atan2
-  args : (Tensor x, Tensor y)
-  output : Tensor(out)
-  infer_meta :
-    func : Atan2InferMeta
-  kernel :
-    func : atan2
-  backward : atan2_grad
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : atanh
-  args : (Tensor x)
-  output : Tensor(out)
-  infer_meta :
-    func : UnchangedInferMeta
-  kernel :
-    func : atanh
-  inplace: (x -> out)
-  backward : atanh_grad
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : attention_lstm
-  args: (Tensor x, Tensor c0, Tensor h0, Tensor attention_weight, Tensor attention_bias,
-    Tensor attention_scalar, Tensor attention_scalar_bias, Tensor lstm_weight,
-    Tensor lstm_bias, str gate_activation = "sigmoid", str cell_activation = "tanh",
-    str candidate_activation = "tanh")
-  output: Tensor (hidden), Tensor (cell), Tensor (attentioned_x), Tensor (attention_fc_out),
-    Tensor (lstm_x), Tensor (lstm_out)
-  infer_meta:
-    func: AttentionLstmInferMeta
-  kernel:
-    func: attention_lstm
-    data_type: x
-  optional: h0, attention_bias, attention_scalar, attention_scalar_bias
-  intermediate: attentioned_x, attention_fc_out, lstm_x, lstm_out
-
-- op : auc
-  args : (Tensor x, Tensor label, Tensor stat_pos, Tensor stat_neg, Tensor ins_tag_weight, str curve = "ROC", int num_thresholds = (2 << 12) - 1, int slide_steps = 1)
-  output : Tensor(auc), Tensor(stat_pos_out), Tensor(stat_neg_out)
-  infer_meta :
-    func : AucInferMeta
-  kernel :
-    func : auc
-    data_type : x
-  optional : ins_tag_weight
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : average_accumulates_
-  args : (Tensor param, Tensor in_sum_1, Tensor in_sum_2, Tensor in_sum_3, Tensor in_num_accumulates, Tensor in_old_num_accumulates, Tensor in_num_updates, float average_window = 0, int64_t max_average_window = INT64_MAX, int64_t min_average_window = 10000L)
-  output : Tensor(out_sum_1), Tensor(out_sum_2), Tensor(out_sum_3), Tensor(out_num_accumulates), Tensor(out_old_num_accumulates), Tensor(out_num_updates)
-  infer_meta:
-    func : AverageAccumulatesInferMeta
-  kernel :
-    func : average_accumulates {dense, dense, dense, dense, dense ,dense, dense -> dense, dense, dense, dense, dense, dense}
-    data_type : param
-  inplace : (in_sum_1 -> out_sum_1), (in_sum_2 -> out_sum_2), (in_sum_3 -> out_sum_3), (in_num_accumulates -> out_num_accumulates), (in_old_num_accumulates -> out_old_num_accumulates), (in_num_updates -> out_num_updates)
-
-- op : batch_fc
-  args : (Tensor input, Tensor w, Tensor bias)
-  output : Tensor(out)
-  infer_meta:
-    func : BatchFCInferMeta
-  kernel :
-    func : batch_fc
-    data_type: input
-  backward: batch_fc_grad
-
-- op : bce_loss
-  args : (Tensor input, Tensor label)
-  output : Tensor
-  infer_meta :
-    func : BCELossInferMeta
-  kernel :
-    func : bce_loss
-    data_type : input
-  inplace : (input -> out)
-  backward : bce_loss_grad
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : beam_search
-  args: (Tensor pre_ids, Tensor pre_scores, Tensor ids, Tensor scores, int level,
-    int beam_size, int end_id, bool is_accumulated = true)
-  output: Tensor (selected_ids), Tensor (selected_scores), Tensor (parent_idx)
-  infer_meta:
-    func: BeamSearchInferMeta
-  kernel:
-    func: beam_search
-    data_type: pre_ids
-  optional: ids, parent_idx
-
-- op : bernoulli
-  args : (Tensor x)
-  output : Tensor(out)
-  infer_meta :
-    func : UnchangedInferMeta
-  kernel :
-    func : bernoulli
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-  traits : paddle::dialect::ForwardOnlyTrait
-
-- op : bicubic_interp
-  args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_format="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1)
-  output : Tensor(output)
-  infer_meta :
-    func : InterpolateInferMeta
-  optional: out_size, size_tensor, scale_tensor
-  kernel :
-    func : bicubic_interp
-    data_type : x
-  backward : bicubic_interp_grad
-  data_transform :
-    skip_transform : out_size, size_tensor, scale_tensor
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : bilinear
-  args : (Tensor x, Tensor y, Tensor weight, Tensor bias)
-  output : Tensor
-  infer_meta :
-    func : BilinearInferMeta
-  kernel :
-    func : bilinear
-  optional : bias
-  backward : bilinear_grad
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : bilinear_interp
-  args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_format="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1)
-  output : Tensor(output)
-  infer_meta :
-    func : InterpolateInferMeta
-  optional: out_size, size_tensor, scale_tensor
-  kernel :
-    func : bilinear_interp
-    data_type : x
-  backward : bilinear_interp_grad
-  data_transform :
-    skip_transform : out_size, size_tensor, scale_tensor
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : bincount
-  args: (Tensor x, Tensor weights, Scalar(int) minlength = 0)
-  output: Tensor(out)
-  infer_meta:
-    func: BincountInferMeta
-  kernel:
-    func: bincount
-  optional: weights
-
-- op : binomial
-  args : (Tensor count, Tensor prob)
-  output : Tensor(out)
-  infer_meta :
-    func : BinomialInferMeta
-  kernel :
-    func : binomial
-
-- op : bipartite_match
-  args: (Tensor dist_mat, str match_type = "bipartite", float dist_threshold = 0.5)
-  output: Tensor (col_to_row_match_indices), Tensor (col_to_row_match_dist)
-  infer_meta:
-    func: BipartiteMatchInferMeta
-  kernel:
-    func: bipartite_match
-    data_type: dist_mat
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : bitwise_and
-  args : (Tensor x, Tensor y)
-  output : Tensor(out)
-  infer_meta :
-    func : ElementwiseInferMeta
-    spmd_rule : ElementwiseBinaryInferSpmd
-  kernel :
-    func : bitwise_and
-    backend : x
-  inplace: (x -> out)
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-  traits : paddle::dialect::ForwardOnlyTrait
-
-- op : bitwise_left_shift
-  args : (Tensor x, Tensor y, bool is_arithmetic = true)
-  output : Tensor(out)
-  infer_meta :
-    func : BitwiseShiftInferMeta
-  kernel :
-    func : bitwise_left_shift
-    backend : x
-  inplace: (x -> out)
-  traits : paddle::dialect::ForwardOnlyTrait
-
-- op : bitwise_not
-  args : (Tensor x)
-  output : Tensor(out)
-  infer_meta :
-    func : UnchangedInferMeta
-    spmd_rule : ElementwiseUnaryInferSpmd
-  kernel :
-    func : bitwise_not
-    backend : x
-  inplace: (x -> out)
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-  traits : paddle::dialect::ForwardOnlyTrait
-
-- op : bitwise_or
-  args : (Tensor x, Tensor y)
-  output : Tensor(out)
-  infer_meta :
-    func : ElementwiseInferMeta
-  kernel :
-    func : bitwise_or
-    backend : x
-  inplace: (x -> out)
-  traits : paddle::dialect::ForwardOnlyTrait
-
-- op : bitwise_right_shift
-  args : (Tensor x, Tensor y, bool is_arithmetic = true)
-  output : Tensor(out)
-  infer_meta :
-    func : BitwiseShiftInferMeta
-  kernel :
-    func : bitwise_right_shift
-    backend : x
-  inplace: (x -> out)
-  traits : paddle::dialect::ForwardOnlyTrait
-
-- op : bitwise_xor
-  args : (Tensor x, Tensor y)
-  output : Tensor(out)
-  infer_meta :
-    func : ElementwiseInferMeta
-  kernel :
-    func : bitwise_xor
-    backend : x
-  inplace: (x -> out)
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-  traits : paddle::dialect::ForwardOnlyTrait
-
-- op : bmm
-  args : (Tensor x, Tensor y)
-  output : Tensor
-  infer_meta :
-    func : BmmInferMeta
-  kernel :
-    func : bmm
-  backward : bmm_grad
-
-- op : box_clip
-  args: (Tensor input, Tensor im_info)
-  output: Tensor (output)
-  infer_meta:
-    func: BoxClipInferMeta
-  kernel:
-    func: box_clip
-
-- op : box_coder
-  args : (Tensor prior_box, Tensor prior_box_var, Tensor target_box, str code_type = "encode_center_size", bool box_normalized = true, int axis = 0, float[] variance = {})
-  output : Tensor(output_box)
-  infer_meta :
-    func : BoxCoderInferMeta
-  kernel :
-    func : box_coder
-  optional : prior_box_var
-
-- op : broadcast_tensors
-  args: (Tensor[] input)
-  output: Tensor[]{input.size()}
-  infer_meta:
-    func: BroadcastTensorsInferMeta
-  kernel:
-    func: broadcast_tensors
-    data_type : input
-  backward: broadcast_tensors_grad
-
-- op : c_allgather
-  args : (Tensor x, int ring_id, int nranks, bool use_calc_stream)
-  output : Tensor(out)
-  infer_meta :
-    func : AllGatherInferMeta
-    param: [x, nranks]
-  kernel :
-    func : c_allgather
-
-- op : c_allreduce_max
-  args : (Tensor x, int ring_id, bool use_calc_stream, bool use_model_parallel)
-  output : Tensor(out)
-  infer_meta :
-    func : AllReduceInferMeta
-    param : [x]
-  kernel :
-    func : c_allreduce_max
-  inplace : (x -> out)
-
-- op : c_allreduce_min
-  args : (Tensor x, int ring_id, bool use_calc_stream, bool use_model_parallel)
-  output : Tensor(out)
-  infer_meta :
-    func : AllReduceInferMeta
-    param : [x]
-  kernel :
-    func : c_allreduce_min
-  inplace : (x -> out)
-
-- op : c_allreduce_prod
-  args : (Tensor x, int ring_id, bool use_calc_stream, bool use_model_parallel)
-  output : Tensor(out)
-  infer_meta :
-    func : AllReduceInferMeta
-    param : [x]
-  kernel :
-    func : c_allreduce_prod
-  inplace : (x -> out)
-
-- op : c_allreduce_sum
-  args : (Tensor x, int ring_id, bool use_calc_stream, bool use_model_parallel)
-  output : Tensor(out)
-  infer_meta :
-    func : AllReduceInferMeta
-    param : [x]
-  kernel :
-    func : c_allreduce_sum
-  inplace : (x -> out)
-
-- op : c_broadcast
-  args : (Tensor x, int ring_id=0, int root=0, bool use_calc_stream=false)
-  output : Tensor(out)
-  infer_meta :
-    func : UnchangedInferMeta
-    param : [x]
-  kernel :
-    func : c_broadcast
-  inplace : (x -> out)
-
-- op : c_concat
-  args : (Tensor x, int rank, int nranks, int ring_id, bool use_calc_stream, bool use_model_parallel)
-  output : Tensor(out)
-  infer_meta :
-    func : CConcatInferMeta
-    param : [x, nranks]
-  kernel :
-    func : c_concat
-
-- op : c_identity
-  args : (Tensor x, int ring_id, bool use_calc_stream, bool use_model_parallel)
-  output : Tensor(out)
-  infer_meta :
-    func : CIdentityInferMeta
-  kernel :
-    func : c_identity
-  inplace : (x -> out)
-
-- op : c_reduce_sum
-  args : (Tensor x, int ring_id, int root_id, bool use_calc_stream)
-  output : Tensor(out)
-  infer_meta :
-    func : DistReduceInferMeta
-    param : [x]
-  kernel :
-    func : c_reduce_sum
-  inplace : (x -> out)
-
-- op : c_sync_calc_stream
-  args : (Tensor x)
-  output : Tensor(out)
-  infer_meta :
-    func : UnchangedInferMeta
-    param : [x]
-  kernel :
-    func : c_sync_calc_stream
-  inplace : (x -> out)
-
-- op : c_sync_comm_stream
-  args : (Tensor x, int ring_id)
-  output : Tensor(out)
-  infer_meta :
-    func : UnchangedInferMeta
-    param : [x]
-  kernel :
-    func : c_sync_comm_stream
-  inplace : (x -> out)
-
-- op : calc_reduced_attn_scores
-  args : (Tensor q, Tensor k, Tensor softmax_lse)
-  output : Tensor(reduced_scores)
-  infer_meta :
-    func : CalcReducedAttnScoresInferMeta
-    param : [q, k, softmax_lse]
-  kernel :
-    func : calc_reduced_attn_scores
-    data_type : q
-
-- op : cast
-  args : (Tensor x, DataType dtype)
-  output : Tensor(out)
-  infer_meta :
-    func : CastInferMeta
-    spmd_rule : CastInferSpmd
-  kernel :
-    func : cast
-    param : [x, dtype]
-    data_type : x
-  inplace: (x -> out)
-  backward : cast_grad
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : ceil
-  args : (Tensor x)
-  output : Tensor(out)
-  infer_meta :
-    func : UnchangedInferMeta
-  kernel :
-    func : ceil
-  inplace : (x -> out)
-  backward : ceil_grad
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : celu
-  args : (Tensor x, float alpha = 1.0)
-  output : Tensor(out)
-  infer_meta :
-    func : UnchangedInferMeta
-    param: [x]
-  kernel :
-    func : celu
-  backward : celu_grad
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : channel_shuffle
-  args : (Tensor x, int groups, str data_format="NCHW")
-  output : Tensor(out)
-  infer_meta :
-    func : ChannelShuffleInferMeta
-  kernel :
-    func : channel_shuffle
-  backward : channel_shuffle_grad
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : check_finite_and_unscale_
-  args : (Tensor[] x, Tensor scale)
-  output : Tensor[](out){x.size()}, Tensor(found_infinite)
-  infer_meta :
-    func : CheckFiniteAndUnscaleInferMeta
-    param : [x, scale]
-    spmd_rule : CheckFiniteAndUnscaleSpmd
-  kernel :
-    func : check_finite_and_unscale
-    param : [x, scale]
-    data_type : x
-  inplace : (x -> out)
-
-- op : check_numerics
-  args : (Tensor tensor, str op_type = "", str var_name = "", int check_nan_inf_level = 0, int stack_height_limit = -1, str output_dir = "")
-  output : Tensor(stats), Tensor(values)
-  infer_meta :
-    func : CheckNumericsInferMeta
-  kernel :
-    func : check_numerics
-
-- op : cholesky
-  args : (Tensor x, bool upper=false)
-  output : Tensor
-  infer_meta :
-    func : CholeskyInferMeta
-  kernel :
-    func : cholesky
-  backward : cholesky_grad
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : cholesky_solve
-  args : (Tensor x, Tensor y, bool upper=false)
-  output : Tensor
-  infer_meta :
-    func : CholeskySolveInferMeta
-  kernel :
-    func : cholesky_solve
-  backward : cholesky_solve_grad
-
-- op : class_center_sample
-  args : (Tensor label, int num_classes, int num_samples, int ring_id = 0, int rank = 0, int nranks = 1, bool fix_seed = false, int seed = 0)
-  output : Tensor(remapped_label), Tensor(sampled_local_class_center)
-  infer_meta :
-    func : ClassCenterSampleInferMeta
-  kernel :
-    func : class_center_sample
-    data_type : label
-  traits : pir::SideEffectTrait
-
-- op : clip
-  args : (Tensor x, Scalar(float) min, Scalar(float) max)
-  output : Tensor(out)
-  inplace : (x -> out)
-  infer_meta :
-    func : UnchangedInferMeta
-    param : [x]
-  kernel :
-    func : clip
-    data_type : x
-  backward : clip_grad
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : clip_by_norm
-  args : (Tensor x, float max_norm)
-  output : Tensor(out)
-  infer_meta :
-    func : ClipByNormInferMeta
-  kernel :
-    func : clip_by_norm {dense -> dense}
-           clip_by_norm_sr {selected_rows -> selected_rows}
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : coalesce_tensor
-  args : (Tensor[] input, DataType dtype, bool copy_data = false, bool set_constant = false, bool persist_output = false, float constant = 0.0, bool use_align = true, int align_size = -1, int size_of_dtype = -1, int64_t[] concated_shapes = {}, int64_t[] concated_ranks = {})
-  output : Tensor[](output){input.size()}, Tensor(fused_output)
-  infer_meta :
-    func : CoalesceTensorInferMeta
-  kernel :
-    func : coalesce_tensor
-    data_type : dtype
-
-- op : collect_fpn_proposals
-  args: (Tensor[] multi_level_rois, Tensor[] multi_level_scores, Tensor[] multi_level_rois_num,
-    int post_nms_topn)
-  output: Tensor (fpn_rois), Tensor (rois_num)
-  infer_meta:
-    func: CollectFpnProposalsInferMeta
-  kernel:
-    func: collect_fpn_proposals
-    data_type: multi_level_rois
-  optional: multi_level_rois_num, rois_num
-
-- op : complex
-  args : (Tensor real, Tensor imag)
-  output : Tensor
-  infer_meta :
-    func : ComplexInferMeta
-  kernel :
-    func : complex
-    data_type : real
-  backward : complex_grad
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : concat
-  args : (Tensor[] x, Scalar axis=0)
-  output : Tensor
-  infer_meta :
-    func : ConcatInferMeta
-    param : [x, axis]
-    spmd_rule : ConcatInferSpmdDynamic
-  kernel :
-    func : concat
-    data_type : x
-  backward : concat_grad
-  interfaces : paddle::dialect::InferSymbolicShapeInterface, paddle::dialect::LayoutTransformationInterface
-
-- op : conj
-  args : (Tensor x)
-  output : Tensor (out)
-  infer_meta :
-    func : UnchangedInferMeta
-  kernel :
-    func : conj
-  backward : conj_grad
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : conv2d
-  args : (Tensor input, Tensor filter, int[] strides={1, 1}, int[] paddings={0, 0}, str padding_algorithm="EXPLICIT", int[] dilations={1, 1}, int groups=1, str data_format="NCHW")
-  output : Tensor
-  infer_meta :
-    func : ConvInferMeta
-  kernel :
-    func : conv2d
-    data_type : input
-  backward : conv2d_grad
-  interfaces : paddle::dialect::InferSymbolicShapeInterface, paddle::dialect::LayoutTransformationInterface
-
-- op : conv2d_transpose
-  args : (Tensor x, Tensor filter, int[] strides={1, 1}, int[] paddings={0, 0}, int[] output_padding={}, IntArray output_size={}, str padding_algorithm="EXPLICIT", int groups=1, int[] dilations={1, 1}, str data_format="NCHW")
-  output : Tensor(out)
-  infer_meta :
-    func : Conv2dTransposeInferMeta
-  kernel :
-    func : conv2d_transpose
-    data_type : x
-  backward : conv2d_transpose_grad
-
-- op : conv2d_transpose_bias
-  args : (Tensor x, Tensor filter, Tensor bias, int[] strides={1, 1}, int[] paddings={0, 0}, int[] output_padding={}, IntArray output_size={}, str padding_algorithm="EXPLICIT", int groups=1, int[] dilations={1, 1}, str data_format="NCHW")
-  output : Tensor(out)
-  infer_meta :
-    func : Conv2dTransposeInferMeta
-    param: [x, filter, strides, paddings, output_padding, output_size, padding_algorithm, groups, dilations, data_format]
-  kernel :
-    func : conv2d_transpose_bias
-    data_type : x
-
-- op : conv3d
-  args : (Tensor input, Tensor filter, int[] strides={1, 1, 1}, int[] paddings={0, 0, 0}, str padding_algorithm="EXPLICIT", int groups=1, int[] dilations={1, 1, 1}, str data_format="NCDHW")
-  output : Tensor
-  infer_meta :
-    func : Conv3DInferMeta
-  kernel :
-    func : conv3d
-    data_type : input
-  backward : conv3d_grad
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : conv3d_transpose
-  args : (Tensor x, Tensor filter, int[] strides={1, 1, 1}, int[] paddings={0, 0, 0}, int[] output_padding={}, int[] output_size={}, str padding_algorithm="EXPLICIT", int groups=1, int[] dilations={1, 1, 1}, str data_format="NCHW")
-  output : Tensor(out)
-  infer_meta :
-    func : ConvTransposeInferMeta
-  kernel :
-    func : conv3d_transpose
-    data_type : x
-  backward : conv3d_transpose_grad
-
-- op : copy_to
-  args : (Tensor x, Place place, bool blocking)
-  output : Tensor(out)
-  invoke : copy_to_impl(x, place, blocking)
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : copysign
-  args : (Tensor x, Tensor y)
-  output : Tensor(out)
-  infer_meta :
-    func : ElementwiseInferMeta
-  kernel :
-    func : copysign
-  inplace: (x -> out)
-  backward : copysign_grad
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : correlation
-  args : (Tensor input1, Tensor input2, int pad_size, int kernel_size, int max_displacement, int stride1, int stride2, int corr_type_multiply=1)
-  output : Tensor(out)
-  infer_meta :
-    func : CorrelationInferMeta
-  kernel :
-    func : correlation
-    data_type : input1
-  backward : correlation_grad
-
-- op : cos
-  args : (Tensor x)
-  output : Tensor(out)
-  infer_meta :
-    func : UnchangedInferMeta
-    spmd_rule : ElementwiseUnaryInferSpmd
-  kernel :
-    func : cos
-  inplace: (x -> out)
-  backward : cos_grad
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : cosh
-  args : (Tensor x)
-  output : Tensor(out)
-  infer_meta :
-    func : UnchangedInferMeta
-  kernel :
-    func : cosh
-  inplace: (x -> out)
-  backward : cosh_grad
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : crf_decoding
-  args: (Tensor emission, Tensor transition, Tensor label, Tensor length)
-  output: Tensor (viterbi_path)
-  infer_meta:
-    func: CrfDecodingInferMeta
-  kernel:
-    func: crf_decoding
-    data_type: emission
-  optional: label, length
-
-- op : crop
-  args : (Tensor x, IntArray shape = {}, IntArray offsets = {})
-  output : Tensor(out)
-  infer_meta :
-    func : CropInferMeta
-  kernel :
-    func : crop
-    data_type : x
-  backward : crop_grad
-
-- op : cross
-  args : (Tensor x, Tensor y, int axis = 9)
-  output : Tensor
-  infer_meta :
-    func : CrossInferMeta
-  kernel :
-    func : cross
-    data_type : x
-  backward : cross_grad
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-# Part of python API paddle.nn.functional.cross_entropy
-- op : cross_entropy_with_softmax
-  args : (Tensor input, Tensor label, bool soft_label=false, bool use_softmax=true, bool numeric_stable_mode=true, int ignore_index=-100, int axis=-1)
-  output : Tensor(softmax), Tensor(loss)
-  inplace : (input -> softmax)
-  infer_meta :
-    func : CrossEntropyWithSoftmaxInferMeta
-    spmd_rule: CrossEntropyWithSoftmaxInferSpmd
-  kernel :
-    func : cross_entropy_with_softmax
-    data_type : input
-  backward : cross_entropy_with_softmax_grad
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : ctc_align
-  args: (Tensor input, Tensor input_length, int blank = 0, bool merge_repeated = true,
-    int padding_value = 0)
-  output: Tensor (output), Tensor (output_length)
-  infer_meta:
-    func: CtcAlignInferMeta
-  kernel:
-    func: ctc_align
-    data_type: input
-  optional: input_length, output_length
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : cudnn_lstm
-  args: (Tensor x, Tensor init_h, Tensor init_c, Tensor w, Tensor[] weight_list, Tensor sequence_length, float dropout_prob = 0.0, bool is_bidirec = false, int hidden_size = 100, int num_layers = 1, bool is_test = false, int seed = 0)
-  output: Tensor (out), Tensor (last_h), Tensor (last_c), Tensor (reserve), Tensor (state_out)
-  infer_meta:
-    func: CudnnLSTMInferMeta
-  kernel:
-    func: cudnn_lstm
-    data_type: x
-  optional: w, weight_list, sequence_length
-  intermediate: reserve
-  backward: cudnn_lstm_grad
-
-- op : cummax
-  args : (Tensor x, int axis=-1, DataType dtype = DataType::INT64)
-  output : Tensor(out), Tensor(indices)
-  infer_meta :
-    func : CumWithIndicesInferMeta
-  kernel :
-    func : cummax
-    data_type : x
-  backward : cummax_grad
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : cummin
-  args : (Tensor x, int axis=-1, DataType dtype = DataType::INT64)
-  output : Tensor(out), Tensor(indices)
-  infer_meta :
-    func : CumWithIndicesInferMeta
-  kernel :
-    func : cummin
-    data_type : x
-  backward : cummin_grad
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : cumprod
-  args : (Tensor x, int dim, bool exclusive=false, bool reverse=false)
-  output : Tensor(out)
-  infer_meta :
-    func : UnchangedInferMetaCheckAxis
-    param : [x, dim]
-  kernel :
-    func : cumprod
-  inplace: (x -> out)
-  backward : cumprod_grad
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : cumsum
-  args : (Tensor x, Scalar axis=-1, bool flatten=false, bool exclusive=false, bool reverse=false)
-  output : Tensor(out)
-  infer_meta :
-    func : CumScalarAxisInferMeta
-  kernel :
-    func : cumsum
-    data_type : x
-  inplace: (x -> out)
-  backward : cumsum_grad
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : cvm
-  args: (Tensor x, Tensor cvm, bool use_cvm = true)
-  output: Tensor (out)
-  infer_meta:
-    func: CvmInferMeta
-  kernel:
-    func: cvm
-    data_type: x
-  backward: cvm_grad
-  no_need_buffer: cvm
-
-- op : data
-  args : (str name, IntArray shape, DataType dtype, Place place)
-  output : Tensor(out)
-  infer_meta :
-    func : DataInferMeta
-    param : [name, shape, dtype]
-  kernel:
-    func : data
-    param : [name, shape, dtype]
-    data_type : dtype
-    backend : place
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : decayed_adagrad
-  args : (Tensor param, Tensor grad, Tensor moment, Tensor learning_rate, float decay = 0.95f, float epsilon = 1.0e-6f)
-  output : Tensor(param_out), Tensor(moment_out)
-  infer_meta :
-    func : DecayedAdagradInferMeta
-  kernel :
-    func : decayed_adagrad
-    data_type : param
-
-- op : decode_jpeg
-  args : (Tensor x, str mode, Place place)
-  output : Tensor(out)
-  infer_meta :
-    func : DecodeJpegInferMeta
-    param : [x, mode]
-  kernel :
-    func : decode_jpeg
-    param : [x, mode]
-    backend : place
-
-- op : deformable_conv
-  args : (Tensor x, Tensor offset, Tensor filter, Tensor mask, int[] strides, int[] paddings, int[] dilations, int deformable_groups, int groups, int im2col_step)
-  output : Tensor(out)
-  infer_meta :
-    func : DeformableConvInferMeta
-  kernel :
-    func : deformable_conv
-    data_type : x
-  optional : mask
-  backward : deformable_conv_grad
-
-- op : depend
-  args: (Tensor x, Tensor[] dep)
-  output: Tensor (out)
-  infer_meta:
-    func : UnchangedInferMeta
-    param : [x]
-  kernel:
-    func: depend
-
-- op : depthwise_conv2d
-  args : (Tensor input, Tensor filter, int[] strides={1, 1}, int[] paddings={0, 0}, str padding_algorithm="EXPLICIT", int groups=1, int[] dilations={1, 1}, str data_format="NCHW")
-  output : Tensor(out)
-  infer_meta :
-    func : DepthwiseConvInferMeta
-  kernel :
-    func : depthwise_conv2d
-    data_type : input
-  backward : depthwise_conv2d_grad
-
-- op : depthwise_conv2d_transpose
-  args : (Tensor x, Tensor filter, int[] strides={1, 1}, int[] paddings={0, 0}, int[] output_padding={}, IntArray output_size={}, str padding_algorithm="EXPLICIT", int groups=1, int[] dilations={1, 1}, str data_format="NCHW")
-  output : Tensor(out)
-  infer_meta :
-    func : Conv2dTransposeInferMeta
-  kernel :
-    func : depthwise_conv2d_transpose
-    data_type : x
-  backward : depthwise_conv2d_transpose_grad
-
-- op : dequantize_abs_max
-  args : (Tensor x, Tensor scale, float max_range)
-  output : Tensor(out)
-  infer_meta :
-    func : DequantizeAbsMaxInferMeta
-  kernel :
-    func : dequantize_abs_max
-    data_type : x
-
-- op : dequantize_log
-  args: (Tensor x, Tensor dict)
-  output: Tensor(out)
-  infer_meta:
-    func: DequantizeLogInferMeta
-  kernel:
-    func: dequantize_log
-    data_type: x
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : det
-  args : (Tensor x)
-  output : Tensor
-  infer_meta :
-    func : UnchangedInferMeta
-  kernel :
-    func : determinant
-  backward : det_grad
-
-- op : detection_map
-  args: (Tensor detect_res, Tensor label, Tensor has_state, Tensor pos_count, Tensor
-    true_pos, Tensor false_pos, int class_num, int background_label = 0, float overlap_threshold
-    = .5f, bool evaluate_difficult = true, str ap_type = "integral")
-  output: Tensor (accum_pos_count), Tensor (accum_true_pos), Tensor (accum_false_pos),
-    Tensor (m_ap)
-  infer_meta:
-    func: DetectionMapInferMeta
-  kernel:
-    func: detection_map
-    data_type: detect_res
-  optional: has_state, pos_count, true_pos, false_pos
-
-- op : dgc
-  args : (Tensor u, Tensor v, Tensor grad, Tensor param, Tensor current_step, Tensor nranks, float m=0.9, bool use_nesterov=true, float[] sparsity={}, float rampup_begin_step=0.0, float rampup_step=0.0, float regular_coeff=0.0, int regular_type=0)
-  output : Tensor(u_out), Tensor(v_out), Tensor(encode_grad), Tensor(grad_out), Tensor(k), Tensor(gather_buff)
-  infer_meta:
-    func: DgcInferMeta
-    param : [u, v, grad, param, current_step, nranks]
-  kernel :
-    func : dgc
-    param : [u, v, grad, param, current_step, nranks, m, use_nesterov, sparsity, rampup_begin_step, rampup_step, regular_coeff, regular_type]
-  optional: param
-  data_transform :
-    skip_transform : current_step, nranks
-
-- op : dgc_clip_by_norm
-  args: (Tensor x, Tensor current_step, float max_norm, float rampup_begin_step = -1.0)
-  output: Tensor(out)
-  infer_meta:
-    func: ClipByNormInferMeta
-    param: [x, max_norm]
-  kernel:
-    func: dgc_clip_by_norm {dense, dense -> dense}
-          dgc_clip_by_norm_sr {selected_rows, dense -> selected_rows}
-  data_transform :
-    skip_transform : current_step
-
-- op : dgc_momentum
-  args: (Tensor param, Tensor grad, Tensor velocity, Tensor learning_rate, Tensor
-    master_param, Tensor current_step_tensor, Tensor nranks_tensor, float mu, bool use_nesterov
-    = false, str regularization_method = "", float regularization_coeff = 0.0f, bool
-    multi_precision = false, float rescale_grad = 1.0f, float rampup_begin_step =
-    -1.0)
-  output: Tensor (param_out), Tensor (velocity_out), Tensor (master_param_out), Tensor
-    (grad_out)
-  infer_meta:
-    func: DGCMomentumInferMeta
-  kernel:
-    func: dgc_momentum
-    data_type: param
-  optional : master_param, master_param_out
-  data_transform :
-    skip_transform : current_step_tensor, nranks_tensor
-
-- op : diag
-  args : (Tensor x, int offset = 0, float padding_value = 0.0)
-  output : Tensor
-  infer_meta :
-    func : DiagInferMeta
-  kernel :
-    func : diag
-  backward : diag_grad
-
-- op : diag_embed
-  args : (Tensor input, int offset = 0, int dim1 = -2, int dim2 = -1)
-  output : Tensor(out)
-  infer_meta :
-    func : DiagEmbedInferMeta
-  kernel :
-    func : diag_embed
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-  traits : paddle::dialect::ForwardOnlyTrait
-
-- op : diagonal
-  args : (Tensor x, int offset = 0, int axis1 = 0, int axis2 = 1)
-  output : Tensor
-  infer_meta :
-    func : DiagonalInferMeta
-  kernel :
-    func : diagonal
-  backward : diagonal_grad
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : digamma
-  args : (Tensor x)
-  output : Tensor(out)
-  infer_meta :
-    func : UnchangedInferMeta
-  kernel :
-    func : digamma
-  inplace: (x -> out)
-  backward : digamma_grad
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : dirichlet
-  args: (Tensor alpha)
-  output: Tensor(out)
-  infer_meta:
-    func: DirichletInferMeta
-  kernel:
-    func: dirichlet
-  interfaces : paddle::dialect::InferSymbolicShapeInterface
-
-- op : disable_check_model_nan_inf
-  args: (Tensor x, int flag = 0)
-  output: Tensor(out)
-  infer_meta:
-    func: UnchangedInferMeta
-    param : [x]
-  kernel:
-    func: check_model_nan_inf
-    data_type: x
-  backward: disable_check_model_nan_inf_grad
-
-- op : dist
-  args : (Tensor x, Tensor y, float p = 2.0)
-  output : Tensor
-  infer_meta :
-    func : DistInferMeta
-  kernel :
-    func : dist
-  backward : dist_grad
-
-- op : dot
-  args : (Tensor x, Tensor y)
-  output : Tensor
-  infer_meta :
-    func : DotInferMeta
-  kernel :
-    func : dot
-    data_type : x
-  backward : dot_grad
-
-- op : dpsgd
-  args: (Tensor param, Tensor grad, Tensor learning_rate, float clip = 10.0f, float batch_size = 16.0f, float sigma = 1.0f, int seed = 0)
-  output: Tensor(param_out)
-  infer_meta:
-    func: DpsgdInferMeta
-  kernel:
-    func: dpsgd
-    data_type: param
-
-- op : dropout
-  args : (Tensor x, Tensor seed_tensor, Scalar p, bool is_test, str mode, int seed, bool fix_seed)
-  output : Tensor(out), Tensor(mask)
-  infer_meta :
-    func : DropoutInferMeta
-  kernel :
- func : dropout - data_type : x - optional : seed_tensor - intermediate : mask - backward : dropout_grad - traits : pir::SideEffectTrait - -- op : edit_distance - args : (Tensor hyps, Tensor refs, Tensor hypslength, Tensor refslength, bool normalized = false) - output : Tensor(sequencenum), Tensor(out) - infer_meta : - func : EditDistanceInferMeta - kernel : - func : edit_distance - data_type : DataType::FLOAT32 - optional : hypslength, refslength - -- op : eig - args: (Tensor x) - output: Tensor(out_w), Tensor(out_v) - infer_meta: - func: EigInferMeta - kernel: - func: eig - backward: eig_grad - -- op : eigh - args : (Tensor x, str UPLO = "L") - output : Tensor(out_w), Tensor(out_v) - infer_meta : - func : EighInferMeta - kernel : - func : eigh - backward : eigh_grad - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : eigvals - args : (Tensor x) - output : Tensor(out) - infer_meta : - func : EigvalsInferMeta - kernel : - func : eigvals - -- op : eigvalsh - args : (Tensor x, str uplo = "L", bool is_test = false) - output : Tensor(eigenvalues), Tensor(eigenvectors) - infer_meta : - func : EigvalshInferMeta - kernel : - func : eigvalsh - data_type : x - backward : eigvalsh_grad - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : elu - args : (Tensor x, float alpha = 1.0f) - output : Tensor(out) - infer_meta : - func : UnchangedInferMeta - param : [x] - kernel : - func : elu - inplace : (x -> out) - backward : elu_grad - -- op : empty - args : (IntArray shape, DataType dtype=DataType::FLOAT32, Place place=CPUPlace()) - output: Tensor(out) - infer_meta : - func : CreateInferMeta - param : [shape, dtype] - kernel : - func : empty - param : [shape, dtype] - data_type : dtype - backend : place - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : empty_like - args : (Tensor x, DataType dtype = DataType::UNDEFINED, Place place = {}) - output: Tensor(out) - infer_meta : - func : CreateLikeInferMeta - param : [x, dtype] - kernel : - func : empty_like - param : [x, dtype] - data_type : dtype > x - backend : place > x - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : enable_check_model_nan_inf - args: (Tensor x, int flag = 1) - output: Tensor(out) - infer_meta: - func: UnchangedInferMeta - param : [x] - kernel: - func: check_model_nan_inf - data_type: x - backward: enable_check_model_nan_inf_grad - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : equal_all - args : (Tensor x, Tensor y) - output : Tensor(out) - infer_meta : - func : CompareAllInferMeta - kernel : - func : equal_all - -- op : erf - args : (Tensor x) - output : Tensor(out) - infer_meta : - func : UnchangedInferMeta - kernel : - func : erf - inplace : (x -> out) - backward : erf_grad - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : erfinv - args : (Tensor x) - output : Tensor(out) - infer_meta : - func : UnchangedInferMeta - kernel : - func : erfinv - inplace : (x -> out) - backward : erfinv_grad - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : exp - args : (Tensor x) - output : Tensor(out) - infer_meta : - func : UnchangedInferMeta - spmd_rule : ElementwiseUnaryInferSpmd - kernel : - func : exp - inplace : (x -> out) - backward : exp_grad - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : expand - args : (Tensor x, IntArray shape = {}) - output : Tensor(out) - infer_meta : - func : ExpandInferMeta - kernel : - func : expand - data_type : x - backward : expand_grad - -- op : expand_as - 
args : (Tensor x, Tensor y, int[] target_shape = {}) - output : Tensor(out) - infer_meta : - func : ExpandAsInferMeta - local_shape: target_shape - kernel : - func : expand_as - data_type : x - optional : y - backward : expand_as_grad - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : expm1 - args : (Tensor x) - output : Tensor(out) - infer_meta : - func : UnchangedInferMeta - param : [x] - kernel : - func : expm1 - inplace: (x -> out) - backward : expm1_grad - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : exponential_ - args : (Tensor x, float lam) - output : Tensor(out) - infer_meta : - func : UnchangedInferMeta - param : [x] - kernel : - func : exponential - inplace : (x -> out) - backward : exponential__grad - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : eye - args : (Scalar num_rows, Scalar num_columns, DataType dtype=DataType::FLOAT32, Place place={}) - output : Tensor(out) - infer_meta : - func : EyeInferMeta - param : [num_rows, num_columns, dtype] - kernel : - func : eye - param : [num_rows, num_columns, dtype] - data_type : dtype - backend : place - -- op : fake_channel_wise_dequantize_max_abs - args : (Tensor x, Tensor[] scales, int[] quant_bits = {8}, int quant_axis = 0, int x_num_col_dims = 1) - output : Tensor(out) - infer_meta : - func : FakeChannelWiseDequantizeMaxAbsInferMeta - kernel : - func : fake_channel_wise_dequantize_max_abs - data_type : x - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : fake_channel_wise_quantize_abs_max - args : (Tensor x, int bit_length = 8, int round_type = 1, int quant_axis = 0, bool is_test = false) - output : Tensor(out), Tensor(out_scale) - infer_meta : - func : FakeChannelWiseQuantizeAbsMaxInferMeta - kernel : - func : fake_channel_wise_quantize_abs_max - data_type : x - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : fake_channel_wise_quantize_dequantize_abs_max - args : (Tensor x, int bit_length = 8, int round_type = 1, int quant_axis = 0) - output : Tensor(out), Tensor(out_scale) - infer_meta : - func : FakeChannelWiseQuantizeDequantizeAbsMaxInferMeta - kernel : - func : fake_channel_wise_quantize_dequantize_abs_max - data_type : x - backward : fake_channel_wise_quantize_dequantize_abs_max_grad - -- op : fake_dequantize_max_abs - args : (Tensor x, Tensor scale, float max_range) - output : Tensor(out) - infer_meta : - func : FakeDequantizeMaxAbsInferMeta - kernel : - func : fake_dequantize_max_abs - data_type : x - -- op : fake_quantize_abs_max - args : (Tensor x, int bit_length = 8, int round_type = 1) - output : Tensor(out), Tensor(out_scale) - infer_meta : - func : FakeQuantizeAbsMaxInferMeta - kernel : - func : fake_quantize_abs_max - data_type : x - -- op : fake_quantize_dequantize_abs_max - args : (Tensor x, int bit_length = 8, int round_type = 1) - output : Tensor(out), Tensor(out_scale) - infer_meta : - func : FakeQuantizeAbsMaxInferMeta - kernel : - func : fake_quantize_dequantize_abs_max - data_type : x - backward : fake_quantize_dequantize_abs_max_grad - -- op : fake_quantize_dequantize_moving_average_abs_max - args : (Tensor x, Tensor in_scale, Tensor in_accum, Tensor in_state, float moving_rate = 0.9, int bit_length = 8, bool is_test = false, int round_type = 1) - output : Tensor(out), Tensor(out_scale), Tensor(out_state), Tensor(out_accum) - infer_meta : - func : FakeQuantOrWithDequantMovingAverageAbsMaxInferMeta - kernel : - func : fake_quantize_dequantize_moving_average_abs_max - data_type : x - optional : in_accum, 
in_state, out_state, out_accum - backward : fake_quantize_dequantize_moving_average_abs_max_grad - inplace: (in_scale -> out_scale) - -- op : fake_quantize_moving_average_abs_max - args : (Tensor x, Tensor in_scale, Tensor in_accum, Tensor in_state, float moving_rate = 0.9, int bit_length = 8, bool is_test = false, int round_type = 1) - output : Tensor(out), Tensor(out_scale), Tensor(out_state), Tensor(out_accum) - infer_meta : - func : FakeQuantOrWithDequantMovingAverageAbsMaxInferMeta - kernel : - func : fake_quantize_moving_average_abs_max - data_type : x - optional : in_accum, in_state, out_state, out_accum - inplace: (in_scale -> out_scale) - -- op : fake_quantize_range_abs_max - args : (Tensor x, Tensor in_scale, Tensor iter, int window_size = 10000, int bit_length = 8, bool is_test = false, int round_type = 1) - output : Tensor(out), Tensor(out_scale), Tensor(out_scales) - infer_meta : - func : FakeQuantizeRangeAbsMaxInferMeta - kernel : - func : fake_quantize_range_abs_max - data_type : x - optional : iter, out_scales - inplace: (in_scale -> out_scale) - -- op : fft_c2c - args : (Tensor x, int64_t[] axes, str normalization, bool forward) - output : Tensor - infer_meta : - func : FFTC2CInferMeta - kernel : - func : fft_c2c - backward : fft_c2c_grad - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : fft_c2r - args : (Tensor x, int64_t[] axes, str normalization, bool forward, int64_t last_dim_size=0L) - output : Tensor - infer_meta : - func : FFTC2RInferMeta - kernel : - func : fft_c2r - backward : fft_c2r_grad - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : fft_r2c - args : (Tensor x, int64_t[] axes, str normalization, bool forward, bool onesided) - output : Tensor - infer_meta : - func : FFTR2CInferMeta - kernel : - func : fft_r2c - backward : fft_r2c_grad - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : fill - args : (Tensor x, Scalar(double) value=0) - output : Tensor(out) - infer_meta : - func : UnchangedInferMeta - param : [x] - kernel : - func : fill - inplace : (x -> out) - backward: fill_grad - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : fill_diagonal - args : (Tensor x, float value=0, int offset=0, bool wrap=false) - output : Tensor(out) - infer_meta : - func : FillDiagonalInferMeta - kernel : - func : fill_diagonal - data_type : x - inplace : (x -> out) - backward : fill_diagonal_grad - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : fill_diagonal_tensor - args : (Tensor x, Tensor y, int64_t offset = 0, int dim1 = 0, int dim2 = 1) - output : Tensor(out) - infer_meta : - func : FillDiagonalTensorInferMeta - kernel : - func : fill_diagonal_tensor - inplace : (x -> out) - backward : fill_diagonal_tensor_grad - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : flash_attn - args : (Tensor q, Tensor k, Tensor v, Tensor fixed_seed_offset, Tensor attn_mask, float dropout = 0.0, bool causal = false, bool return_softmax = false, bool is_test = false, str rng_name = "") - output : Tensor(out), Tensor(softmax), Tensor(softmax_lse), Tensor(seed_offset) - optional : fixed_seed_offset, attn_mask - infer_meta : - func : FlashAttnInferMeta - param : [q, k, v] - spmd_rule : FlashAttInferSpmd - kernel : - func : flash_attn - data_type : q - backward : flash_attn_grad - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : flash_attn_qkvpacked - args : (Tensor qkv, Tensor fixed_seed_offset, Tensor attn_mask, float dropout = 0.0, bool causal = false, bool 
return_softmax = false, bool is_test = false, str rng_name = "") - output : Tensor(out), Tensor(softmax), Tensor(softmax_lse), Tensor(seed_offset) - optional : fixed_seed_offset, attn_mask - infer_meta : - func : FlashAttnQKVPackedInferMeta - param : [qkv] - kernel : - func : flash_attn_qkvpacked - data_type : qkv - backward : flash_attn_qkvpacked_grad - -- op : flash_attn_unpadded - args : (Tensor q, Tensor k, Tensor v, Tensor cu_seqlens_q, Tensor cu_seqlens_k, Tensor fixed_seed_offset, Tensor attn_mask, int64_t max_seqlen_q, int64_t max_seqlen_k, float scale, float dropout = 0.0, bool causal = false, bool return_softmax = false, bool is_test = false, str rng_name = "") - output : Tensor(out), Tensor(softmax), Tensor(softmax_lse), Tensor(seed_offset) - optional : fixed_seed_offset , attn_mask - infer_meta : - func : FlashAttnInferMeta - param : [q, k, v] - kernel : - func : flash_attn_unpadded - data_type : q - intermediate : softmax_lse, seed_offset - backward : flash_attn_unpadded_grad - -- op : flash_attn_varlen_qkvpacked - args : (Tensor qkv, Tensor cu_seqlens_q, Tensor cu_seqlens_k, Tensor fixed_seed_offset, Tensor attn_mask, int64_t max_seqlen_q, int64_t max_seqlen_k, float scale, float dropout = 0.0, bool causal = false, bool return_softmax = false, bool is_test = false, str rng_name = "", bool varlen_padded = true) - output : Tensor(out), Tensor(softmax), Tensor(softmax_lse), Tensor(seed_offset) - optional : fixed_seed_offset , attn_mask - infer_meta : - func : FlashAttnQKVPackedInferMeta - param : [qkv] - kernel : - func : flash_attn_varlen_qkvpacked - data_type : qkv - intermediate : softmax_lse, seed_offset - backward : flash_attn_varlen_qkvpacked_grad - -- op : flash_attn_with_sparse_mask - args : (Tensor q, Tensor k, Tensor v, Tensor attn_mask_start_row_indices, Tensor fixed_seed_offset, float dropout = 0.0, bool causal = false, int attn_mask_start_row = 0, bool return_softmax = false, bool is_test = false, str rng_name = "") - output : Tensor(out), Tensor(softmax), Tensor(softmax_lse), Tensor(seed_offset) - optional : fixed_seed_offset - infer_meta : - func : FlashAttnInferMeta - param : [q, k, v] - kernel : - func : flash_attn_with_sparse_mask - data_type : q - backward : flash_attn_with_sparse_mask_grad - -- op : flatten - args : (Tensor x, int start_axis = 1, int stop_axis = 1) - output : Tensor(out), Tensor(xshape) - infer_meta : - func : FlattenWithXShapeInferMeta - spmd_rule : FlattenInferSpmd - kernel : - func : flatten - data_type : x - inplace : (x -> out) - view : (x -> out) - intermediate : xshape - backward : flatten_grad - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : flip - args : (Tensor x, int[] axis) - output : Tensor (out) - infer_meta : - func : FlipInferMeta - kernel : - func : flip - backward : flip_grad - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : floor - args : (Tensor x) - output : Tensor(out) - infer_meta : - func : UnchangedInferMeta - kernel : - func : floor - inplace : (x -> out) - backward : floor_grad - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : fmax - args : (Tensor x, Tensor y) - output : Tensor(out) - infer_meta : - param: [x, y] - func : ElementwiseInferMeta - kernel : - func : fmax - backward : fmax_grad - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : fmin - args : (Tensor x, Tensor y) - output : Tensor(out) - infer_meta : - func : ElementwiseInferMeta - param: [x, y] - kernel : - func : fmin - backward : fmin_grad - interfaces : 
paddle::dialect::InferSymbolicShapeInterface - -- op : fold - args: (Tensor x, int[] output_sizes, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations) - output: Tensor(out) - infer_meta: - func: FoldInferMeta - kernel: - func: fold - backward: fold_grad - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : fractional_max_pool2d - args : (Tensor x, int[] output_size, int[] kernel_size = {0, 0}, float random_u = 0.0, bool return_mask = true) - output : Tensor(out), Tensor(mask) - infer_meta : - func : FractionalMaxPoolInferMeta - kernel : - func : fractional_max_pool2d - backward : fractional_max_pool2d_grad - -- op : fractional_max_pool3d - args : (Tensor x, int[] output_size, int[] kernel_size = {0, 0, 0}, float random_u = 0.0, bool return_mask = true) - output : Tensor(out), Tensor(mask) - infer_meta : - func : FractionalMaxPoolInferMeta - kernel : - func : fractional_max_pool3d - backward : fractional_max_pool3d_grad - -- op : frame - args : (Tensor x, int frame_length, int hop_length, int axis=-1) - output : Tensor(out) - infer_meta : - func : FrameInferMeta - kernel : - func : frame - backward : frame_grad - -- op : frobenius_norm - args : (Tensor x, IntArray axis, bool keep_dim, bool reduce_all) - output : Tensor(out) - infer_meta : - func : ReduceIntArrayAxisInferMetaBase - kernel : - func : frobenius_norm - backward : frobenius_norm_grad - -- op : ftrl - args: (Tensor param, Tensor squared_accumulator, Tensor linear_accumulator, Tensor grad, Tensor learning_rate, float l1=0.0f, float l2=0.0f, float lr_power=-0.5f) - output: Tensor(param_out), Tensor(squared_accum_out), Tensor(linear_accum_out) - infer_meta: - func: FtrlInferMeta - kernel: - func: ftrl {dense, dense, dense, dense, dense -> dense, dense, dense} - ftrl_sr {dense, dense, dense, selected_rows, dense -> dense, dense, dense} - data_type: param - -- op : full - args : (IntArray shape, Scalar(double) value, DataType dtype=DataType::FLOAT32, Place place=CPUPlace()) - output: Tensor(out) - infer_meta : - func : CreateInferMeta - param : [shape, dtype] - kernel : - func : full - param : [shape, value, dtype] - data_type : dtype - backend : place - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : full_ - args : (Tensor output, IntArray shape, Scalar(double) value, DataType dtype=DataType::FLOAT32, Place place=CPUPlace()) - output : Tensor(out) - inplace : (output -> out) - infer_meta : - func : CreateInferMeta - param : [shape, dtype] - kernel : - func : full - param : [shape, value, dtype] - data_type : dtype - backend : place - -- op : full_batch_size_like - args : (Tensor input, int[] shape, DataType dtype, Scalar(double) value, int input_dim_idx, int output_dim_idx, Place place=CPUPlace()) - output: Tensor(out) - infer_meta : - func : FullBatchSizeLikeInferMeta - param : [input, shape, value, dtype, input_dim_idx, output_dim_idx] - kernel : - func : full_batch_size_like - param : [input, shape, value, dtype, input_dim_idx, output_dim_idx] - data_type : dtype - backend : place - -- op : full_int_array - args : (int64_t[] value, DataType dtype=DataType::FLOAT32, Place place=CPUPlace()) - output: Tensor(out) - infer_meta : - func : CreateVecShapeInferMeta - param : [value, dtype] - kernel : - func : full_int_array - param : [value, dtype] - data_type : dtype - backend : place - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : full_like - args : (Tensor x, Scalar value, DataType dtype = DataType::UNDEFINED, Place place = {}) - output: Tensor(out) - infer_meta 

- op : full_like
  args : (Tensor x, Scalar value, DataType dtype = DataType::UNDEFINED, Place place = {})
  output: Tensor(out)
  infer_meta :
    func : CreateLikeInferMeta
    param : [x, dtype]
    spmd_rule : FullLikeInferSpmd
  kernel :
    func : full_like
    param : [x, value, dtype]
    data_type : dtype > x
    backend : place > x
  data_transform :
    skip_transform : x
  traits : paddle::dialect::ForwardOnlyTrait
  interfaces : paddle::dialect::InferSymbolicShapeInterface
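
# Schema note (annotation, best-effort reading of the `>` syntax): in
# `full_like`, `data_type : dtype > x` is a fallback chain -- the kernel dtype
# is taken from the `dtype` attribute and falls back to the dtype of `x` when
# `dtype` is DataType::UNDEFINED; `backend : place > x` resolves the device the
# same way. Illustrative (hypothetical) calls:
#
#   full_like(x_f16, 0.0)                     # dtype UNDEFINED -> kernel runs in float16
#   full_like(x_f16, 0.0, DataType::FLOAT32)  # explicit dtype  -> kernel runs in float32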

- op : full_with_tensor
  args : (Tensor value, IntArray shape, DataType dtype=DataType::FLOAT32)
  output: Tensor(out)
  infer_meta :
    func : FullWithTensorInferMeta
    param : [shape, dtype]
  kernel :
    func : full_with_tensor
    data_type : dtype
  interfaces : paddle::dialect::InferSymbolicShapeInterface
  traits : paddle::dialect::ForwardOnlyTrait

- op : fused_batch_norm_act
  args : (Tensor x, Tensor scale, Tensor bias, Tensor mean, Tensor variance, float momentum, float epsilon, str act_type)
  output : Tensor(out), Tensor(mean_out), Tensor(variance_out), Tensor(saved_mean), Tensor(saved_variance), Tensor(reserve_space)
  infer_meta:
    func : FusedBatchNormActInferMeta
    param : [x, scale, bias, mean, variance]
  kernel :
    func : fused_batch_norm_act
    data_type : x
  view : (mean -> mean_out), (variance -> variance_out)
  backward : fused_batch_norm_act_grad

- op : fused_bn_add_activation
  args : (Tensor x, Tensor z, Tensor scale, Tensor bias, Tensor mean, Tensor variance, float momentum, float epsilon, str act_type)
  output : Tensor(out), Tensor(mean_out), Tensor(variance_out), Tensor(saved_mean), Tensor(saved_variance), Tensor(reserve_space)
  infer_meta:
    func : FusedBatchNormActInferMeta
    param : [x, scale, bias, mean, variance]
  kernel :
    func : fused_bn_add_activation
    data_type : x
  view : (mean -> mean_out), (variance -> variance_out)
  backward : fused_bn_add_activation_grad

- op : fused_multi_transformer
  args : (Tensor x, Tensor[] ln_scales, Tensor[] ln_biases, Tensor[] qkv_weights, Tensor[] qkv_biases, Tensor[] cache_kvs, Tensor[] pre_caches, Tensor rotary_tensor, Tensor beam_offset, Tensor time_step, Tensor seq_lengths, Tensor src_mask, Tensor[] out_linear_weights, Tensor[] out_linear_biases, Tensor[] ffn_ln_scales, Tensor[] ffn_ln_biases, Tensor[] ffn1_weights, Tensor[] ffn1_biases, Tensor[] ffn2_weights, Tensor[] ffn2_biases, bool pre_layer_norm = true, float epsilon = 1e-5, float residual_alpha = 1.0f, float dropout_rate = .5f, int rotary_emb_dims = 0, bool is_test = false, str dropout_implementation = "downgrade_in_infer", str act_method = "gelu", bool trans_qkvw = true, int ring_id = -1, str norm_type = "layernorm", bool use_neox_rotary_style=true, int gqa_group_size=-1)
  optional : qkv_biases, cache_kvs, pre_caches, rotary_tensor, beam_offset, time_step, seq_lengths, src_mask, out_linear_biases, ffn1_biases, ffn2_biases, cache_kv_outs
  output : Tensor[](cache_kv_outs){out_linear_weights.size()}, Tensor(out)
  infer_meta :
    func : FusedMultiTransformerInferMeta
  kernel :
    func : fused_multi_transformer
    data_type : x

- op : fused_softmax_mask
  args : (Tensor x, Tensor mask)
  output : Tensor(out)
  infer_meta :
    func : SoftmaxMaskFuseInferMeta
  kernel :
    func : fused_softmax_mask
    data_type : x
  backward: fused_softmax_mask_grad

- op : fused_softmax_mask_upper_triangle
  args : (Tensor X)
  output : Tensor(Out)
  infer_meta :
    func : UnchangedInferMeta
  kernel:
    func : fused_softmax_mask_upper_triangle
  backward: fused_softmax_mask_upper_triangle_grad

- op : gammaincc
  args : (Tensor x, Tensor y)
  output : Tensor(out)
  infer_meta :
    func : ElementwiseInferMeta
    param : [x, y]
  kernel :
    func : gammaincc
  inplace: (x -> out)
  backward : gammaincc_grad

- op : gammaln
  args : (Tensor x)
  output : Tensor(out)
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : gammaln
  inplace: (x -> out)
  backward : gammaln_grad

- op : gather
  args : (Tensor x, Tensor index, Scalar axis=0)
  output : Tensor(out)
  infer_meta :
    func : GatherInferMeta
  kernel :
    func : gather
    data_type: x
  backward : gather_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : gather_nd
  args : (Tensor x, Tensor index)
  output : Tensor(out)
  infer_meta :
    func : GatherNdInferMeta
    spmd_rule : GatherNdInferSpmd
  kernel :
    func : gather_nd
    data_type : x
  backward : gather_nd_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : gather_tree
  args : (Tensor ids, Tensor parents)
  output : Tensor(out)
  infer_meta :
    func : GatherTreeMeta
  kernel :
    func : gather_tree
    data_type : ids

- op : gaussian
  args : (IntArray shape, float mean, float std, int seed, DataType dtype, Place place={})
  output: Tensor(out)
  infer_meta :
    func : GaussianInferMeta
    param : [shape, mean, std, seed, dtype]
  kernel :
    func : gaussian
    param : [shape, mean, std, seed, dtype]
    data_type : dtype
    backend : place
  interfaces : paddle::dialect::InferSymbolicShapeInterface
  traits : pir::SideEffectTrait, paddle::dialect::ForwardOnlyTrait

- op : gaussian_inplace
  args: (Tensor x, float mean=0, float std=1.0, int seed=0)
  output: Tensor(out)
  infer_meta:
    func: UnchangedInferMeta
    param: [x]
  kernel:
    func: gaussian_inplace
    data_type: x
    backend : x
  inplace: (x -> out)
  backward: gaussian_inplace_grad

- op : gelu
  args : (Tensor x, bool approximate = false)
  output : Tensor(out)
  infer_meta :
    func : UnchangedInferMeta
    param: [x]
  kernel :
    func : gelu
  backward : gelu_grad

- op : generate_proposals
  args : (Tensor scores, Tensor bbox_deltas, Tensor im_shape, Tensor anchors, Tensor variances, int pre_nms_top_n, int post_nms_top_n, float nms_thresh, float min_size, float eta, bool pixel_offset=true)
  output : Tensor(rpn_rois), Tensor(rpn_roi_probs), Tensor(rpn_rois_num)
  infer_meta :
    func : GenerateProposalsV2InferMeta
  kernel :
    func : generate_proposals
    data_type : anchors
  optional : rpn_rois_num

- op : graph_khop_sampler
  args : (Tensor row, Tensor colptr, Tensor x, Tensor eids, int[] sample_sizes, bool return_eids)
  output : Tensor(out_src), Tensor(out_dst), Tensor(sample_index), Tensor(reindex_x), Tensor(out_eids)
  infer_meta :
    func : GraphKhopSamplerInferMeta
  kernel :
    func : graph_khop_sampler
    data_type : row
  optional : eids

- op : graph_sample_neighbors
  args : (Tensor row, Tensor colptr, Tensor x, Tensor eids, Tensor perm_buffer, int sample_size, bool return_eids, bool flag_perm_buffer)
  output : Tensor(out), Tensor(out_count), Tensor(out_eids)
  infer_meta :
    func : GraphSampleNeighborsInferMeta
  kernel :
    func : graph_sample_neighbors
    data_type : row
  optional : eids, perm_buffer

- op : grid_sample
  args : (Tensor x, Tensor grid, str mode = "bilinear", str padding_mode = "zeros", bool align_corners = true)
  output : Tensor(out)
  infer_meta :
    func : GridSampleBaseInferMeta
    param : [x, grid]
  kernel:
    func : grid_sample
    data_type : x
  backward : grid_sample_grad

- op : group_norm
  args : (Tensor x, Tensor scale, Tensor bias, float epsilon = 1e-5, int groups = -1, str data_format = "NCHW")
  output : Tensor(y), Tensor(mean), Tensor(variance)
  infer_meta :
    func : GroupNormInferMeta
  kernel :
    func : group_norm
  optional : scale, bias
  intermediate : mean, variance
  backward : group_norm_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface, paddle::dialect::LayoutTransformationInterface

- op : gru
  args: (Tensor input, Tensor h0, Tensor weight, Tensor bias, str activation = "tanh", str gate_activation = "sigmoid", bool is_reverse = false, bool origin_mode = false, bool is_test=false)
  output: Tensor (batch_gate), Tensor (batch_reset_hidden_prev), Tensor (batch_hidden), Tensor (hidden)
  infer_meta:
    func: GruInferMeta
  kernel:
    func: gru
    data_type: input
  optional: h0, bias
  intermediate: batch_gate, batch_reset_hidden_prev, batch_hidden
  backward: gru_grad

- op : gru_unit
  args: (Tensor input, Tensor hidden_prev, Tensor weight, Tensor bias, int activation = 2, int gate_activation = 1, bool origin_mode = false)
  output: Tensor (gate), Tensor (reset_hidden_prev), Tensor (hidden)
  infer_meta:
    func: GruUnitInferMeta
  kernel:
    func: gru_unit
  optional: bias
  intermediate: gate, reset_hidden_prev
  backward: gru_unit_grad

- op : gumbel_softmax
  args : (Tensor x, float temperature = 1.0, bool hard = false, int axis = -1)
  output : Tensor
  infer_meta :
    func : GumbelSoftmaxInferMeta
  kernel :
    func : gumbel_softmax
  backward : gumbel_softmax_grad

- op : hardshrink
  args : (Tensor x, float threshold = 0.5)
  output : Tensor (out)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : hard_shrink
  backward : hardshrink_grad

- op : hardsigmoid
  args : (Tensor x, float slope = 0.2, float offset = 0.5)
  output : Tensor (out)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : hardsigmoid
  backward : hardsigmoid_grad

- op : hardtanh
  args : (Tensor x, float t_min=0, float t_max=24)
  output : Tensor(out)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : hardtanh
  inplace: (x -> out)
  backward : hardtanh_grad

- op : heaviside
  args : (Tensor x, Tensor y)
  output : Tensor(out)
  infer_meta :
    func : ElementwiseInferMeta
  kernel :
    func : heaviside
  backward : heaviside_grad

- op : hinge_loss
  args: (Tensor logits, Tensor labels)
  output: Tensor (loss)
  infer_meta:
    func: HingeLossInferMeta
  kernel:
    func: hinge_loss
    data_type: logits
  backward: hinge_loss_grad

- op : histogram
  args : (Tensor input, Tensor weight, int64_t bins = 100, int min = 0, int max = 0, bool density = false)
  output : Tensor(out)
  infer_meta :
    func : HistogramInferMeta
  optional : weight
  kernel :
    func : histogram

- op : hsigmoid_loss
  args : (Tensor x, Tensor label, Tensor w, Tensor bias, Tensor path, Tensor code, int num_classes, bool is_sparse)
  output : Tensor(out), Tensor(pre_out), Tensor(w_out)
  infer_meta :
    func : HSigmoidLossInferMeta
  optional: path, code, bias
  kernel :
    func : hsigmoid_loss
    data_type : x
  backward : hsigmoid_loss_grad

- op : huber_loss
  args : (Tensor input, Tensor label, float delta)
  output : Tensor(out), Tensor(residual)
  infer_meta :
    func : HuberLossInferMeta
  kernel :
    func : huber_loss
  intermediate : residual
  backward : huber_loss_grad

- op : i0
  args : (Tensor x)
  output : Tensor(out)
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : i0
  inplace: (x -> out)
  backward : i0_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface
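
# Schema note (annotation, assuming standard codegen behavior): an
# `inplace : (x -> out)` pair, as on hardtanh and i0 above, declares that the
# output may reuse the input's buffer, so codegen emits an in-place API variant
# (i0_, hardtanh_, ...) next to the out-of-place one. Outputs listed under
# `intermediate`, such as `residual` in huber_loss, are hidden from the public
# API and exist only to be forwarded to the matching *_grad op.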

- op : i0e
  args : (Tensor x)
  output : Tensor(out)
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : i0e
  backward : i0e_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : i1
  args : (Tensor x)
  output : Tensor(out)
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : i1
  backward : i1_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : i1e
  args : (Tensor x)
  output : Tensor(out)
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : i1e
  backward : i1e_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : identity_loss
  args : (Tensor x, int reduction = 1)
  output : Tensor(out)
  infer_meta :
    func : IdentityLossInferMeta
  kernel :
    func : identity_loss
  inplace: (x -> out)
  backward : identity_loss_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : im2sequence
  args: (Tensor x, Tensor y, int[] kernels, int[] strides = {1, 1}, int[] paddings = {0, 0, 0, 0}, int[] out_stride = {1, 1})
  output: Tensor (out)
  infer_meta:
    func: Im2sequenceInferMeta
  kernel:
    func: im2sequence
  optional: y
  backward: im2sequence_grad

- op : imag
  args : (Tensor x)
  output : Tensor (out)
  infer_meta :
    func : RealAndImagInferMeta
  kernel :
    func : imag
  backward : imag_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : increment
  args : (Tensor x, float value = 1.0)
  output : Tensor(out)
  infer_meta :
    func : IncrementInferMeta
  kernel :
    func : increment
  inplace : (x -> out)
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : index_add
  args : (Tensor x, Tensor index, Tensor add_value, int axis = 0)
  output : Tensor(out)
  infer_meta :
    func : IndexAddInferMeta
  kernel :
    func : index_add
    data_type : x
  inplace : (x -> out)
  backward : index_add_grad

- op : index_put
  args : (Tensor x, Tensor[] indices, Tensor value, bool accumulate=false)
  output : Tensor(out)
  infer_meta :
    func : IndexPutInferMeta
  kernel :
    func : index_put
    data_type : x
  inplace : (x -> out)
  backward : index_put_grad

- op : index_sample
  args : (Tensor x, Tensor index)
  output : Tensor
  infer_meta :
    func : IndexSampleInferMeta
  kernel :
    func : index_sample
    data_type : x
  backward : index_sample_grad
  data_transform :
    skip_transform : index
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : index_select
  args : (Tensor x, Tensor index, int axis = 0)
  output : Tensor(out)
  infer_meta :
    func : IndexSelectInferMeta
  kernel :
    func : index_select
    data_type : x
  backward : index_select_grad
  data_transform :
    skip_transform : index

- op : index_select_strided
  args : (Tensor x, int64_t index, int axis = 0)
  output : Tensor(out)
  infer_meta :
    func : IndexSelectStridedInferMeta
  kernel :
    func : index_select_strided
    data_type : x
  backward : index_select_strided_grad

- op : instance_norm
  args : (Tensor x, Tensor scale, Tensor bias, float epsilon=1e-5)
  output : Tensor(y), Tensor(saved_mean), Tensor(saved_variance)
  infer_meta :
    func : InstanceNormInferMeta
  kernel :
    func : instance_norm
    data_type : x
  optional : scale, bias
  intermediate : saved_mean, saved_variance
  backward : instance_norm_grad

- op : inverse
  args : (Tensor x)
  output : Tensor(out)
  infer_meta :
    func : InverseInferMeta
  kernel :
    func : inverse
  backward : inverse_grad

- op : is_empty
  args : (Tensor x)
  output : Tensor(out)
  infer_meta :
    func : IsEmptyInferMeta
  kernel :
    func : is_empty
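
# Schema note (annotation, best-effort reading): a `data_transform` block with
# `skip_transform : index`, as on index_sample and index_select above, exempts
# `index` from the usual pre-kernel input transforms (device transfer, layout
# and dtype adaptation), so the integer index tensor is handed to the kernel
# exactly as the user passed it.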

- op : isclose
  args : (Tensor x, Tensor y, Scalar(double) rtol=1e-5, Scalar(double) atol=1e-8, bool equal_nan=false)
  output : Tensor(out)
  infer_meta :
    func : ValueCompareInferMeta
    param: [x, y]
  kernel :
    func : isclose
    data_type : x
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : isfinite
  args : (Tensor x)
  output : Tensor(out)
  infer_meta :
    func : IsfiniteInferMeta
  kernel :
    func : isfinite {dense -> dense},
           isfinite_sr {selected_rows -> selected_rows}
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : isinf
  args : (Tensor x)
  output : Tensor(out)
  infer_meta :
    func : IsfiniteInferMeta
  kernel :
    func : isinf {dense -> dense},
           isinf_sr {selected_rows -> selected_rows}
  interfaces : paddle::dialect::InferSymbolicShapeInterface
  traits : paddle::dialect::ForwardOnlyTrait

- op : isnan
  args : (Tensor x)
  output : Tensor(out)
  infer_meta :
    func : IsfiniteInferMeta
  kernel :
    func : isnan {dense -> dense},
           isnan_sr {selected_rows -> selected_rows}
  interfaces : paddle::dialect::InferSymbolicShapeInterface
  traits : paddle::dialect::ForwardOnlyTrait

- op : kldiv_loss
  args : (Tensor x, Tensor label, str reduction = "mean", bool log_target = false)
  output : Tensor(out)
  infer_meta :
    func : KLDivInferMeta
  kernel :
    func : kldiv_loss
    data_type : x
  backward : kldiv_loss_grad

- op : kron
  args : (Tensor x, Tensor y)
  output : Tensor
  infer_meta :
    func : KronInferMeta
  kernel :
    func : kron
  backward : kron_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : kthvalue
  args : (Tensor x, int k = 1, int axis = -1, bool keepdim = false)
  output : Tensor(out), Tensor(indices)
  infer_meta :
    func : KthvalueInferMeta
  kernel :
    func : kthvalue
  backward : kthvalue_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : l1_norm
  args : (Tensor x)
  output : Tensor(out)
  infer_meta :
    func : L1NormInferMeta
  kernel :
    func : l1_norm
    data_type : x
  inplace: (x -> out)
  backward : l1_norm_grad

- op : label_smooth
  args : (Tensor label, Tensor prior_dist, float epsilon = 0.0f)
  output : Tensor (out)
  infer_meta :
    func : UnchangedInferMeta
    param : [label]
  kernel :
    func : label_smooth
    data_type : label
  optional : prior_dist
  backward : label_smooth_grad

- op : lamb_
  args : (Tensor param, Tensor grad, Tensor learning_rate, Tensor moment1, Tensor moment2, Tensor beta1_pow, Tensor beta2_pow, Tensor master_param, Tensor skip_update, float weight_decay, float beta1=0.9, float beta2=0.999, float epsilon=1.0e-6f, bool always_adapt=false, bool multi_precision=false)
  output : Tensor(param_out), Tensor(moment1_out), Tensor(moment2_out), Tensor(beta1_pow_out), Tensor(beta2_pow_out), Tensor(master_param_outs)
  infer_meta :
    func : LambInferMeta
  kernel :
    func : lamb {dense, dense, dense, dense, dense, dense, dense, dense, dense -> dense, dense, dense, dense, dense, dense},
           lamb_sr {dense, selected_rows, dense, dense, dense, dense, dense, dense, dense -> dense, dense, dense, dense, dense, dense}
    data_type : param
  optional : master_param, skip_update, beta1_pow_out, beta2_pow_out, master_param_outs
  inplace : (param -> param_out), (moment1 -> moment1_out), (moment2 -> moment2_out), (beta1_pow -> beta1_pow_out), (beta2_pow -> beta2_pow_out), (master_param -> master_param_outs)
  traits : pir::SideEffectTrait
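
# Schema note (annotation): optimizer entries such as lamb_ above share one
# pattern -- each state tensor is updated through an `inplace` pair, the
# mixed-precision path is covered by `optional : master_param, ...`, and
# `traits : pir::SideEffectTrait` marks the op as state-mutating so passes do
# not fold it away. A minimal hypothetical entry in the same style:
#
#   - op : my_sgd_
#     args : (Tensor param, Tensor grad, float lr = 0.01f)
#     output : Tensor(param_out)
#     infer_meta :
#       func : UnchangedInferMeta
#       param : [param]
#     kernel :
#       func : my_sgd
#       data_type : param
#     inplace : (param -> param_out)
#     traits : pir::SideEffectTrait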

- op : layer_norm
  args : (Tensor x, Tensor scale, Tensor bias, float epsilon = 1e-5, int begin_norm_axis = 1)
  output : Tensor(out), Tensor(mean), Tensor(variance)
  infer_meta :
    func : LayerNormInferMeta
    spmd_rule : LayerNormInferSpmd
  kernel :
    func : layer_norm
    data_type : x
  backward : layer_norm_grad
  intermediate : mean, variance
  optional : scale, bias
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : leaky_relu
  args : (Tensor x, float negative_slope = 0.02f)
  output : Tensor(out)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : leaky_relu
  inplace: (x -> out)
  backward : leaky_relu_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : lerp
  args : (Tensor x, Tensor y, Tensor weight)
  output : Tensor(out)
  infer_meta :
    func : LerpInferMeta
  kernel :
    func : lerp
  inplace : (x -> out)
  backward : lerp_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : lgamma
  args : (Tensor x)
  output : Tensor(out)
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : lgamma
  inplace: (x -> out)
  backward : lgamma_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : limit_by_capacity
  args : (Tensor expert_count, Tensor capacity, int n_worker)
  output : Tensor(out)
  infer_meta :
    func : LimitByCapacityInferMeta
  kernel :
    func : limit_by_capacity
    data_type : expert_count

- op : linear_interp
  args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_format="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1)
  output : Tensor(output)
  infer_meta :
    func : InterpolateInferMeta
  optional: out_size, size_tensor, scale_tensor
  kernel :
    func : linear_interp
    data_type : x
  backward : linear_interp_grad
  data_transform :
    skip_transform : out_size, size_tensor, scale_tensor
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : linspace
  args : (Tensor start, Tensor stop, Tensor number, DataType dtype, Place place)
  output : Tensor(out)
  infer_meta :
    func : LinspaceInferMeta
    param: [start, stop, number, dtype]
  kernel :
    func : linspace
    param: [start, stop, number, dtype]
    data_type : dtype
    backend : place
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : llm_int8_linear
  args : (Tensor x, Tensor weight, Tensor bias, Tensor weight_scale, float threshold=6.0)
  output : Tensor(out)
  infer_meta :
    func : LLMInt8LinearInferMeta
  kernel :
    func : llm_int8_linear
    data_type : x
  optional: bias

- op : log
  args : (Tensor x)
  output : Tensor(out)
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : log
  inplace: (x -> out)
  backward: log_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : log10
  args : (Tensor x)
  output : Tensor(out)
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : log10
  inplace: (x -> out)
  backward: log10_grad

- op : log1p
  args : (Tensor x)
  output : Tensor(out)
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : log1p
  inplace: (x -> out)
  backward: log1p_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : log2
  args : (Tensor x)
  output : Tensor(out)
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : log2
  inplace: (x -> out)
  backward: log2_grad

- op : log_loss
  args : (Tensor input, Tensor label, float epsilon)
  output : Tensor
  infer_meta :
    func : LogLossInferMeta
  kernel :
    func : log_loss
  backward : log_loss_grad

- op : log_softmax
  args : (Tensor x, int axis = -1)
  output : Tensor(out)
  infer_meta :
    func : UnchangedInferMetaCheckAxis
  kernel :
    func : log_softmax
    data_type : x
  backward : log_softmax_grad

- op : logcumsumexp
  args : (Tensor x, int axis=-1, bool flatten=false, bool exclusive=false, bool reverse=false)
  output : Tensor(out)
  infer_meta :
    func : CumInferMeta
  kernel :
    func : logcumsumexp
  backward : logcumsumexp_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : logical_and
  args : (Tensor x, Tensor y)
  output : Tensor(out)
  infer_meta :
    func : LogicalBinaryInferMeta
  kernel :
    func : logical_and
    data_type : x
    backend : x
  inplace: (x -> out)
  interfaces : paddle::dialect::InferSymbolicShapeInterface
  traits : paddle::dialect::ForwardOnlyTrait

- op : logical_not
  args : (Tensor x)
  output : Tensor(out)
  infer_meta :
    func : LogicalNotInferMeta
  kernel :
    func : logical_not
    data_type : x
    backend : x
  inplace: (x -> out)
  interfaces : paddle::dialect::InferSymbolicShapeInterface
  traits : paddle::dialect::ForwardOnlyTrait

- op : logical_or
  args : (Tensor x, Tensor y)
  output : Tensor(out)
  infer_meta :
    func : LogicalBinaryInferMeta
  kernel :
    func : logical_or
    data_type : x
    backend : x
  inplace: (x -> out)
  interfaces : paddle::dialect::InferSymbolicShapeInterface
  traits : paddle::dialect::ForwardOnlyTrait

- op : logical_xor
  args : (Tensor x, Tensor y)
  output : Tensor(out)
  infer_meta :
    func : LogicalBinaryInferMeta
  kernel :
    func : logical_xor
    data_type : x
    backend : x
  inplace: (x -> out)
  interfaces : paddle::dialect::InferSymbolicShapeInterface
  traits : paddle::dialect::ForwardOnlyTrait

- op : logit
  args : (Tensor x, float eps = 1e-6f)
  output : Tensor(out)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : logit
  inplace: (x -> out)
  backward : logit_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : logsigmoid
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : logsigmoid
  backward : logsigmoid_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : logspace
  args : (Tensor start, Tensor stop, Tensor num, Tensor base, DataType dtype, Place place={})
  output : Tensor(out)
  infer_meta:
    func : LogspaceInferMeta
    param : [start, stop, num, base, dtype]
  kernel :
    func : logspace
    param : [start, stop, num, base, dtype]
    data_type : dtype
    backend : place
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : logsumexp
  args : (Tensor x, int[] axis={0}, bool keepdim=false, bool reduce_all=false)
  output : Tensor(out)
  infer_meta :
    func : LogsumexpInferMeta
  kernel :
    func : logsumexp
  backward : logsumexp_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : lookup_table_dequant
  args: (Tensor w, Tensor ids, int64_t padding_idx = -1)
  output: Tensor (out)
  infer_meta:
    func: LookupTableDequantInferMeta
  kernel:
    func: lookup_table_dequant
    data_type: w
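
# Schema note (annotation): the logical_* entries above declare
# `traits : paddle::dialect::ForwardOnlyTrait` and no `backward :` key --
# boolean comparison results carry no useful gradient, so no *_grad op is
# generated for them, while ops like logit keep a `backward :` link to their
# grad kernel.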

- op : lp_pool2d
  args : (Tensor x, IntArray kernel_size, int[] strides = {1,1}, int[] paddings = {0,0}, bool ceil_mode = false, bool exclusive = true, str data_format = "NCHW", str pooling_type = "", bool global_pooling = false, bool adaptive = false, str padding_algorithm = "EXPLICIT", float norm_type = 0.0f)
  output : Tensor(out)
  infer_meta :
    func : Pool2DInferMeta
    param : [x, kernel_size, strides, paddings, ceil_mode, exclusive, data_format, pooling_type, global_pooling, adaptive, padding_algorithm]
  kernel :
    func : lp_pool2d
    param : [x, kernel_size, strides, paddings, ceil_mode, exclusive, data_format, pooling_type, global_pooling, adaptive, padding_algorithm, norm_type]
  backward : lp_pool2d_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : lstm
  args: (Tensor input, Tensor h0, Tensor c0, Tensor weight, Tensor bias, bool use_peepholes = true, bool is_reverse = false, bool is_test = false, str gate_activation = "sigmoid", str cell_activation = "tanh", str candidate_activation = "tanh")
  output: Tensor (hidden), Tensor (cell), Tensor (batch_gate), Tensor (batch_cell_pre_act)
  infer_meta:
    func: LSTMInferMeta
  kernel:
    func: lstm
    data_type: input
  optional: h0, c0
  intermediate: batch_gate, batch_cell_pre_act
  backward: lstm_grad

- op : lstsq
  args : (Tensor x, Tensor y, Scalar rcond=0.0f, str driver="gels")
  output : Tensor(solution), Tensor(residuals), Tensor(rank), Tensor(singular_values)
  infer_meta :
    func : LstsqInferMeta
  kernel :
    func : lstsq
    data_type : x
  optional : residuals

- op : lu
  args : (Tensor x, bool pivot = true)
  output : Tensor(out), Tensor(pivots), Tensor(infos)
  infer_meta :
    func : LUInferMeta
  kernel :
    func : lu
    data_type : x
  inplace : (x -> out)
  backward : lu_grad

- op : lu_unpack
  args : (Tensor x, Tensor y, bool unpack_ludata = true, bool unpack_pivots = true)
  output : Tensor(pmat), Tensor(l), Tensor(u)
  infer_meta :
    func : LUUnpackInferMeta
  kernel :
    func : lu_unpack
    data_type : x
  backward : lu_unpack_grad

- op : margin_cross_entropy
  args : (Tensor logits, Tensor label, bool return_softmax = false, int ring_id = 0, int rank = 0, int nranks = 1, float margin1 = 1.0f, float margin2 = 0.5f, float margin3 = 0.0f, float scale = 64.0f)
  output : Tensor(softmax), Tensor(loss)
  infer_meta :
    func : MarginCrossEntropyInferMeta
  kernel :
    func : margin_cross_entropy
    data_type : logits
  backward : margin_cross_entropy_grad

- op : masked_multihead_attention_
  args : (Tensor x, Tensor cache_kv, Tensor bias, Tensor src_mask, Tensor cum_offsets, Tensor sequence_lengths, Tensor rotary_tensor, Tensor beam_cache_offset, Tensor qkv_out_scale, Tensor out_shift, Tensor out_smooth, int seq_len, int rotary_emb_dims, bool use_neox_rotary_style=false, str compute_dtype = "default", float out_scale=-1, int quant_round_type=1, float quant_max_bound=127.0, float quant_min_bound=-127.0)
  output : Tensor(out), Tensor(cache_kv_out), Tensor(beam_cache_offset_out)
  infer_meta :
    func : MaskedMultiheadAttentionInferMeta
  kernel :
    func : masked_multihead_attention
    data_type : x
  optional : bias, src_mask, cum_offsets, sequence_lengths, rotary_tensor, beam_cache_offset, qkv_out_scale, out_shift, out_smooth
  inplace : (cache_kv -> cache_kv_out), (beam_cache_offset -> beam_cache_offset_out)

- op : masked_select
  args : (Tensor x, Tensor mask)
  output : Tensor (out)
  infer_meta :
    func : MaskedSelectInferMeta
  kernel :
    func : masked_select
    data_type : x
  backward : masked_select_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : match_matrix_tensor
  args: (Tensor x, Tensor y, Tensor w, int dim_t = 1)
  output: Tensor (out), Tensor (tmp)
  infer_meta:
    func: MatchMatrixTensorInferMeta
  kernel:
    func: match_matrix_tensor
  backward: match_matrix_tensor_grad

- op : matrix_nms
  args : (Tensor bboxes, Tensor scores, float score_threshold, int nms_top_k, int keep_top_k, float post_threshold=0., bool use_gaussian = false, float gaussian_sigma = 2., int background_label = 0, bool normalized = true)
  output : Tensor(out), Tensor(index), Tensor(roisnum)
  infer_meta :
    func : MatrixNMSInferMeta
  optional : roisnum
  kernel :
    func : matrix_nms

- op : matrix_power
  args : (Tensor x, int n)
  output : Tensor
  infer_meta :
    func : MatrixPowerInferMeta
  kernel :
    func : matrix_power
  backward : matrix_power_grad

- op : matrix_rank
  args : (Tensor x, float tol, bool use_default_tol=true, bool hermitian=false)
  output : Tensor(out)
  infer_meta :
    func : MatrixRankInferMeta
    param : [x, use_default_tol, hermitian]
  kernel :
    func : matrix_rank

- op : matrix_rank_tol
  args : (Tensor x, Tensor atol_tensor, bool use_default_tol=true, bool hermitian=false)
  output : Tensor(out)
  infer_meta :
    func : MatrixRankTolInferMeta
  kernel :
    func : matrix_rank_tol

- op : max
  args : (Tensor x, IntArray axis={}, bool keepdim=false)
  output : Tensor(out)
  infer_meta :
    func : ReduceIntArrayAxisInferMeta
    spmd_rule: ReductionMaxInferSpmdDynamic
  kernel :
    func : max
  backward : max_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : max_pool2d_with_index
  args : (Tensor x, int[] kernel_size, int[] strides= {1, 1}, int[] paddings = {0, 0}, bool global_pooling = false, bool adaptive = false, bool ceil_mode = false)
  output : Tensor(out), Tensor(mask)
  infer_meta :
    func : MaxPoolWithIndexInferMeta
  kernel :
    func : max_pool2d_with_index
  backward : max_pool2d_with_index_grad

- op : max_pool3d_with_index
  args : (Tensor x, int[] kernel_size, int[] strides = {1, 1, 1}, int[] paddings = {0, 0, 0}, bool global_pooling = false, bool adaptive = false, bool ceil_mode = false)
  output : Tensor(out), Tensor(mask)
  infer_meta :
    func : MaxPoolWithIndexInferMeta
  kernel :
    func : max_pool3d_with_index
  backward : max_pool3d_with_index_grad

- op : maxout
  args : (Tensor x, int groups, int axis = 1)
  output : Tensor(out)
  infer_meta :
    func : MaxOutInferMeta
  kernel :
    func : maxout
  backward : maxout_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : mean
  args : (Tensor x, IntArray axis={}, bool keepdim=false)
  output : Tensor(out)
  infer_meta :
    func : ReduceIntArrayAxisInferMeta
    spmd_rule : ReductionMeanInferSpmdDynamic
  kernel :
    func : mean
  backward : mean_grad

- op : mean_all
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : MeanAllInferMeta
  kernel :
    func : mean_all
  backward : mean_all_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : memcpy_d2h
  args : (Tensor x, int dst_place_type)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : memcpy_d2h

- op : memcpy_h2d
  args : (Tensor x, int dst_place_type)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : memcpy_h2d

- op : memory_efficient_attention
  args : (Tensor query, Tensor key, Tensor value, Tensor bias, Tensor cu_seqlens_q, Tensor cu_seqlens_k, Tensor causal_diagonal, Tensor seqlen_k, Scalar max_seqlen_q, Scalar max_seqlen_k, bool causal, double dropout_p, float scale, bool is_test)
  output : Tensor(output), Tensor(logsumexp), Tensor(seed_and_offset)
  infer_meta :
    func : MemoryEfficientAttentionInferMeta
  kernel :
    func : memory_efficient_attention
    data_type : query
  optional : bias, cu_seqlens_q, cu_seqlens_k, causal_diagonal, seqlen_k
  backward : memory_efficient_attention_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : merge_selected_rows
  args : (Tensor x)
  output : Tensor(out)
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : merge_selected_rows {selected_rows -> selected_rows}

- op : merged_adam_
  args : (Tensor[] param, Tensor[] grad, Tensor[] learning_rate, Tensor[] moment1, Tensor[] moment2, Tensor[] beta1_pow, Tensor[] beta2_pow, Tensor[] master_param, Scalar beta1 = 0.9f, Scalar beta2 = 0.999f, Scalar epsilon = 1.0e-8f, bool multi_precision = false, bool use_global_beta_pow = false)
  output : Tensor[](param_out){param.size()}, Tensor[](moment1_out){param.size()}, Tensor[](moment2_out){param.size()}, Tensor[](beta1_pow_out){param.size()}, Tensor[](beta2_pow_out){param.size()}, Tensor[](master_param_out){param.size()}
  infer_meta :
    func : MergedAdamInferMeta
  kernel :
    func : merged_adam
    data_type : param
  optional: master_param, master_param_out
  inplace : (param -> param_out), (moment1 -> moment1_out), (moment2 -> moment2_out), (beta1_pow -> beta1_pow_out), (beta2_pow -> beta2_pow_out), (master_param -> master_param_out)
  traits : pir::SideEffectTrait

- op : merged_momentum_
  args : (Tensor[] param, Tensor[] grad, Tensor[] velocity, Tensor[] learning_rate, Tensor[] master_param, float mu, bool use_nesterov = false, str[] regularization_method = {}, float[] regularization_coeff = {}, bool multi_precision = false, float rescale_grad = 1.0f)
  output : Tensor[](param_out){param.size()}, Tensor[](velocity_out){param.size()}, Tensor[](master_param_out){param.size()}
  infer_meta :
    func : MergedMomentumInferMeta
  kernel :
    func : merged_momentum
    data_type : param
  optional: master_param, master_param_out
  inplace : (param -> param_out), (velocity -> velocity_out), (master_param -> master_param_out)
  traits : pir::SideEffectTrait

- op : meshgrid
  args : (Tensor[] inputs)
  output : Tensor[]{inputs.size()}
  infer_meta :
    func : MeshgridInferMeta
  kernel :
    func : meshgrid
    data_type : inputs
  backward : meshgrid_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : mish
  args : (Tensor x, float lambda)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : mish
  backward : mish_grad

- op : mode
  args : (Tensor x, int axis = -1, bool keepdim = false)
  output : Tensor(out), Tensor(indices)
  infer_meta :
    func : ModeInferMeta
  kernel :
    func : mode
  backward : mode_grad

- op : momentum_
  args : (Tensor param, Tensor grad, Tensor velocity, Tensor learning_rate, Tensor master_param, float mu, bool use_nesterov = false, str regularization_method = "", float regularization_coeff = 0.0f, bool multi_precision = false, float rescale_grad = 1.0f)
  output : Tensor(param_out), Tensor(velocity_out), Tensor(master_param_out)
  infer_meta:
    func : MomentumInferMeta
  kernel :
    func : momentum {dense, dense, dense, dense, dense -> dense, dense, dense},
           momentum_dense_param_sparse_grad {dense, selected_rows, dense, dense, dense -> dense, dense, dense}
    data_type : param
  optional : master_param, master_param_out
  inplace : (param -> param_out), (velocity -> velocity_out), (master_param -> master_param_out)
  traits : pir::SideEffectTrait

- op : multi_dot
  args : (Tensor[] x)
  output : Tensor
  infer_meta :
    func : MultiDotInferMeta
  kernel :
    func : multi_dot
  backward : multi_dot_grad

- op : multiclass_nms3
  args : (Tensor bboxes, Tensor scores, Tensor rois_num, float score_threshold, int nms_top_k, int keep_top_k, float nms_threshold=0.3, bool normalized=true, float nms_eta=1.0, int background_label=0)
  output : Tensor(out), Tensor(index), Tensor(nms_rois_num)
  infer_meta :
    func : MultiClassNMSInferMeta
  kernel :
    func : multiclass_nms3
    data_type : scores
  optional : rois_num, nms_rois_num

- op : multinomial
  args : (Tensor x, Scalar(int) num_samples = 1, bool replacement = false)
  output : Tensor(out)
  infer_meta :
    func : MultinomialInferMeta
  kernel :
    func : multinomial
    data_type : x
  traits : paddle::dialect::ForwardOnlyTrait

- op : multiplex
  args : (Tensor[] inputs, Tensor index)
  output : Tensor
  infer_meta :
    func : MultiplexInferMeta
  kernel :
    func : multiplex
    data_type : inputs
  backward : multiplex_grad
  data_transform :
    skip_transform : index

- op : mv
  args : (Tensor x, Tensor vec)
  output : Tensor
  infer_meta :
    func : MvInferMeta
  kernel :
    func : mv
  backward : mv_grad

- op : nadam_
  args : (Tensor param, Tensor grad, Tensor learning_rate, Tensor momentum_decay_pow, Tensor beta2_pow, Tensor mu_product, Tensor moment1, Tensor moment2, Tensor master_param, float beta1 = 0.9f, float beta2 = 0.999f, float epsilon = 1.0e-8f, float momentum_decay = 0.004f, bool multi_precision = false)
  output : Tensor(param_out), Tensor(momentum_decay_pow_out), Tensor(beta2_pow_out), Tensor(mu_product_out), Tensor(moment1_out), Tensor(moment2_out), Tensor(master_param_out)
  infer_meta :
    func : NAdamInferMeta
  kernel :
    func : nadam
    data_type : param
  optional : master_param, master_param_out
  inplace : (param -> param_out), (momentum_decay_pow -> momentum_decay_pow_out), (beta2_pow -> beta2_pow_out), (mu_product -> mu_product_out), (moment1 -> moment1_out), (moment2 -> moment2_out), (master_param->master_param_out)
  traits : pir::SideEffectTrait

- op : nanmedian
  args : (Tensor x, IntArray axis = {}, bool keepdim = true, str mode="avg")
  output : Tensor(out), Tensor(medians)
  infer_meta :
    func : NanmedianInferMeta
  kernel :
    func : nanmedian
  backward : nanmedian_grad

- op : nearest_interp
  args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_format="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1)
  output : Tensor(output)
  infer_meta :
    func : InterpolateInferMeta
  optional: out_size, size_tensor, scale_tensor
  kernel :
    func : nearest_interp
    data_type : x
  backward : nearest_interp_grad
  data_transform :
    skip_transform : out_size, size_tensor, scale_tensor
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : nextafter
  args : (Tensor x, Tensor y)
  output : Tensor(out)
  infer_meta :
    func : ElementwiseInferMeta
    param: [x, y]
  kernel :
    func : nextafter
    data_type : x
  traits : paddle::dialect::ForwardOnlyTrait

- op : nll_loss
  args : (Tensor input, Tensor label, Tensor weight, int64_t ignore_index = -100, str reduction = "mean")
  output : Tensor(out), Tensor(total_weight)
  infer_meta :
    func : NllLossRawInferMeta
  kernel :
    func : nll_loss
    data_type : input
  optional : weight
  backward : nll_loss_grad

- op : nms
  args : (Tensor x, float threshold = 1.0f)
  output : Tensor(out)
  infer_meta :
    func : NMSInferMeta
  kernel :
    func : nms
    data_type : x

- op : nonzero
  args : (Tensor condition)
  output : Tensor(out)
  infer_meta :
    func : NonZeroInferMeta
  kernel :
    func : nonzero
    data_type: condition
  interfaces : paddle::dialect::InferSymbolicShapeInterface
  traits : paddle::dialect::ForwardOnlyTrait

- op : norm
  args : (Tensor x, int axis, float epsilon, bool is_test)
  output : Tensor(out), Tensor(norm)
  infer_meta :
    func : NormInferMeta
  kernel :
    func : norm
  backward : norm_grad

- op : npu_identity
  args : (Tensor x, int format = -1)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : npu_identity

- op : numel
  args : (Tensor x)
  output : Tensor(size)
  infer_meta :
    func : NumelInferMeta
    spmd_rule : NumelInferSpmd
  kernel :
    func : numel
    data_type : x
  data_transform:
    skip_transform : x
  no_need_buffer : x
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : one_hot
  args : (Tensor x, Scalar(int) num_classes)
  output : Tensor(out)
  infer_meta :
    func : OneHotInferMeta
  kernel :
    func : one_hot
  traits : paddle::dialect::ForwardOnlyTrait

- op : ones
  args : (IntArray shape, DataType dtype=DataType::FLOAT32, Place place=CPUPlace())
  output : Tensor(out)
  invoke : full(shape, 1, dtype, place)

- op : ones_like
  args : (Tensor x, DataType dtype=DataType::UNDEFINED, Place place={})
  output : Tensor(out)
  invoke : full_like(x, 1, dtype, place)
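
# Schema note (annotation): an `invoke :` entry defines no kernel of its own;
# the generated API simply forwards to an existing op, so `ones` and
# `ones_like` above are thin sugar over `full` / `full_like` with the value
# fixed to 1. A hypothetical `twos` op would follow the same pattern:
#
#   - op : twos
#     args : (IntArray shape, DataType dtype=DataType::FLOAT32, Place place=CPUPlace())
#     output : Tensor(out)
#     invoke : full(shape, 2, dtype, place)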

- op : overlap_add
  args: (Tensor x, int hop_length, int axis=-1)
  output: Tensor
  infer_meta:
    func: OverlapAddInferMeta
  kernel:
    func: overlap_add
    data_type : x
  backward: overlap_add_grad

- op : p_norm
  args : (Tensor x, float porder=2, int axis=-1, float epsilon=1.0e-12f, bool keepdim=false, bool asvector=false)
  output : Tensor(out)
  infer_meta :
    func : PNormInferMeta
  kernel :
    func : p_norm
  backward : p_norm_grad

- op : pad
  args : (Tensor x, int[] paddings, Scalar pad_value)
  output : Tensor
  infer_meta :
    func : PadInferMeta
  kernel :
    func : pad
  backward : pad_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : pad3d
  args : (Tensor x, IntArray paddings, str mode = "constant", float pad_value = 0.0, str data_format = "NCDHW")
  output : Tensor(out)
  infer_meta :
    func : Pad3dInferMeta
  kernel :
    func : pad3d
  backward : pad3d_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : partial_concat
  args : (Tensor[] x, int start_index = 0, int length = -1)
  output : Tensor(out)
  infer_meta :
    func : PartialConcatInferMeta
  kernel :
    func : partial_concat
    data_type : x
  backward : partial_concat_grad

- op : partial_sum
  args : (Tensor[] x, int start_index = 0, int length = -1)
  output : Tensor(out)
  infer_meta :
    func : PartialSumInferMeta
  kernel :
    func : partial_sum
    data_type : x
  backward : partial_sum_grad

- op : pixel_shuffle
  args : (Tensor x, int upscale_factor=1, str data_format="NCHW")
  output : Tensor
  infer_meta :
    func : PixelShuffleInferMeta
  kernel :
    func : pixel_shuffle
  backward : pixel_shuffle_grad

- op : pixel_unshuffle
  args : (Tensor x, int downscale_factor=1, str data_format="NCHW")
  output : Tensor
  infer_meta :
    func : PixelUnshuffleInferMeta
  kernel :
    func : pixel_unshuffle
  backward : pixel_unshuffle_grad

- op : poisson
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : poisson
  backward : poisson_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : polygamma
  args : (Tensor x, int n)
  output : Tensor(out)
  infer_meta :
    func : UnchangedInferMeta
    param: [x]
  kernel :
    func : polygamma
  inplace: (x -> out)
  backward : polygamma_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : pool2d
  args : (Tensor x, IntArray kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
  output : Tensor(out)
  infer_meta :
    func : Pool2DInferMeta
    param : [x, kernel_size, strides, paddings, ceil_mode, exclusive, data_format, pooling_type, global_pooling, adaptive, padding_algorithm]
  kernel :
    func : pool2d
    param : [x, kernel_size, strides, paddings, ceil_mode, exclusive, data_format, pooling_type, global_pooling, adaptive, padding_algorithm]
  backward : pool2d_grad
  interfaces : paddle::dialect::LayoutTransformationInterface, paddle::dialect::InferSymbolicShapeInterface

- op : pool3d
  args : (Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
  output : Tensor(out)
  infer_meta :
    func : PoolInferMeta
    param : [x, kernel_size, strides, paddings, ceil_mode, exclusive, data_format, pooling_type, global_pooling, adaptive, padding_algorithm]
  kernel :
    func : pool3d
    param : [x, kernel_size, strides, paddings, ceil_mode, exclusive, data_format, pooling_type, global_pooling, adaptive, padding_algorithm]
  backward : pool3d_grad

- op : pow
  args : (Tensor x, Scalar y=1.0f)
  output : Tensor(out)
  infer_meta :
    func : UnchangedInferMeta
    param: [x]
    spmd_rule: PowInferSpmd
  kernel :
    func : pow
    data_type : x
  inplace: (x -> out)
  backward : pow_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : prelu
  args : (Tensor x, Tensor alpha, str data_format="NCHW", str mode="all")
  output : Tensor(out)
  infer_meta :
    func : PReluInferMeta
  kernel :
    func : prelu
    data_type : x
  backward : prelu_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : prior_box
  args : (Tensor input, Tensor image, float[] min_sizes, float[] max_sizes = {}, float[] aspect_ratios = {}, float[] variances = {}, bool flip=true, bool clip=true, float step_w=0.0, float step_h=0.0, float offset=0.5, bool min_max_aspect_ratios_order=false)
  output : Tensor(out), Tensor(var)
  infer_meta :
    func : PriorBoxInferMeta
  kernel :
    func : prior_box
    data_type : input
  traits : paddle::dialect::ForwardOnlyTrait

- op : prod
  args : (Tensor x, IntArray axis, bool keepdim, bool reduce_all)
  output : Tensor
  infer_meta :
    func : ReduceIntArrayAxisInferMetaBase
  kernel :
    func : prod
  backward : prod_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : prune_gate_by_capacity
  args : (Tensor gate_idx, Tensor expert_count, int64_t n_expert=0, int64_t n_worker=0)
  output : Tensor(out_gate_idx)
  infer_meta :
    func : PruneGateByCapacityInferMeta
  kernel :
    func : prune_gate_by_capacity
    data_type : gate_idx

- op : psroi_pool
  args : (Tensor x, Tensor boxes, Tensor boxes_num, int pooled_height=1, int pooled_width=1, int output_channels=1, float spatial_scale=1.0)
  output : Tensor
  infer_meta :
    func : PsroiPoolInferMeta
  kernel :
    func : psroi_pool
    data_type : x
  optional : boxes_num
  backward : psroi_pool_grad

- op : put_along_axis
  args : (Tensor arr, Tensor indices, Tensor values, int axis, str reduce = "assign", bool include_self = true)
  output : Tensor(out)
  infer_meta :
    func : UnchangedInferMeta
    param : [arr]
  kernel :
    func : put_along_axis
    data_type : arr
  inplace : (arr -> out)
  backward : put_along_axis_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : pyramid_hash
  args: (Tensor x, Tensor w, Tensor white_list, Tensor black_list, int num_emb = 0, int space_len = 0, int pyramid_layer = 2, int rand_len = 0, float drop_out_percent = 0, int is_training = 0, bool use_filter = true, int white_list_len = 0, int black_list_len = 0, int seed = 0, float lr = 0.0, str distribute_update_vars = "")
  output: Tensor (out), Tensor (drop_pos), Tensor (x_temp_out)
  infer_meta:
    func: PyramidHashInferMeta
  kernel:
    func: pyramid_hash
    data_type: w
  intermediate: x_temp_out
  backward: pyramid_hash_grad

- op : qr
  args : (Tensor x, str mode = "reduced")
  output : Tensor(q), Tensor(r)
  infer_meta :
    func : QrInferMeta
  kernel :
    func : qr
  backward : qr_grad

- op : radam_
  args : (Tensor param, Tensor grad, Tensor learning_rate, Tensor beta1_pow, Tensor beta2_pow, Tensor rho, Tensor moment1, Tensor moment2, Tensor master_param, float beta1 = 0.9f, float beta2 = 0.999f, float epsilon = 1.0e-8f, bool multi_precision = false)
  output : Tensor(param_out), Tensor(beta1_pow_out), Tensor(beta2_pow_out), Tensor(rho_out), Tensor(moment1_out), Tensor(moment2_out), Tensor(master_param_out)
  infer_meta :
    func : RAdamInferMeta
  kernel :
    func : radam
    data_type : param
  optional : master_param, master_param_out
  inplace : (param -> param_out), (beta1_pow -> beta1_pow_out), (beta2_pow -> beta2_pow_out), (rho -> rho_out), (moment1 -> moment1_out), (moment2 -> moment2_out), (master_param->master_param_out)
  traits : pir::SideEffectTrait

- op : randint
  args : (int low, int high, IntArray shape, DataType dtype=DataType::INT64, Place place={})
  output : Tensor(out)
  infer_meta :
    func : RandintInferMeta
    param : [low, high, shape, dtype]
  kernel :
    func : randint
    param : [low, high, shape, dtype]
    data_type : dtype
    backend : place
  interfaces : paddle::dialect::InferSymbolicShapeInterface
  traits : pir::SideEffectTrait, paddle::dialect::ForwardOnlyTrait

- op : random_routing
  args : (Tensor prob, Tensor topk_value, Tensor topk_idx)
  output : Tensor(out)
  infer_meta :
    func : RandomRoutingInferMeta
  kernel :
    func : random_routing
    data_type : prob
  inplace : (topk_idx -> out)
  traits : pir::SideEffectTrait

- op : randperm
  args : (int n, DataType dtype, Place place={})
  output : Tensor(out)
  infer_meta :
    func : RandpermInferMeta
    param : [n, dtype]
  kernel :
    func : randperm
    param : [n, dtype]
    data_type : dtype
    backend : place
  traits : pir::SideEffectTrait

- op : rank_attention
  args : (Tensor x, Tensor rank_offset, Tensor rank_param, int max_rank = 3, int max_size = 0)
  output : Tensor(input_help), Tensor(out), Tensor(ins_rank)
  infer_meta :
    func : RankAttentionInferMeta
  kernel :
    func : rank_attention
    data_type : x
  backward : rank_attention_grad
  optional : ins_rank, input_help

- op : read_file
  args : (str filename = "", DataType dtype=DataType::UINT8, Place place=CPUPlace())
  output : Tensor(out)
  infer_meta :
    func : ReadFileInferMeta
    param : [filename]
  kernel :
    func : read_file
    param : [filename]
    data_type : dtype
    backend : place

- op : real
  args : (Tensor x)
  output : Tensor (out)
  infer_meta :
    func : RealAndImagInferMeta
  kernel :
    func : real
  backward : real_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : reciprocal
  args : (Tensor x)
  output : Tensor(out)
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : reciprocal
  inplace : (x -> out)
  backward : reciprocal_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : reduce_as
  args : (Tensor x, Tensor target)
  output : Tensor(out)
  infer_meta :
    func : ReduceAsInferMeta
  kernel :
    func : reduce_as
    data_type : x
  backward : reduce_as_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : reduce_scatter
  args : (Tensor x, int ring_id = 0, int nranks = 1)
  output : Tensor(out)
  infer_meta :
    func : ReduceScatterInferMeta
    param: [x, nranks]
  kernel :
    func : reduce_scatter
    param: [x, nranks]

- op : reindex_graph
  args : (Tensor x, Tensor neighbors, Tensor count, Tensor hashtable_value, Tensor hashtable_index)
  output : Tensor(reindex_src), Tensor(reindex_dst), Tensor(out_nodes)
  infer_meta :
    func : GraphReindexInferMeta
  kernel :
    func : graph_reindex
    data_type : x
  optional : hashtable_value, hashtable_index

- op : relu
  args : (Tensor x)
  output : Tensor(out)
  infer_meta :
    func : UnchangedInferMeta
    spmd_rule : ElementwiseUnaryInferSpmd
  kernel :
    func : relu
  inplace : (x -> out)
  backward : relu_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : relu6
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : relu6
  backward : relu6_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : renorm
  args : (Tensor x, float p, int axis, float max_norm)
  output : Tensor(out)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : renorm
  inplace: (x -> out)
  backward : renorm_grad

- op : repeat_interleave
  args : (Tensor x, int repeats, int axis)
  output : Tensor(out)
  infer_meta :
    func : RepeatInterleaveInferMeta
  kernel :
    func : repeat_interleave
    data_type : x
  backward: repeat_interleave_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : repeat_interleave_with_tensor_index
  args : (Tensor x, Tensor repeats, int axis)
  output : Tensor(out)
  infer_meta :
    func : RepeatInterleaveWithTensorIndexInferMeta
  kernel :
    func : repeat_interleave_with_tensor_index
    data_type : x
  backward: repeat_interleave_with_tensor_index_grad

- op : reverse
  args : (Tensor x, IntArray axis)
  output : Tensor
  infer_meta :
    func : ReverseInferMeta
  kernel :
    func : reverse
    data_type : x
  backward : reverse_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : rms_norm
  args : (Tensor x, Tensor bias, Tensor residual, Tensor norm_weight, Tensor norm_bias, float epsilon, int begin_norm_axis, float quant_scale, int quant_round_type, float quant_max_bound, float quant_min_bound)
  output : Tensor(out), Tensor(residual_out), Tensor(inv_var)
  infer_meta :
    func : RmsNormInferMeta
  kernel :
    func : rms_norm
    data_type : x
  optional : bias, residual, norm_bias, residual_out
  intermediate : inv_var
  backward : rms_norm_grad

- op : rmsprop_
  args : (Tensor param, Tensor mean_square, Tensor grad, Tensor moment, Tensor learning_rate, Tensor mean_grad, Tensor master_param, float epsilon = 1.0e-10f, float decay = 0.9f, float momentum = 0.0f, bool centered = false, bool multi_precision = false)
  output : Tensor(param_out), Tensor(moment_out), Tensor(mean_square_out), Tensor(mean_grad_out), Tensor(master_param_outs)
  infer_meta :
    func : RmspropInferMeta
  kernel :
    func : rmsprop {dense, dense, dense, dense, dense, dense, dense -> dense, dense, dense, dense, dense}
           rmsprop_dense_param_sparse_grad {dense, dense, selected_rows, dense, dense, dense, dense -> dense, dense, dense, dense, dense}
    data_type : param
  optional : mean_grad, master_param, master_param_outs
  inplace : (param -> param_out), (moment -> moment_out), (mean_square -> mean_square_out), (mean_grad -> mean_grad_out), (master_param->master_param_outs)
  traits : pir::SideEffectTrait
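
# Schema note (annotation, assuming the usual kernel-signature syntax): a
# `func :` key may list several PHI kernels with `{inputs -> outputs}` type
# signatures, as in rmsprop_ above; dispatch selects the kernel whose signature
# matches the runtime tensor types:
#
#   dense gradient         -> rmsprop {dense, dense, dense, ... -> dense, ...}
#   selected_rows gradient -> rmsprop_dense_param_sparse_grad {dense, dense, selected_rows, ... -> dense, ...}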

- op : rnn
  args: (Tensor x, Tensor[] pre_state, Tensor[] weight_list, Tensor sequence_length, Tensor dropout_state_in, float dropout_prob=0.0, bool is_bidirec=false, int input_size=10, int hidden_size=100, int num_layers=1, str mode="RNN_TANH", int seed=0, bool is_test=false)
  output: Tensor(out), Tensor(dropout_state_out), Tensor[](state){pre_state.size()}, Tensor(reserve)
  infer_meta:
    func: RnnInferMeta
    param : [x, pre_state, weight_list, sequence_length, dropout_prob, is_bidirec, input_size, hidden_size, num_layers, mode, seed, is_test]
  kernel:
    func: rnn
    param : [x, pre_state, weight_list, sequence_length, dropout_prob, is_bidirec, input_size, hidden_size, num_layers, mode, seed, is_test]
    data_type: x
  backward: rnn_grad
  optional : sequence_length
  intermediate : reserve
  view : (dropout_state_in -> dropout_state_out)

- op : roi_align
  args : (Tensor x, Tensor boxes, Tensor boxes_num, int pooled_height=1, int pooled_width=1, float spatial_scale=1.0, int sampling_ratio=-1, bool aligned=false)
  output : Tensor
  infer_meta :
    func : RoiAlignInferMeta
  kernel :
    func : roi_align
    data_type : x
  optional : boxes_num
  backward : roi_align_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : roi_pool
  args : (Tensor x, Tensor boxes, Tensor boxes_num, int pooled_height=1, int pooled_width=1, float spatial_scale=1.0)
  output : Tensor(out), Tensor(arg_max)
  infer_meta :
    func : RoiPoolInferMeta
  kernel :
    func : roi_pool
    data_type : x
  optional : boxes_num
  intermediate : arg_max
  backward : roi_pool_grad

- op : roll
  args : (Tensor x, IntArray shifts={}, int64_t[] axis={})
  output : Tensor(out)
  infer_meta :
    func : RollInferMeta
  kernel :
    func : roll
    data_type : x
  backward : roll_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : round
  args : (Tensor x, int decimals = 0)
  output : Tensor(out)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : round
  inplace : (x -> out)
  backward : round_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : rprop_
  args : (Tensor param, Tensor grad, Tensor prev, Tensor learning_rate, Tensor master_param, Tensor learning_rate_range, Tensor etas, bool multi_precision=false)
  output : Tensor(param_out), Tensor(prev_out), Tensor(learning_rate_out), Tensor(master_param_out)
  infer_meta :
    func : RpropInferMeta
  kernel :
    func : rprop
    data_type : param
  data_transform :
    support_trans_dtype : learning_rate
  optional : master_param, master_param_out
  inplace : (param -> param_out), (prev -> prev_out), (learning_rate -> learning_rate_out), (master_param -> master_param_out)
  traits : pir::SideEffectTrait

- op : rrelu
  args : (Tensor x, float lower=1.0f/8, float upper=1.0f/3, bool is_test=false)
  output : Tensor(out), Tensor(noise)
  infer_meta :
    func : RReluInferMeta
  kernel :
    func : rrelu
    data_type : x
  intermediate : noise
  backward : rrelu_grad

- op : rsqrt
  args : (Tensor x)
  output : Tensor(out)
  infer_meta :
    func : UnchangedInferMeta
    spmd_rule : ElementwiseUnaryInferSpmd
  kernel :
    func : rsqrt
  inplace : (x -> out)
  backward : rsqrt_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : scale
  args : (Tensor x, Scalar scale=1.0, Scalar bias=0.0, bool bias_after_scale=true)
  output : Tensor(out)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
    spmd_rule : ScaleInferSpmd
  kernel :
    func : scale {dense -> dense},
           scale_sr {selected_rows -> selected_rows}
    data_type : x
  inplace : (x -> out)
  backward : scale_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : scatter
  args : (Tensor x, Tensor index, Tensor updates, bool overwrite=true)
  output : Tensor(out)
  infer_meta :
    func : ScatterInferMeta
  kernel :
    func : scatter
    data_type : x
  inplace : (x -> out)
  backward : scatter_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : scatter_nd_add
  args : (Tensor x, Tensor index, Tensor updates)
  output : Tensor
  infer_meta :
    func : ScatterNdAddInferMeta
  kernel :
    func : scatter_nd_add
    data_type : x
  backward : scatter_nd_add_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : searchsorted
  args : (Tensor sorted_sequence, Tensor values, bool out_int32 = false, bool right = false)
  output : Tensor(out)
  infer_meta :
    func : SearchsortedInferMeta
  kernel :
    func : searchsorted
    data_type : sorted_sequence
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : segment_pool
  args : (Tensor x, Tensor segment_ids, str pooltype="SUM")
  output : Tensor(out), Tensor(summed_ids)
  infer_meta :
    func : SegmentPoolInferMeta
  kernel :
    func : segment_pool
    data_type : x
  intermediate : summed_ids
  backward : segment_pool_grad

- op : selu
  args : (Tensor x, float scale=1.0507009873554804934193349852946, float alpha=1.6732632423543772848170429916717)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : selu
  backward : selu_grad

- op : send_u_recv
  args : (Tensor x, Tensor src_index, Tensor dst_index, str reduce_op = "SUM", IntArray out_size = {0})
  output : Tensor(out), Tensor(dst_count)
  infer_meta :
    func : SendURecvInferMeta
  kernel :
    func : send_u_recv
    data_type : x
  intermediate : dst_count
  backward : send_u_recv_grad

- op : send_ue_recv
  args : (Tensor x, Tensor y, Tensor src_index, Tensor dst_index, str message_op="ADD", str reduce_op="SUM", IntArray out_size={0})
  output : Tensor(out), Tensor(dst_count)
  infer_meta :
    func : SendUERecvInferMeta
  kernel :
    func : send_ue_recv
    data_type : x
  intermediate : dst_count
  backward : send_ue_recv_grad

- op : send_uv
  args : (Tensor x, Tensor y, Tensor src_index, Tensor dst_index, str message_op = "ADD")
  output : Tensor(out)
  infer_meta :
    func : SendUVInferMeta
  kernel :
    func : send_uv
    data_type : x
  backward : send_uv_grad

- op : sequence_conv
  args: (Tensor x, Tensor padding_data, Tensor filter, int context_length, bool padding_trainable = false, int context_start = 0, int context_stride = 1)
  output: Tensor (out)
  infer_meta:
    func: SequenceConvInferMeta
  kernel:
    func: sequence_conv
    data_type: x
  optional: padding_data
  backward: sequence_conv_grad

- op : sequence_mask
  args: (Tensor x, Scalar(int) max_len, DataType out_dtype)
  output: Tensor(y)
  infer_meta:
    func: SequenceMaskScalarInferMeta
  kernel:
    func: sequence_mask_scalar
    data_type : x

- op : sequence_pool
  args: (Tensor x, bool is_test=false, str pooltype = "AVERAGE", float pad_value = 0.0)
  output: Tensor (out), Tensor (max_index)
  infer_meta:
    func: SequencePoolInferMeta
  kernel:
    func: sequence_pool
  intermediate: max_index
  backward: sequence_pool_grad
is_test=false, str pooltype = "AVERAGE", float pad_value = 0.0) - output: Tensor (out), Tensor (max_index) - infer_meta: - func: SequencePoolInferMeta - kernel: - func: sequence_pool - intermediate: max_index - backward: sequence_pool_grad - -- op : set_value_with_tensor - args : (Tensor x, Tensor values, IntArray starts, IntArray ends, IntArray steps, int64_t[] axes, int64_t[] decrease_axes, int64_t[] none_axes) - output : Tensor(out) - inplace: (x -> out) - infer_meta: - func: SetValueInferMeta - param: [x] - kernel: - func: set_value_with_tensor - backward: set_value_with_tensor_grad - -- op : sgd_ - args : (Tensor param, Tensor learning_rate, Tensor grad, Tensor master_param, bool multi_precision=false) - output : Tensor(param_out), Tensor(master_param_out) - infer_meta : - func : SgdInferMeta - spmd_rule : SgdInferSpmd - kernel : - func : sgd {dense, dense, dense, dense -> dense, dense}, - sgd_dense_param_sparse_grad {dense, dense, selected_rows, dense -> dense, dense}, - sgd_sparse_param_sparse_grad {selected_rows, dense, selected_rows, selected_rows -> selected_rows, selected_rows} - data_type : param - data_transform : - support_trans_dtype : learning_rate - optional : master_param, master_param_out - inplace : (param -> param_out), (master_param -> master_param_out) - traits : pir::SideEffectTrait - -- op : shape - args : (Tensor input) - output : Tensor(out) - infer_meta : - func : ShapeInferMeta - kernel : - func : shape {dense -> dense}, - shape_sr {selected_rows -> dense} - data_transform : - skip_transform : input - interfaces : paddle::dialect::InferSymbolicShapeInterface - traits : paddle::dialect::ForwardOnlyTrait - -- op : shard_index - args : (Tensor input, int index_num, int nshards, int shard_id, int ignore_value=-1) - output : Tensor(out) - infer_meta : - func : ShardIndexInferMeta - kernel : - func : shard_index - -- op : share_data - args: (Tensor x) - output: Tensor (out) - infer_meta: - func: ShareDataInferMeta - kernel: - func: share_data {dense -> dense} - share_data_sr {selected_rows -> selected_rows} - -- op : shuffle_batch - args : (Tensor x, Tensor seed, int startup_seed=0) - output : Tensor(out), Tensor(shuffle_idx), Tensor(seed_out) - infer_meta: - func: ShuffleBatchInferMeta - kernel: - func: shuffle_batch - data_type: x - backward : shuffle_batch_grad - traits : pir::SideEffectTrait - data_transform : - skip_transform : seed - -- op : shuffle_channel - args : (Tensor x, int group = 1) - output : Tensor(out) - infer_meta : - func : ShuffleChannelInferMeta - kernel : - func : shuffle_channel - backward : shuffle_channel_grad - -- op : sigmoid - args : (Tensor x) - output : Tensor - infer_meta : - func : UnchangedInferMeta - kernel : - func : sigmoid - inplace : (x -> out) - backward : sigmoid_grad - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : sigmoid_cross_entropy_with_logits - args : (Tensor x, Tensor label, Tensor pos_weight, bool normalize=false, int ignore_index=-100) - output : Tensor - infer_meta : - func : SigmoidCrossEntropyWithLogitsInferMeta - kernel : - func : sigmoid_cross_entropy_with_logits - inplace : (x -> out) - backward : sigmoid_cross_entropy_with_logits_grad - optional : pos_weight - -- op : sign - args : (Tensor x) - output : Tensor(out) - infer_meta : - func : UnchangedInferMeta - kernel : - func : sign - backward : sign_grad - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : silu - args : (Tensor x) - output : Tensor - infer_meta : - func : UnchangedInferMeta - spmd_rule : 
ElementwiseUnaryInferSpmd - kernel : - func : silu - backward : silu_grad - interfaces : paddle::dialect::LayoutTransformationInterface - -- op : sin - args : (Tensor x) - output : Tensor(out) - infer_meta : - func : UnchangedInferMeta - spmd_rule : ElementwiseUnaryInferSpmd - kernel : - func : sin - inplace : (x -> out) - backward : sin_grad - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : sinh - args : (Tensor x) - output : Tensor(out) - infer_meta : - func : UnchangedInferMeta - kernel : - func : sinh - inplace: (x -> out) - backward : sinh_grad - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : slice - args : (Tensor input, int64_t[] axes, IntArray starts, IntArray ends, int64_t[] infer_flags, int64_t[] decrease_axis) - output : Tensor - infer_meta : - func : SliceRawInferMeta - spmd_rule : SliceInferSpmdDynamic - kernel : - func : slice - backward : slice_grad - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : slogdet - args : (Tensor x) - output : Tensor - infer_meta : - func : UnchangedInferMeta - kernel : - func : slogdet - backward : slogdet_grad - -- op : softplus - args : (Tensor x, float beta = 1.0, float threshold = 20.0f) - output : Tensor - infer_meta : - func : UnchangedInferMeta - param : [x] - kernel : - func : softplus - backward : softplus_grad - -- op : softshrink - args : (Tensor x, float threshold = 0.5) - output : Tensor - infer_meta : - func : UnchangedInferMeta - param : [x] - kernel : - func : softshrink - backward : softshrink_grad - -- op : softsign - args : (Tensor x) - output : Tensor - infer_meta : - func : UnchangedInferMeta - param : [x] - kernel : - func : softsign - backward : softsign_grad - -- op : solve - args : (Tensor x, Tensor y) - output : Tensor - infer_meta : - func : SolveInferMeta - kernel : - func : solve - data_type : x - backward : solve_grad - -- op : sparse_attention - args: (Tensor q, Tensor k, Tensor v, Tensor offset, Tensor columns, Tensor key_padding_mask, - Tensor attn_mask) - output: Tensor (out), Tensor (sparse_dot_sdd), Tensor (softmax) - infer_meta: - func: SparseAttentionInferMeta - kernel: - func: sparse_attention - data_type: q - optional: key_padding_mask, attn_mask - intermediate: sparse_dot_sdd, softmax - backward: sparse_attention_grad - -- op : spectral_norm - args : (Tensor weight, Tensor u, Tensor v, int dim = 0, int power_iters = 1, float eps = 1e-12f) - output : Tensor - infer_meta : - func : SpectralNormInferMeta - kernel : - func : spectral_norm - data_type : weight - backward : spectral_norm_grad - -- op : split - args : (Tensor x, IntArray sections, Scalar(int) axis) - output : Tensor[]{sections.size()} - infer_meta : - func : SplitInferMeta - spmd_rule : SplitInferSpmdDynamic - kernel : - func : split - backward : split_grad - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : split_with_num - args : (Tensor x, int num, Scalar(int) axis) - output : Tensor[]{num} - infer_meta : - func : SplitWithNumInferMeta - spmd_rule : SplitWithNumInferSpmdDynamic - kernel : - func : split_with_num - backward : split_with_num_grad - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : sqrt - args : (Tensor x) - output : Tensor(out) - infer_meta : - func : UnchangedInferMeta - kernel : - func : sqrt {dense -> dense}, - sqrt_sr {selected_rows -> selected_rows} - inplace : (x -> out) - backward : sqrt_grad - -- op : square - args : (Tensor x) - output : Tensor - infer_meta : - func : UnchangedInferMeta - spmd_rule : ElementwiseUnaryInferSpmd 
- kernel : - func : square {dense -> dense}, - square_sr {selected_rows -> selected_rows} - backward : square_grad - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : squared_l2_norm - args : (Tensor x) - output : Tensor(out) - infer_meta : - func : SquaredL2NormInferMeta - spmd_rule : SquaredL2NormInferSpmd - kernel : - func : squared_l2_norm - backward : squared_l2_norm_grad - -- op : squeeze - args : (Tensor x, IntArray axis={}) - output : Tensor(out), Tensor(xshape) - infer_meta : - func : SqueezeWithXShapeInferMeta - spmd_rule : SqueezeInferSpmd - kernel : - func : squeeze - data_type : x - inplace : (x -> out) - view: (x -> out) - intermediate : xshape - backward : squeeze_grad - interfaces : paddle::dialect::InferSymbolicShapeInterface, paddle::dialect::LayoutTransformationInterface - -- op : stack - args : (Tensor[] x, int axis = 0) - output : Tensor (out) - infer_meta : - func : StackInferMeta - spmd_rule : StackInferSpmd - kernel : - func : stack - backward : stack_grad - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : standard_gamma - args : (Tensor x) - output : Tensor(out) - infer_meta : - func : UnchangedInferMeta - kernel : - func : standard_gamma - -- op : stanh - args : (Tensor x, float scale_a=0.67f, float scale_b=1.7159f) - output : Tensor(out) - infer_meta : - func : UnchangedInferMeta - param : [x] - kernel : - func : stanh - backward : stanh_grad - -- op : stft - args: (Tensor x, Tensor window, int n_fft, int hop_length, bool normalized, bool onesided) - output: Tensor (out) - infer_meta: - func: StftInferMeta - kernel: - func: stft - data_type: x - backward: stft_grad - -- op : strided_slice - args : (Tensor x, int[] axes, IntArray starts, IntArray ends, IntArray strides) - output : Tensor - infer_meta : - func : StridedSliceInferMeta - spmd_rule : StridedSliceInferSpmdDynamic - kernel : - func : strided_slice - backward : strided_slice_grad - -- op : sum - args : (Tensor x, IntArray axis={}, DataType dtype=DataType::UNDEFINED, bool keepdim=false) - output : Tensor(out) - infer_meta : - func : SumInferMeta - spmd_rule : ReductionSumInferSpmdDynamic - kernel : - func : sum - data_type : x - backward : sum_grad - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : svd - args : (Tensor x, bool full_matrices = false) - output : Tensor(u), Tensor(s), Tensor(vh) - infer_meta : - func : SvdInferMeta - kernel : - func : svd - backward : svd_grad - -- op : swiglu - args : (Tensor x, Tensor y) - output : Tensor(out) - infer_meta : - func : SwiGLUInferMeta - spmd_rule : SwiGLUInferSpmd - kernel : - func : swiglu - optional : y - backward: swiglu_grad - -- op : swish - args : (Tensor x) - output : Tensor(out) - infer_meta : - func : UnchangedInferMeta - param : [x] - kernel : - func : swish - backward : swish_grad - interfaces : paddle::dialect::InferSymbolicShapeInterface, paddle::dialect::LayoutTransformationInterface - -- op : sync_batch_norm_ - args : (Tensor x, Tensor mean, Tensor variance, Tensor scale, Tensor bias, bool is_test, float momentum, float epsilon, str data_format, bool use_global_stats, bool trainable_statistics) - output : Tensor(out), Tensor(mean_out), Tensor(variance_out), Tensor(saved_mean), Tensor(saved_variance), Tensor(reserve_space) - infer_meta : - func : BatchNormInferMeta - kernel : - func : sync_batch_norm - data_type : x - backward : sync_batch_norm_grad - inplace : (mean -> mean_out), (variance -> variance_out) - optional : reserve_space - -- op : take_along_axis - args : (Tensor arr, Tensor 
indices, int axis) - output : Tensor - infer_meta : - func : TakeAlongAxisInferMeta - param : [arr, indices, axis] - kernel : - func : take_along_axis - data_type : arr - backward : take_along_axis_grad - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : tan - args : (Tensor x) - output : Tensor(out) - infer_meta : - func : UnchangedInferMeta - kernel : - func : tan - inplace : (x -> out) - backward : tan_grad - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : tanh - args : (Tensor x) - output : Tensor(out) - infer_meta : - func : UnchangedInferMeta - kernel : - func : tanh - inplace : (x -> out) - backward : tanh_grad - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : tanh_shrink - args : (Tensor x) - output : Tensor - infer_meta : - func : UnchangedInferMeta - kernel : - func : tanh_shrink - backward : tanh_shrink_grad - -- op : tdm_child - args: (Tensor x, Tensor tree_info, int child_nums, DataType dtype = DataType::INT32) - output: Tensor (child), Tensor (leaf_mask) - infer_meta: - func: TdmChildInferMeta - kernel: - func: tdm_child - data_type: x - -- op : tdm_sampler - args: (Tensor x, Tensor travel, Tensor layer, bool output_positive=true, int[] neg_samples_num_list={}, int[] layer_offset_lod={}, int seed = 0, int dtype=2) - output: Tensor(out), Tensor(labels), Tensor(mask) - infer_meta: - func : TdmSamplerInferMeta - kernel: - func : tdm_sampler - data_type : x - optional : labels - -- op : temporal_shift - args : (Tensor x, int seg_num, float shift_ratio = 0.25f, str data_format = "NCHW") - output : Tensor(out) - infer_meta : - func : TemporalShiftInferMeta - kernel : - func : temporal_shift - data_type : x - backward : temporal_shift_grad - -- op : tensor_unfold - args : (Tensor input, int64_t axis, int64_t size, int64_t step) - output : Tensor - infer_meta : - func : StridedUnChangedInferMeta - param : [input] - kernel : - func : tensor_unfold - backward : tensor_unfold_grad - no_need_buffer : input - -- op : thresholded_relu - args : (Tensor x, float threshold = 1.0, float value = 0.0) - output : Tensor(out) - infer_meta : - func : UnchangedInferMeta - param : [x] - kernel : - func : thresholded_relu - inplace: (x -> out) - backward : thresholded_relu_grad - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : top_p_sampling - args : (Tensor x, Tensor ps, Tensor threshold, Tensor topp_seed, int seed=-1, int k=0, str mode="truncate") - output : Tensor (out), Tensor(ids), Tensor(topk_scores), Tensor(topk_ids) - infer_meta : - func : TopPSamplingInferMeta - kernel : - func : top_p_sampling - data_type : x - optional : threshold, topp_seed, topk_scores, topk_ids - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : topk - args : (Tensor x, Scalar(int) k = 1, int axis = -1, bool largest = true, bool sorted = true) - output : Tensor(out), Tensor(indices) - infer_meta : - func : TopKInferMeta - kernel : - func : topk - data_type : x - backward : topk_grad - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : trace - args : (Tensor x, int offset = 0, int axis1 = 0, int axis2 = 1) - output : Tensor - infer_meta : - func : TraceInferMeta - kernel : - func : trace - backward : trace_grad - -- op : trans_layout - args : (Tensor x, int[] perm) - output : Tensor - infer_meta : - func : TransposeInferMeta - kernel : - func : transpose - backward : trans_layout_grad - -- op : transpose - args : (Tensor x, int[] perm) - output : Tensor(out) - infer_meta : - func : TransposeInferMeta - 
spmd_rule: TransposeInferSpmd - kernel : - func : transpose - inplace : (x -> out) - backward : transpose_grad - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : triangular_solve - args : (Tensor x, Tensor y, bool upper=true, bool transpose=false, bool unitriangular=false) - output : Tensor - infer_meta : - func : TriangularSolveInferMeta - kernel : - func : triangular_solve - data_type : x - backward : triangular_solve_grad - -- op : tril - args : (Tensor x, int diagonal) - output : Tensor(out) - infer_meta : - func : TrilInferMeta - kernel : - func : tril - inplace: (x -> out) - backward : tril_grad - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : tril_indices - args : (int rows, int cols, int offset, DataType dtype, Place place={}) - output : Tensor(out) - infer_meta : - func : TrilIndicesInferMeta - param : [rows, cols, offset, dtype] - kernel : - func : tril_indices - param : [rows, cols, offset, dtype] - data_type : dtype - backend : place - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : trilinear_interp - args : (Tensor x, Tensor out_size, Tensor[] size_tensor, Tensor scale_tensor, str data_format="NCHW", int out_d=0, int out_h=0, int out_w=0, float[] scale={}, str interp_method="bilinear", bool align_corners=true, int align_mode=1) - output : Tensor(output) - infer_meta : - func : InterpolateInferMeta - optional: out_size, size_tensor, scale_tensor - kernel : - func : trilinear_interp - data_type : x - backward : trilinear_interp_grad - data_transform : - skip_transform : out_size, size_tensor, scale_tensor - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : triu - args : (Tensor x, int diagonal) - output : Tensor(out) - infer_meta : - func : TriuInferMeta - spmd_rule : TriuInferSpmd - kernel : - func : triu - inplace: (x -> out) - backward : triu_grad - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : triu_indices - args : (int row, int col, int offset, DataType dtype, Place place={}) - output : Tensor(out) - infer_meta : - func : TriuIndicesInferMeta - param : [row, col, offset, dtype] - kernel : - func : triu_indices - param : [row, col, offset, dtype] - data_type : dtype - backend : place - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : trunc - args : (Tensor input) - output : Tensor(out) - infer_meta : - func : UnchangedInferMeta - kernel : - func : trunc - inplace: (input -> out) - backward : trunc_grad - interfaces : paddle::dialect::InferSymbolicShapeInterface - -# python API: paddle.nn.initializer.TruncatedNormal -- op : truncated_gaussian_random - args : (int[] shape, float mean, float std, int seed, float a, float b, DataType dtype=DataType::FLOAT32, Place place={}) - output : Tensor(out) - infer_meta : - func : TruncatedGaussianRandomInferMeta - param : [shape, mean, std, seed, a, b, dtype] - kernel : - func : truncated_gaussian_random - param : [shape, mean, std, seed, a, b, dtype] - backend : place - data_type : dtype - traits : pir::SideEffectTrait - -- op : unbind - args : (Tensor input, int axis = 0) - output : Tensor[] {axis<0 ? 
input.dims()[input.dims().size()+axis]:input.dims()[axis]} - infer_meta : - func : UnbindInferMeta - spmd_rule : UnbindInferSpmdDynamic - kernel : - func : unbind - backward : unbind_grad - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : unfold - args : (Tensor x, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations) - output : Tensor(out) - infer_meta : - func : UnfoldInferMeta - kernel : - func : unfold - backward : unfold_grad - -- op : uniform - args : (IntArray shape, DataType dtype, Scalar min, Scalar max, int seed, Place place={}) - output : Tensor(out) - infer_meta : - func : UniformRandomInferMeta - param: [shape, dtype] - kernel : - func : uniform - param: [shape, dtype, min, max, seed] - data_type : dtype - backend : place - interfaces : paddle::dialect::InferSymbolicShapeInterface - traits : pir::SideEffectTrait, paddle::dialect::ForwardOnlyTrait - -- op : uniform_inplace - args: (Tensor x, float min = -1.0, float max = 1.0, int seed = 0, int diag_num = 0, int diag_step = 0, float diag_val = 1.0) - output: Tensor(out) - infer_meta: - func: UniformRandomInplaceInferMeta - kernel: - func: uniform_inplace - data_type: x - inplace: (x -> out) - backward: uniform_inplace_grad - traits : pir::SideEffectTrait - -- op : uniform_random_batch_size_like - args: (Tensor input, int[] shape, int input_dim_idx = 0, int output_dim_idx = 0, - float min=-1.0f, float max=1.0f, int seed=0, int diag_num=0, int diag_step=0, float diag_val=1.0f, DataType dtype=DataType::FLOAT32) - output: Tensor (out) - infer_meta: - func: UniformRandomBatchSizeLikeInferMeta - kernel: - func : uniform_random_batch_size_like {dense -> dense}, - uniform_random_batch_size_like_sr {selected_rows -> selected_rows} - data_type: dtype - no_need_buffer: input - traits : pir::SideEffectTrait - -- op : unique_consecutive - args : (Tensor x, bool return_inverse = false, bool return_counts = false, int[] axis = {}, DataType dtype = DataType::FLOAT32) - output : Tensor(out), Tensor(index), Tensor(counts) - infer_meta : - func : UniqueConsecutiveInferMeta - kernel : - func : unique_consecutive - data_type : x - optional : index, counts - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : unpool - args: (Tensor x, Tensor indices, int[] ksize, int[] strides, int[] padding, IntArray output_size, str data_format) - output: Tensor(out) - infer_meta: - func: UnpoolInferMeta - kernel: - func: unpool - data_type: x - backward: unpool_grad - -- op : unpool3d - args: (Tensor x, Tensor indices, int[] ksize, int[] strides={1,1,1}, int[] paddings={0,0,0}, int[] output_size={0,0,0}, str data_format="NCDHW") - output: Tensor(out) - infer_meta: - func: Unpool3dInferMeta - kernel: - func: unpool3d - data_type: x - backward: unpool3d_grad - -- op : unsqueeze - args : (Tensor x, IntArray axis = {}) - output : Tensor(out), Tensor(xshape) - infer_meta : - func : UnsqueezeWithXShapeInferMeta - spmd_rule : UnsqueezeInferSpmd - kernel : - func : unsqueeze - data_type : x - inplace : (x -> out) - view: (x -> out) - intermediate : xshape - backward : unsqueeze_grad - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : unstack - args : (Tensor x, int axis=0, int num=0) - output : Tensor[](out){num} - infer_meta : - func : UnStackInferMeta - kernel : - func : unstack - backward : unstack_grad - -- op : update_loss_scaling_ - args : (Tensor[] x, Tensor found_infinite, Tensor prev_loss_scaling, Tensor in_good_steps, Tensor in_bad_steps, int incr_every_n_steps, int decr_every_n_nan_or_inf, float 
incr_ratio, float decr_ratio, Scalar stop_update=false) - output : Tensor[](out){x.size()}, Tensor(loss_scaling), Tensor(out_good_steps), Tensor(out_bad_steps) - infer_meta : - func : UpdateLossScalingInferMeta - param : [x, found_infinite, prev_loss_scaling, in_good_steps, in_bad_steps] - spmd_rule : UpdateLossScalingSpmd - kernel : - func : update_loss_scaling - data_type : x - data_transform : - skip_transform : found_infinite - inplace : (x -> out), (prev_loss_scaling -> loss_scaling), (in_good_steps -> out_good_steps), (in_bad_steps -> out_bad_steps) - -- op : view_dtype - args : (Tensor input, DataType dtype) - output : Tensor(out) - infer_meta : - func : StridedUnChangedInferMeta - param : [input] - kernel : - func : view_dtype - data_type : input - backward : view_dtype_grad - no_need_buffer : input - -- op : view_shape - args : (Tensor input, int64_t[] dims = {}) - output : Tensor(out) - infer_meta : - func : StridedUnChangedInferMeta - param : [input] - kernel : - func : view_shape - backward : view_shape_grad - no_need_buffer : input - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : viterbi_decode - args : (Tensor potentials, Tensor transition_params, Tensor lengths, bool include_bos_eos_tag = true) - output : Tensor(scores), Tensor(path) - infer_meta : - func : ViterbiDecodeInferMeta - kernel : - func : viterbi_decode - data_type : potentials - -- op : warpctc - args : (Tensor logits, Tensor label, Tensor logits_length, Tensor labels_length, int blank = 0, bool norm_by_times = false) - output : Tensor(loss), Tensor(warpctcgrad) - infer_meta : - func : WarpctcInferMeta - kernel : - func : warpctc - data_type : logits - optional : logits_length, labels_length - intermediate : warpctcgrad - backward : warpctc_grad - -- op : warprnnt - args : (Tensor input, Tensor label, Tensor input_lengths, Tensor label_lengths, int blank = 0, float fastemit_lambda = 0.0) - output : Tensor(loss), Tensor(warprnntgrad) - infer_meta : - func : WarprnntInferMeta - kernel : - func : warprnnt - data_type : input - intermediate : warprnntgrad - backward : warprnnt_grad - -- op : weight_dequantize - args : (Tensor x, Tensor scale, str algo = "weight_only_int8", DataType out_dtype = DataType::FLOAT16, int group_size = -1) - output : Tensor(out) - infer_meta : - func : WeightDequantizeInferMeta - kernel : - func : weight_dequantize - data_type : out_dtype - -- op : weight_only_linear - args : (Tensor x, Tensor weight, Tensor bias, Tensor weight_scale, str weight_dtype, int arch = 80, int group_size = -1) - output : Tensor(out) - infer_meta : - func : WeightOnlyLinearInferMeta - kernel : - func : weight_only_linear - data_type : x - optional : bias - backward : weight_only_linear_grad - -- op : weight_quantize - args : (Tensor x, str algo = "weight_only_int8", int arch = 80, int group_size = -1) - output : Tensor(out), Tensor(scale) - infer_meta : - func : WeightQuantizeInferMeta - kernel : - func : weight_quantize - data_type : x - backend : x - -- op : weighted_sample_neighbors - args : (Tensor row, Tensor colptr, Tensor edge_weight, Tensor input_nodes, Tensor eids, int sample_size, bool return_eids) - output : Tensor(out_neighbors), Tensor(out_count), Tensor(out_eids) - infer_meta : - func : WeightedSampleNeighborsInferMeta - kernel : - func : weighted_sample_neighbors - optional : eids - -- op : where - args : (Tensor condition, Tensor x, Tensor y) - output : Tensor(out) - infer_meta : - func : WhereInferMeta - spmd_rule: WhereInferSpmd - kernel : - func : where - inplace : (x -> 
out) - backward : where_grad - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : yolo_box - args : (Tensor x, Tensor img_size, int[] anchors={}, int class_num = 1, float conf_thresh = 0.01, int downsample_ratio = 32, bool clip_bbox = true, float scale_x_y=1.0, bool iou_aware=false, float iou_aware_factor=0.5) - output : Tensor(boxes), Tensor(scores) - infer_meta : - func : YoloBoxInferMeta - kernel : - func : yolo_box - data_type : x - -- op : yolo_box_head - args : (Tensor x, int[] anchors, int class_num) - output : Tensor(out) - infer_meta : - func : YoloBoxHeadInferMeta - kernel : - func : yolo_box_head - data_type : x - -- op : yolo_box_post - args : (Tensor boxes0, Tensor boxes1, Tensor boxes2, Tensor image_shape, Tensor image_scale, int[] anchors0, int[] anchors1, int[] anchors2, int class_num, float conf_thresh, int downsample_ratio0, int downsample_ratio1, int downsample_ratio2, bool clip_bbox, float scale_x_y, float nms_threshold) - output : Tensor(out), Tensor(nms_rois_num) - infer_meta : - func : YoloBoxPostInferMeta - kernel : - func : yolo_box_post - data_type : boxes0 - -- op : yolo_loss - args : (Tensor x, Tensor gt_box, Tensor gt_label, Tensor gt_score, int[] anchors={}, int[] anchor_mask={}, int class_num =1 , float ignore_thresh=0.7, int downsample_ratio=32, bool use_label_smooth=true, float scale_x_y=1.0) - output : Tensor(loss), Tensor(objectness_mask), Tensor(gt_match_mask) - infer_meta : - func : YoloLossInferMeta - kernel : - func : yolo_loss - data_type : x - optional : gt_score - intermediate : objectness_mask, gt_match_mask - backward : yolo_loss_grad - interfaces : paddle::dialect::InferSymbolicShapeInterface - -- op : zeros - args : (IntArray shape, DataType dtype=DataType::FLOAT32, Place place=CPUPlace()) - output : Tensor(out) - invoke : full(shape, 0, dtype, place) - -- op : zeros_like - args : (Tensor x, DataType dtype=DataType::UNDEFINED, Place place = {}) - output : Tensor(out) - invoke : full_like(x, 0, dtype, place) - -- op: chunk_eval - args: (Tensor inference, Tensor label, Tensor seq_length, int num_chunk_types, str - chunk_scheme = "IOB", int[] excluded_chunk_types = {}) - output: Tensor (precision), Tensor (recall), Tensor (f1_score), Tensor (num_infer_chunks), - Tensor (num_label_chunks), Tensor (num_correct_chunks) - infer_meta: - func: ChunkEvalInferMeta - kernel: - func: chunk_eval - data_type: DataType::FLOAT32 - optional: seq_length - -- op: moe - args: (Tensor x, Tensor gate, Tensor bmm0, Tensor bias0, Tensor bmm1, Tensor bias1, - str act_type = "gelu") - output: Tensor (out) - infer_meta: - func: MoeInferMeta - kernel: - func: moe - -- op: number_count - args: (Tensor numbers, int upper_range) - output: Tensor(out) - infer_meta: - func: NumberCountInferMeta - kernel: - func: number_count - data_type: numbers From 31c0cd36f9e942840f6f18b41c6d126e80c56fb1 Mon Sep 17 00:00:00 2001 From: Fripping <124574028+Fripping@users.noreply.github.com> Date: Mon, 5 Aug 2024 15:49:30 +0800 Subject: [PATCH 4/8] Delete paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints directory --- .../multiary_infer_sym-checkpoint.cc | 1308 ---------- .../multiary_infer_sym-checkpoint.h | 54 - .../same_operands_result-checkpoint.cc | 215 -- .../same_operands_result-checkpoint.h | 165 -- .../unary_infer_sym-checkpoint.cc | 2138 ----------------- .../unary_infer_sym-checkpoint.h | 95 - 6 files changed, 3975 deletions(-) delete mode 100644 
paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/multiary_infer_sym-checkpoint.cc delete mode 100644 paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/multiary_infer_sym-checkpoint.h delete mode 100644 paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/same_operands_result-checkpoint.cc delete mode 100644 paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/same_operands_result-checkpoint.h delete mode 100644 paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/unary_infer_sym-checkpoint.cc delete mode 100644 paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/unary_infer_sym-checkpoint.h diff --git a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/multiary_infer_sym-checkpoint.cc b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/multiary_infer_sym-checkpoint.cc deleted file mode 100644 index a84326612f4b8..0000000000000 --- a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/multiary_infer_sym-checkpoint.cc +++ /dev/null @@ -1,1308 +0,0 @@ -// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "paddle/common/ddim.h" -#include "paddle/common/layout.h" -#include "paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/infer_sym_slice_utils.h" -#include "paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/infer_sym_utils.h" -#include "paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/multiary_infer_sym.h" -#include "paddle/fluid/pir/dialect/operator/ir/op_attribute.h" - -namespace paddle::dialect { - -bool AccuracyOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const symbol::ShapeOrDataDimExprs &out_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - const symbol::ShapeOrDataDimExprs &label_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(2)); - - // Assume indices have the same shape as inference, because - // it's the output of topk. - PADDLE_ENFORCE_EQ( - label_shape.shape().size(), - 2UL, - common::errors::InvalidArgument( - "ShapeError: label's dimensions of AccuracyOp must be 2. " - "But received label's dimensions = %d", - label_shape.shape().size())); - - infer_context->AddEqualCstr(label_shape.shape()[1], symbol::DimExpr{1}); - infer_context->AddEqualCstr(out_shape.shape()[0], label_shape.shape()[0]); - - std::vector<symbol::DimExpr> accuracy_shape = {}; - infer_context->SetShapeOrDataForValue( - op->result(0), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(accuracy_shape)}); - - std::vector<symbol::DimExpr> correct_shape = {}; - infer_context->SetShapeOrDataForValue( - op->result(1), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(correct_shape)}); - - std::vector<symbol::DimExpr> total_shape = {}; - infer_context->SetShapeOrDataForValue( - op->result(2), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(total_shape)}); - - return true; -} -
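// Aside: AccuracyOp above leans on two context primitives, GetShapeOrDataForValue
// to read a value's symbolic shape and AddEqualCstr to record that two dimensions
// must match instead of comparing concrete integers. A minimal self-contained
// sketch of that idea; all names below are hypothetical toy code, not the Paddle API.
#include <cstdint>
#include <iostream>
#include <string>
#include <utility>
#include <variant>
#include <vector>

using Dim = std::variant<int64_t, std::string>;  // concrete extent or named symbol

struct ConstraintSet {
  // Deferred equalities between dimensions, e.g. S0 == N, checked/simplified later.
  std::vector<std::pair<Dim, Dim>> equalities;
  void AddEqual(Dim a, Dim b) {
    equalities.emplace_back(std::move(a), std::move(b));
  }
};

int main() {
  ConstraintSet cstr;
  // label is [N, 1] and the inference output is [S0, k]: require S0 == N,
  // which is what AddEqualCstr(out_shape[0], label_shape[0]) expresses above.
  cstr.AddEqual(std::string("S0"), std::string("N"));
  std::cout << "recorded " << cstr.equalities.size() << " constraint(s)\n";
}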
" - "But received label's dimensions = %d", - label_shape.shape().size())); - - infer_context->AddEqualCstr(label_shape.shape()[1], symbol::DimExpr{1}); - infer_context->AddEqualCstr(out_shape.shape()[0], label_shape.shape()[0]); - - std::vector accuracy_shape = {}; - infer_context->SetShapeOrDataForValue( - op->result(0), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(accuracy_shape)}); - - std::vector correct_shape = {}; - infer_context->SetShapeOrDataForValue( - op->result(1), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(correct_shape)}); - - std::vector total_shape = {}; - infer_context->SetShapeOrDataForValue( - op->result(2), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(total_shape)}); - - return true; -} - -bool AddNOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - const auto &input_list_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - PADDLE_ENFORCE_EQ( - input_list_shape.isa(), - true, - common::errors::InvalidArgument( - "The type of inputs shape should be TensorListShapeOrDataDimExprs")); - const auto &inputs_shape = - input_list_shape.dyn_cast(); - PADDLE_ENFORCE_GT( - inputs_shape.size(), - 0, - common::errors::InvalidArgument( - "The input tensor X's dimensions of AddNOp " - "should be larger than 0. But received X's dimensions %d.", - inputs_shape.size())); - symbol::TensorShapeOrDataDimExprs candidate_shape = inputs_shape.front(); - for (size_t i = 1; i < inputs_shape.size(); ++i) { - // 0D tensor - if (inputs_shape[i].shape().size() == 0) { - continue; - } - if (candidate_shape.shape().size() == 0) { - candidate_shape = inputs_shape[i]; - continue; - } - for (size_t j = 0; j < candidate_shape.shape().size(); ++j) { - infer_context->AddEqualCstr(candidate_shape.shape()[j], - inputs_shape[i].shape()[j]); - } - } - infer_context->SetShapeOrDataForValue( - op->result(0), symbol::ShapeOrDataDimExprs{candidate_shape}); - - return true; -} - -bool AddmmOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - const auto &input_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - const auto &x_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(1)); - const auto &y_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(2)); - - auto ndim_input = input_shape.shape().size(); - auto ndim_x = x_shape.shape().size(); - auto ndim_y = y_shape.shape().size(); - - PADDLE_ENFORCE_EQ(ndim_input == 2 || ndim_input == 1, - true, - common::errors::InvalidArgument( - "The input tensor input's dimension must be 2 or 1. " - "But received input's dimension = [%d].", - ndim_input)); - PADDLE_ENFORCE_EQ(ndim_x, - 2, - common::errors::InvalidArgument( - "The input tensor x's dimension must be 2. " - "But received x's dimension = [%d].", - ndim_x)); - PADDLE_ENFORCE_EQ(ndim_y, - 2, - common::errors::InvalidArgument( - "The input tensor y's dimension must be 2. 
" - "But received y's dimension = [%d].", - ndim_y)); - - std::vector output_shape; - output_shape.push_back(x_shape.shape()[0]); - output_shape.push_back(y_shape.shape()[1]); - - infer_context->SetShapeOrDataForValue( - op->result(0), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(output_shape)}); - - infer_context->AddEqualCstr(x_shape.shape()[1], y_shape.shape()[0]); - - if (ndim_input == 2) { - infer_context->AddBroadcastableCstr(input_shape.shape()[0], - x_shape.shape()[0]); - infer_context->AddBroadcastableCstr(input_shape.shape()[1], - y_shape.shape()[1]); - } else if (ndim_input == 1) { - infer_context->AddBroadcastableCstr(input_shape.shape()[0], - y_shape.shape()[1]); - } - - return true; -} - -bool Addmm_OpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - return AddmmOpInferSymbolicShape(op, infer_context); -} - -bool AucOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - const auto &predict_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - const auto &label_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(1)); - - PADDLE_ENFORCE_GE( - predict_shape.shape().size(), - 2, - common::errors::InvalidArgument( - "The Input(Predict) has not been initialized properly. The " - "shape of Input(Predict) = [%s], the shape size must be " - "greater_equal 2.", - predict_shape.shape())); - - const auto &predict_height = predict_shape.shape()[0]; - const auto &label_height = label_shape.shape()[0]; - - infer_context->AddEqualCstr(predict_height, label_height); - - int num_thresholds = - op->attribute("num_thresholds").data(); - int slide_steps = op->attribute("slide_steps").data(); - - int num_pred_buckets = num_thresholds + 1; - - PADDLE_ENFORCE_GE( - num_pred_buckets, - 1, - common::errors::InvalidArgument("num_thresholds must larger than 1")); - PADDLE_ENFORCE_GE( - slide_steps, - 0, - common::errors::InvalidArgument("slide_steps must be natural number")); - - infer_context->SetShapeOrDataForValue( - op->result(0), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(std::vector{})}); - - if (slide_steps) { - infer_context->SetShapeOrDataForValue( - op->result(1), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(std::vector{ - (1 + slide_steps) * num_pred_buckets + 1})}); - infer_context->SetShapeOrDataForValue( - op->result(2), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(std::vector{ - (1 + slide_steps) * num_pred_buckets + 1})}); - } else { - infer_context->SetShapeOrDataForValue( - op->result(1), - symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs( - std::vector{1, num_pred_buckets})}); - infer_context->SetShapeOrDataForValue( - op->result(2), - symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs( - std::vector{1, num_pred_buckets})}); - } - - return true; -} - -bool BatchNormOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const auto &x_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - const auto &scale_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(3)); - const auto &bias_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(4)); - - std::vector x_dims = x_shape_or_data.shape(); - - std::string data_layout_str = - op->attribute("data_format").AsString(); - const DataLayout data_layout = common::StringToDataLayout(data_layout_str); - - 
PADDLE_ENFORCE_GE( - x_dims.size(), - 2, - phi::errors::InvalidArgument( - "ShapeError: the dimension of input " - "X must greater than or equal to 2. But received: the shape of input " - "X = [%s], the dimension of input X =[%d]", - x_dims, - x_dims.size())); - PADDLE_ENFORCE_LE( - x_dims.size(), - 5, - phi::errors::InvalidArgument( - "ShapeError: the dimension of input X " - "must smaller than or equal to 5. But received: the shape of input X " - "= [%s], the dimension of input X = [%d]", - x_dims, - x_dims.size())); - - symbol::DimExpr C = (data_layout == DataLayout::kNCHW) - ? x_dims[1] - : x_dims[x_dims.size() - 1]; - - if (!scale_shape_or_data.isa()) { - std::vector scale_dims = scale_shape_or_data.shape(); - PADDLE_ENFORCE_EQ(scale_dims.size(), - 1UL, - phi::errors::InvalidArgument( - "ShapeError: the dimension of scale must equal to 1." - "But received: the dimension of scale is [%d]", - scale_dims.size())); - infer_context->AddEqualCstr(scale_dims[0], C); - } - - if (!bias_shape_or_data.isa()) { - std::vector bias_dims = bias_shape_or_data.shape(); - PADDLE_ENFORCE_EQ(bias_dims.size(), - 1UL, - phi::errors::InvalidArgument( - "ShapeError: the dimension of bias must equal to 1." - "But received: the dimension of bias is [%d]", - bias_dims.size())); - infer_context->AddEqualCstr(bias_dims[0], C); - } - - // Set output shapes - infer_context->SetShapeOrDataForValue( - op->result(0), - symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs(x_dims)}); - - std::vector param_dims = {C}; - infer_context->SetShapeOrDataForValue( - op->result(1), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(param_dims)}); - infer_context->SetShapeOrDataForValue( - op->result(2), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(param_dims)}); - - if (op->result(3) && op->result(3).type()) { - infer_context->SetShapeOrDataForValue( - op->result(3), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(param_dims)}); - } - if (op->result(4) && op->result(4).type()) { - infer_context->SetShapeOrDataForValue( - op->result(4), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(param_dims)}); - } - if (op->result(5) && op->result(5).type()) { - std::vector reserve_space_dims{ - symbol::DimExpr{infer_context->GetNextSymName()}}; - infer_context->SetShapeOrDataForValue( - op->result(5), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(reserve_space_dims)}); - } - - return true; -} - -bool BatchNorm_OpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - return BatchNormOpInferSymbolicShape(op, infer_context); -} - -bool BicubicInterpOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const symbol::ShapeOrDataDimExprs &x = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - - const auto &attributes = op->attributes(); - - const std::string data_format = - attributes.at("data_format").dyn_cast().AsString(); - int out_d = attributes.at("out_d").dyn_cast().data(); - int out_h = attributes.at("out_h").dyn_cast().data(); - int out_w = attributes.at("out_w").dyn_cast().data(); - const std::vector &scale = details::GetVectorAttr(op, "scale"); - - const bool has_size_tensor = [&] { - pir::Value size_tensor = op->operand_source(2); - if (!size_tensor || !size_tensor.type()) { - return false; - } - const auto &list_size_tensor = - size_tensor.type().dyn_cast(); - return list_size_tensor && !list_size_tensor.empty(); - }(); - auto 
GetSizeTensorDataExpr = - [&](pir::Value value) -> std::vector { - const symbol::ShapeOrDataDimExprs &size_tensor_shape = - infer_context->GetShapeOrDataForValue(value); - PADDLE_ENFORCE_EQ( - size_tensor_shape.isa(), - true, - common::errors::InvalidArgument( - "The size_tensor of Interpolation should be type of " - "TensorListShapeOrDataDimExprs")); - return details::GetOrCreateExprVecFromData(size_tensor_shape, - infer_context); - }; - auto GetOutSizeDataExpr = - [&](pir::Value value) -> std::vector { - const symbol::ShapeOrDataDimExprs &out_size_tensor_shape = - infer_context->GetShapeOrDataForValue(value); - return details::GetOrCreateExprVecFromData(out_size_tensor_shape, - infer_context); - }; - auto GetOutDimByScale = [&](const symbol::DimExpr &in_dim, - float scale) -> symbol::DimExpr { - PADDLE_ENFORCE_GT(scale, - 0, - common::errors::InvalidArgument( - "The scale in Attr(scale) of Operator(interpolate) " - "should be greater than 0, but received value is %d.", - scale)); - if (in_dim.isa()) { - return symbol::DimExpr{ - static_cast(in_dim.dyn_cast() * scale)}; - } - return symbol::DimExpr{infer_context->GetNextSymName()}; - }; - - std::vector size_tensor; - if (out_d != -1) size_tensor.push_back(out_d); - if (out_h != -1) size_tensor.push_back(out_h); - if (out_w != -1) size_tensor.push_back(out_w); - - const DataLayout data_layout = common::StringToDataLayout(data_format); - - if (x.shape().size() == 3) { - // shape check for 1D interpolate for input tensor shape NCHW - if (!size_tensor.empty()) { - // top priority size - std::vector dim_out; - if (data_layout == DataLayout::kNCHW) { - dim_out = {x.shape()[0], x.shape()[1], symbol::DimExpr{out_w}}; - } else { - dim_out = {x.shape()[0], symbol::DimExpr{out_w}, x.shape()[2]}; - } - - symbol::ShapeOrDataDimExprs shape_data{ - symbol::TensorShapeOrDataDimExprs(dim_out)}; - - pir::Value res = op->result(0); - infer_context->SetShapeOrDataForValue(res, shape_data); - return true; - } - - symbol::DimExpr out_w_tmp{0}; - const auto &next_sym = infer_context->GetNextSymName(); - out_w_tmp = symbol::DimExpr(next_sym); - - std::vector dim_out; - if (data_layout == DataLayout::kNCHW) { - dim_out = {x.shape()[0], x.shape()[1], out_w_tmp}; - } else { - dim_out = {x.shape()[0], out_w_tmp, x.shape()[2]}; - } - - symbol::ShapeOrDataDimExprs shape_data{ - symbol::TensorShapeOrDataDimExprs(dim_out)}; - - pir::Value res = op->result(0); - infer_context->SetShapeOrDataForValue(res, shape_data); - return true; - } else if (x.shape().size() == 4) { - // shape check for 2D interpolate for input tensor shape NCHW - auto GetOutHW = [&]() -> std::tuple { - // top priority size - if (has_size_tensor) { - const auto &size_tensor_list_shape = - GetSizeTensorDataExpr(op->operand_source(2)); - PADDLE_ENFORCE_EQ(size_tensor_list_shape.size(), - 2, - common::errors::InvalidArgument( - "The size of size_tensor list should be 2.")); - return std::make_tuple(size_tensor_list_shape.at(0), - size_tensor_list_shape.at(1)); - } - // has out_size tensor - if (op->operand_source(1)) { - const auto &out_size_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(1)); - PADDLE_ENFORCE_EQ( - out_size_shape_or_data.shape().size(), - 1, - common::errors::InvalidArgument( - "The rank of input out_size tensor should be 1.")); - infer_context->AddEqualCstr(out_size_shape_or_data.shape()[0], - symbol::DimExpr{2}); - const auto &out_size_data = GetOutSizeDataExpr(op->operand_source(1)); - return std::make_tuple(symbol::DimExpr{out_size_data[0]}, - 
symbol::DimExpr{out_size_data[1]}); - } - // has scale - if (scale.size() == 2) { - float scale_h = scale[0]; - float scale_w = scale[1]; - const auto &in_h = - data_layout == DataLayout::kNCHW ? x.shape()[2] : x.shape()[1]; - const auto &in_w = - data_layout == DataLayout::kNCHW ? x.shape()[3] : x.shape()[2]; - return std::make_tuple(GetOutDimByScale(in_h, scale_h), - GetOutDimByScale(in_w, scale_w)); - } - - return std::make_tuple(symbol::DimExpr{out_h}, symbol::DimExpr{out_w}); - }; - - const std::vector dim_out = [&] { - const auto &[out_h_sym, out_w_sym] = GetOutHW(); - if (data_layout == DataLayout::kNCHW) { - return std::vector{ - x.shape()[0], x.shape()[1], out_h_sym, out_w_sym}; - } else { - return std::vector{ - x.shape()[0], out_h_sym, out_w_sym, x.shape()[3]}; - } - }(); - - symbol::ShapeOrDataDimExprs shape_data{ - symbol::TensorShapeOrDataDimExprs(dim_out)}; - infer_context->SetShapeOrDataForValue(op->result(0), shape_data); - - return true; - } else if (x.shape().size() == 5) { - auto GetOutDHW = - [&]() -> std::tuple { - // top priority size - if (has_size_tensor) { - const auto &size_tensor_list_shape = - GetSizeTensorDataExpr(op->operand_source(2)); - PADDLE_ENFORCE_EQ(size_tensor_list_shape.size(), - 3, - common::errors::InvalidArgument( - "The size of size_tensor list should be 3.")); - return std::make_tuple(size_tensor_list_shape.at(0), - size_tensor_list_shape.at(1), - size_tensor_list_shape.at(2)); - } - // has out_size tensor - if (op->operand_source(1)) { - const auto &out_size_data = GetOutSizeDataExpr(op->operand_source(1)); - return std::make_tuple(symbol::DimExpr{out_size_data[0]}, - symbol::DimExpr{out_size_data[1]}, - symbol::DimExpr{out_size_data[2]}); - } - // has scale - if (scale.size() == 3) { - float scale_d = scale[0]; - float scale_h = scale[1]; - float scale_w = scale[2]; - const auto &in_d = - data_layout == DataLayout::kNCHW ? x.shape()[2] : x.shape()[1]; - const auto &in_h = - data_layout == DataLayout::kNCHW ? x.shape()[3] : x.shape()[2]; - const auto &in_w = - data_layout == DataLayout::kNCHW ? 
x.shape()[4] : x.shape()[3]; - return std::make_tuple(GetOutDimByScale(in_d, scale_d), - GetOutDimByScale(in_h, scale_h), - GetOutDimByScale(in_w, scale_w)); - } - - return std::make_tuple(symbol::DimExpr{out_d}, - symbol::DimExpr{out_h}, - symbol::DimExpr{out_w}); - }; - - const std::vector dim_out = [&] { - const auto &[out_d_sym, out_h_sym, out_w_sym] = GetOutDHW(); - if (data_layout == DataLayout::kNCHW) { - return std::vector{ - x.shape()[0], x.shape()[1], out_d_sym, out_h_sym, out_w_sym}; - } else { - return std::vector{ - x.shape()[0], out_d_sym, out_h_sym, out_w_sym, x.shape()[4]}; - } - }(); - - symbol::ShapeOrDataDimExprs shape_data{ - symbol::TensorShapeOrDataDimExprs(dim_out)}; - infer_context->SetShapeOrDataForValue(op->result(0), shape_data); - return true; - } else { - PADDLE_THROW( - common::errors::Fatal("Input(X) dimension must be 3, 4 or 5!")); - } - - return true; -} - -bool BilinearOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const auto &x_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - const auto &y_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(1)); - const auto &weight_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(2)); - - PADDLE_ENFORCE_EQ( - x_shape.shape().size(), - 2UL, - common::errors::InvalidArgument("The input(X) must be a 2D Tensor.")); - PADDLE_ENFORCE_EQ( - y_shape.shape().size(), - 2UL, - common::errors::InvalidArgument("The input(Y) must be a 2D Tensor.")); - PADDLE_ENFORCE_EQ( - weight_shape.shape().size(), - 3UL, - common::errors::InvalidArgument( - "Expected the input(Weight) is a 3D tensor. But received %dD tensor.", - weight_shape.shape().size())); - - infer_context->AddEqualCstr(x_shape.shape()[0], y_shape.shape()[0]); - - infer_context->AddEqualCstr(x_shape.shape()[1], weight_shape.shape()[1]); - infer_context->AddEqualCstr(y_shape.shape()[1], weight_shape.shape()[2]); - - if (op->operand_source(3)) { // has bias - const auto &bias_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(3)); - PADDLE_ENFORCE_EQ(bias_shape.shape().size(), - 2UL, - common::errors::InvalidArgument( - "The Input(Bias) must be a 2-D tensor with " - "the 2nd dimension fixed to 1 (a row vector).")); - infer_context->AddEqualCstr(bias_shape.shape()[0], symbol::DimExpr{1}); - infer_context->AddEqualCstr(bias_shape.shape()[1], weight_shape.shape()[0]); - } - - infer_context->SetShapeOrDataForValue( - op->result(0), - symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs( - {x_shape.shape()[0], weight_shape.shape()[0]})}); - - return true; -} - -bool BilinearInterpOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - return BicubicInterpOpInferSymbolicShape(op, infer_context); -} - -bool CrossEntropyWithSoftmaxOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const symbol::ShapeOrDataDimExprs &input_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - const symbol::ShapeOrDataDimExprs &index_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(1)); - - const auto &input_dim = input_shape.shape(); - const auto &index_dim = index_shape.shape(); - const auto &attributes = op->attributes(); - int axis = attributes.at("axis").dyn_cast().data(); - if (axis < 0) axis += input_shape.shape().size(); - bool soft_label = - attributes.at("soft_label").dyn_cast().data(); - PADDLE_ENFORCE(!soft_label || input_dim.size() == 
index_dim.size(), - common::errors::InvalidArgument( - "The input and index should have the same rank when " - "soft_label is true. But received input rank(%d) and " - "index rank(%d)", - input_dim.size(), - index_dim.size())); - - auto softmax_dim = index_dim; - auto out_dim = index_dim; - - if (index_dim.size() == input_dim.size()) { - if (soft_label) { - out_dim[axis] = 1; - } - softmax_dim[axis] = input_dim[axis]; - } else { - softmax_dim.insert(softmax_dim.begin() + axis, input_dim[axis]); - if (soft_label) { - out_dim.insert(out_dim.begin() + axis, 1); - } - } - - infer_context->SetShapeOrDataForValue( - op->result(0), symbol::TensorShapeOrDataDimExprs(softmax_dim)); - infer_context->SetShapeOrDataForValue( - op->result(1), symbol::TensorShapeOrDataDimExprs(out_dim)); - - return true; -} - -bool CrossEntropyWithSoftmax_OpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - return CrossEntropyWithSoftmaxOpInferSymbolicShape(op, infer_context); -} - -bool ConcatOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - const auto &axis_expr = - infer_context->GetShapeOrDataForValue(op->operand_source(1)); - if (!axis_expr.data() || !axis_expr.data()->at(0).isa()) { - pir::Value res = op->result(0); - infer_context->SetSymbolForValueByStaticShape(res); - return true; - } - - pir::Value operand_source = op->operand_source(0); - const auto &shape_data_list = - infer_context->GetShapeOrDataForValue(operand_source) - .dyn_cast(); - - size_t rank = shape_data_list.at(0).shape().size(); - const int64_t axis = [&] { - int64_t axis = axis_expr.data()->at(0).dyn_cast(); - return axis >= 0 ? axis : std::max(int64_t(0), int64_t(axis + rank)); - }(); - - if (shape_data_list.at(0).data().has_value()) { - if (rank == 1) { - const auto &s_or_d = - infer_context->GetShapeOrDataForValue(operand_source); - ExprVec data = details::GetExprVecFromData(s_or_d); - - const std::vector shape{std::int64_t(data.size())}; - symbol::ShapeOrDataDimExprs shape_data{ - symbol::TensorShapeOrDataDimExprs(shape, data)}; - pir::Value res = op->result(0); - infer_context->SetShapeOrDataForValue(res, shape_data); - - return true; - } else { - PADDLE_THROW(common::errors::Unimplemented( - op->name() + - " 's InferSymbolicShape can NOT deal with rank > 1 now.")); - } - std::vector data; - data.reserve(shape_data_list.size()); - for (auto &data_elem : shape_data_list) { - data.push_back(data_elem.data().value().at(0)); - } - const std::vector shape{std::int64_t(data.size())}; - symbol::ShapeOrDataDimExprs shape_data{ - symbol::TensorShapeOrDataDimExprs(shape, data)}; - pir::Value res = op->result(0); - infer_context->SetShapeOrDataForValue(res, shape_data); - - return true; - } - - const std::vector &out_dims = [&] { - std::vector out_dims = shape_data_list.at(0).shape(); - for (size_t i = 0; i < rank; ++i) { - if (i != static_cast(axis)) { - details::BuildCstrEqForTensorListAlongAxis( - infer_context, shape_data_list, i); - continue; - } - for (size_t j = 1; j < shape_data_list.size(); ++j) { - out_dims.at(axis) = - out_dims.at(axis) + shape_data_list.at(j).shape().at(axis); - } - } - return out_dims; - }(); - - symbol::ShapeOrDataDimExprs shape_data{ - symbol::TensorShapeOrDataDimExprs(out_dims)}; - - pir::Value res = op->result(0); - infer_context->SetShapeOrDataForValue(res, shape_data); - - return true; -} - -bool FullWithTensorOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - pir::Value operand_source = 
op->operand_source(1); - const symbol::ShapeOrDataDimExprs &operand_shape_or_data = - infer_context->GetShapeOrDataForValue(operand_source); - - const auto &out_shape = operand_shape_or_data.data().has_value() - ? operand_shape_or_data.data().value() - : operand_shape_or_data.shape(); - - infer_context->SetShapeOrDataForValue( - op->result(0), symbol::TensorShapeOrDataDimExprs(out_shape)); - return true; -} - -bool FlashAttnOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - pir::Value operand_source = op->operand_source(0); - const symbol::ShapeOrDataDimExprs &q = - infer_context->GetShapeOrDataForValue(operand_source); - - const symbol::ShapeOrDataDimExprs &k = - infer_context->GetShapeOrDataForValue(op->operand_source(1)); - - const symbol::ShapeOrDataDimExprs &v = - infer_context->GetShapeOrDataForValue(op->operand_source(2)); - - PADDLE_ENFORCE_EQ(q.shape().size(), - 4, - common::errors::InvalidArgument( - "flash_attn receive input with dim " - "[batch_size, seq_len, num_heads, head_dim]")); - - infer_context->AddEqualCstr(q.shape()[0], k.shape()[0]); - infer_context->AddEqualCstr(q.shape()[0], v.shape()[0]); - infer_context->AddEqualCstr(k.shape()[1], v.shape()[1]); - - if (op->operand_source(4)) { - const symbol::ShapeOrDataDimExprs &attn_mask = - infer_context->GetShapeOrDataForValue(op->operand_source(4)); - infer_context->AddEqualCstr(attn_mask.shape()[0], q.shape()[0]); - infer_context->AddEqualCstr(attn_mask.shape()[2], q.shape()[1]); - infer_context->AddEqualCstr(attn_mask.shape()[3], k.shape()[1]); - } - - std::vector out_shape = q.shape(); - - out_shape.back() = v.shape().back(); - - infer_context->SetShapeOrDataForValue( - op->result(0), symbol::TensorShapeOrDataDimExprs(out_shape)); - - // GPU has round for seqlen, but XPU has not. Here we align with the GPU - // version. 
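// Aside: the round_multiple lambda below rounds a sequence length up to the
// next multiple of 128 with pure integer arithmetic, (x + 127) / 128 * 128.
// A self-contained check of that identity on concrete values; the helper name
// is hypothetical, not the Paddle API.
#include <cassert>
#include <cstdint>

int64_t RoundUpTo128(int64_t x) { return (x + 127) / 128 * 128; }

int main() {
  assert(RoundUpTo128(1) == 128);
  assert(RoundUpTo128(128) == 128);  // already-aligned values are unchanged
  assert(RoundUpTo128(300) == 384);  // 300 -> 384, the next multiple of 128
  return 0;
}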
- auto round_multiple = [](symbol::DimExpr x) { - auto m = symbol::DimExpr{128}; - auto m_minus_one = symbol::DimExpr{127}; - return (x + m_minus_one) / m * m; - }; - auto batch_size_expr = q.shape()[0]; - auto num_heads_expr = q.shape()[2]; - auto seqlen_q_rounded_expr = round_multiple(q.shape()[1]); - auto seqlen_k_rounded_expr = round_multiple(k.shape()[1]); - if (op->result(1)) { - std::vector<symbol::DimExpr> softmax_shape{batch_size_expr, - num_heads_expr, - seqlen_q_rounded_expr, - seqlen_k_rounded_expr}; - infer_context->SetShapeOrDataForValue( - op->result(1), symbol::TensorShapeOrDataDimExprs(softmax_shape)); - } - if (op->result(2)) { - std::vector<symbol::DimExpr> softmax_lse_shape{ - batch_size_expr, num_heads_expr, seqlen_q_rounded_expr}; - infer_context->SetShapeOrDataForValue( - op->result(2), symbol::TensorShapeOrDataDimExprs(softmax_lse_shape)); - } - if (op->result(3)) { - // seed_offset holds {seed, offset}, so its shape is [2]. - std::vector<symbol::DimExpr> seed_offset_shape{symbol::DimExpr{2}}; - infer_context->SetShapeOrDataForValue( - op->result(3), symbol::TensorShapeOrDataDimExprs(seed_offset_shape)); - } - return true; -} - -bool GroupNormOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const symbol::ShapeOrDataDimExprs &x_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - - infer_context->SetShapeOrDataForValue(op->result(0), x_shape); - - const symbol::DimExpr &batch_size = x_shape.shape()[0]; - int groups = op->attribute<pir::Int32Attribute>("groups").data(); - symbol::TensorShapeOrDataDimExprs mean_shape( - std::vector<symbol::DimExpr>{batch_size, groups}); - if (op->result(1)) { - infer_context->SetShapeOrDataForValue(op->result(1), mean_shape); - } - if (op->result(2)) { - infer_context->SetShapeOrDataForValue(op->result(2), mean_shape); - } - return true; -} - -bool LerpOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - const auto &x_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - const auto &y_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(1)); - const auto &w_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(2)); - const auto &x_shape = x_shape_or_data.shape(); - const auto &y_shape = y_shape_or_data.shape(); - const auto &w_shape = w_shape_or_data.shape(); - size_t x_ndims = x_shape.size(); - size_t y_ndims = y_shape.size(); - size_t w_ndims = w_shape.size(); - std::vector<symbol::DimExpr> out1_shape; - std::vector<symbol::DimExpr> out2_shape; - if (x_ndims > y_ndims) { - out1_shape.assign(x_shape.begin(), x_shape.end()); - } else if (x_ndims < y_ndims) { - out1_shape.assign(y_shape.begin(), y_shape.end()); - } else { - symbol::DimExprBuilder builder; - for (size_t i = 0; i < x_ndims; ++i) { - out1_shape.emplace_back(builder.Broadcast(x_shape[i], y_shape[i])); - infer_context->AddBroadcastableCstr(x_shape[i], y_shape[i]); - } - } - size_t out1_ndims = out1_shape.size(); - if (w_ndims > out1_ndims) { - out2_shape.assign(w_shape.begin(), w_shape.end()); - } else if (w_ndims < out1_ndims) { - out2_shape.assign(out1_shape.begin(), out1_shape.end()); - } else { - symbol::DimExprBuilder builder; - for (size_t i = 0; i < w_ndims; ++i) { - out2_shape.emplace_back(builder.Broadcast(w_shape[i], out1_shape[i])); - infer_context->AddBroadcastableCstr(w_shape[i], out1_shape[i]); - } - } - infer_context->SetShapeOrDataForValue( - op->result(0), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(out2_shape)}); - return true; -} - -bool Lerp_OpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - return LerpOpInferSymbolicShape(op, infer_context); -} -
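// Aside: LerpOpInferSymbolicShape above broadcasts x against y, then the result
// against w, and when ranks differ it simply adopts the higher-rank shape. For
// concrete extents that two-step rule reduces to this self-contained toy; the
// helper is hypothetical, not the Paddle API.
#include <cassert>
#include <cstdint>
#include <vector>

using Shape = std::vector<int64_t>;

Shape Broadcast(const Shape &a, const Shape &b) {
  if (a.size() != b.size()) return a.size() > b.size() ? a : b;
  Shape out(a.size());
  for (size_t i = 0; i < a.size(); ++i) out[i] = (a[i] == 1) ? b[i] : a[i];
  return out;
}

int main() {
  const Shape x{4, 1, 6}, y{4, 5, 6}, w{6};
  const Shape out1 = Broadcast(x, y);     // {4, 5, 6}: elementwise, 1 stretches
  const Shape out2 = Broadcast(w, out1);  // {4, 5, 6}: higher rank wins outright
  assert(out2 == (Shape{4, 5, 6}));
  return 0;
}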
LerpOpInferSymbolicShape(op, infer_context); -} - -bool LayerNormOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - // Get the shapes of input tensors - const auto &x_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - const auto &scale_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(1)); - const auto &bias_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(2)); - - std::vector x_dims = x_shape_or_data.shape(); - int begin_norm_axis = - op->attribute("begin_norm_axis").data(); - - // Flatten x_dims to 2D and get dim[1] - symbol::DimExpr matrix_dim_1 = x_dims[begin_norm_axis]; - for (std::size_t i = begin_norm_axis + 1; i < x_dims.size(); ++i) { - matrix_dim_1 = matrix_dim_1 * x_dims[i]; - } - - if (!scale_shape_or_data.isa()) { - std::vector scale_dims = scale_shape_or_data.shape(); - infer_context->AddEqualCstr(scale_dims[0], matrix_dim_1); - } - if (!bias_shape_or_data.isa()) { - std::vector bias_dims = bias_shape_or_data.shape(); - infer_context->AddEqualCstr(bias_dims[0], matrix_dim_1); - } - - // Set output shapes - infer_context->SetShapeOrDataForValue( - op->result(0), - symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs(x_dims)}); - - // Set mean and variance shapes - std::vector before_norm_dims( - x_dims.begin(), x_dims.begin() + begin_norm_axis); - infer_context->SetShapeOrDataForValue( - op->result(1), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(before_norm_dims)}); - infer_context->SetShapeOrDataForValue( - op->result(2), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(before_norm_dims)}); - - return true; -} - -bool LinspaceOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const auto &num_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(2)); - const auto step = [&] { - symbol::DimExpr expr; - if (num_shape_or_data.data().has_value()) { - expr = num_shape_or_data.data().value()[0]; - } else { - expr = num_shape_or_data.shape()[0]; - } - return expr; - }(); - const symbol::ShapeOrDataDimExprs &shape_data = [&] { - std::vector out_dims{step}; - return symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(out_dims)}; - }(); - infer_context->SetShapeOrDataForValue(op->result(0), shape_data); - return true; -} - -bool LinearInterpOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - return BicubicInterpOpInferSymbolicShape(op, infer_context); -} - -bool LogspaceOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - return LinspaceOpInferSymbolicShape(op, infer_context); -} - -bool NearestInterpOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - return BicubicInterpOpInferSymbolicShape(op, infer_context); -} - -bool MemoryEfficientAttentionOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const auto &q_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(0)).shape(); - const auto &k_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(1)).shape(); - const auto &v_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(2)).shape(); - PADDLE_ENFORCE_EQ( - q_shape.size(), - 4, - common::errors::InvalidArgument("Query should be a 4-D tensor" - "But received Query dimension(%d)", - q_shape.size())); - PADDLE_ENFORCE_EQ( - 
k_shape.size(), - 4, - common::errors::InvalidArgument("Key should be a 4-D tensor" - "But received Key dimension(%d)", - k_shape.size())); - PADDLE_ENFORCE_EQ( - v_shape.size(), - 4, - common::errors::InvalidArgument("Value should be a 4-D tensor" - "But received Value dimension(%d)", - v_shape.size())); - - const auto &query_batch_size = q_shape[0]; - const auto &query_seq_length = q_shape[1]; - const auto &query_num_head = q_shape[2]; - const auto &query_head_size = q_shape[3]; - - const auto &key_batch_size = k_shape[0]; - const auto &key_seq_length = k_shape[1]; - const auto &key_num_head = k_shape[2]; - const auto &key_head_size = k_shape[3]; - - const auto &value_batch_size = v_shape[0]; - const auto &value_seq_length = v_shape[1]; - const auto &value_num_head = v_shape[2]; - const auto &value_head_size = v_shape[3]; - - infer_context->AddEqualCstr(query_batch_size, key_batch_size); - infer_context->AddEqualCstr(key_batch_size, value_batch_size); - - infer_context->AddEqualCstr(query_num_head, key_num_head); - infer_context->AddEqualCstr(key_num_head, value_num_head); - - infer_context->AddEqualCstr(query_head_size, key_head_size); - - infer_context->AddEqualCstr(key_seq_length, value_seq_length); - - const std::vector out_dims{ - query_batch_size, query_seq_length, query_num_head, value_head_size}; - const std::vector logsumexp_dims{query_num_head, - query_batch_size}; - const std::vector seed_and_offset_dims{2}; - - infer_context->SetShapeOrDataForValue( - op->result(0), symbol::TensorShapeOrDataDimExprs(out_dims)); - infer_context->SetShapeOrDataForValue( - op->result(1), symbol::TensorShapeOrDataDimExprs(logsumexp_dims)); - infer_context->SetShapeOrDataForValue( - op->result(2), symbol::TensorShapeOrDataDimExprs(seed_and_offset_dims)); - - return true; -} - -bool RoiAlignOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const auto &x = op->operand_source(0); - const auto &boxes = op->operand_source(1); - - const auto &num_boxes = - infer_context->GetShapeOrDataForValue(boxes).shape()[0]; - symbol::DimExpr channel_num = - infer_context->GetShapeOrDataForValue(x).shape()[1]; - - int32_t out_h = op->attribute("pooled_height").data(); - int32_t out_w = op->attribute("pooled_width").data(); - - std::vector out_dim = {num_boxes, channel_num, out_h, out_w}; - infer_context->SetShapeOrDataForValue( - op->result(0), symbol::TensorShapeOrDataDimExprs(out_dim)); - return true; -} - -bool MeshgridOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const symbol::TensorListShapeOrDataDimExprs &shape_data_list = - infer_context->GetShapeOrDataForValue(op->operand_source(0)) - .dyn_cast(); - - const symbol::ShapeOrDataDimExprs sym_shape_dim_exprs = [&] { - symbol::TensorListShapeOrDataDimExprs shape_dim_exprs_list; - std::vector vec; - - for (auto &shape_data : shape_data_list) { - if (shape_data.shape().size() == 0) { - vec.emplace_back(1); - } else { - vec.emplace_back(shape_data.shape()[0]); - } - } - - auto shape_dim_exprs = symbol::TensorShapeOrDataDimExprs(vec); - - for (size_t i = 0; i < shape_data_list.size(); i++) { - shape_dim_exprs_list.emplace_back(shape_dim_exprs); - } - - return symbol::ShapeOrDataDimExprs(shape_dim_exprs_list); - }(); - - pir::Value res = op->result(0); - infer_context->SetShapeOrDataForValue(res, sym_shape_dim_exprs); - return true; -} - -bool StackOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - pir::Value operand_source = 
op->operand_source(0); - - const auto &attributes = op->attributes(); - int axis = attributes.at("axis").dyn_cast().data(); - const symbol::TensorListShapeOrDataDimExprs &shape_data_list = - infer_context->GetShapeOrDataForValue(operand_source) - .dyn_cast(); - - size_t rank = shape_data_list.at(0).shape().size(); - if (axis < 0) axis += rank + 1; - const symbol::ShapeOrDataDimExprs shape_data = [&] { - std::vector result_shape = {}; - std::vector result_data = {}; - const symbol::TensorShapeOrDataDimExprs &x_shape_data = - shape_data_list.at(0); - - const bool data_flag = [&] { - for (const auto &shape_data : shape_data_list) { - if (!shape_data.data().has_value()) { - return false; - } - } - return true; - }(); - - if (data_flag) { - // case 1: data is not empty, eg: shape_data_list = - // [[shape:{3},data:{S0,6,7}],...] - if (axis == 0 && x_shape_data.data().value().size() <= 1) { - for (const auto &shape_data : shape_data_list) { - result_data.emplace_back(shape_data.data().value().at(0)); - } - } else { - PADDLE_THROW(common::errors::Unimplemented( - op->name() + - " 's InferSymbolicShape can NOT deal with data size > 1 now.")); - } - result_shape.emplace_back( - static_cast(shape_data_list.size())); - } else { - // case 2: data is empty, eg: shape_data_list = - // [[shape:{5,6,7},data:{}],...] - for (size_t i = 0; i < rank; ++i) { - details::BuildCstrEqForTensorListAlongAxis( - infer_context, shape_data_list, i); - } - for (const symbol::DimExpr &dim : x_shape_data.shape()) { - result_shape.emplace_back(dim); - } - result_shape.insert(result_shape.begin() + axis, - static_cast(shape_data_list.size())); - } - - if (result_data.empty()) { - return symbol::ShapeOrDataDimExprs( - symbol::TensorShapeOrDataDimExprs(result_shape)); - } - return symbol::ShapeOrDataDimExprs( - symbol::TensorShapeOrDataDimExprs(result_shape, result_data)); - }(); - - pir::Value res = op->result(0); - infer_context->SetShapeOrDataForValue(res, shape_data); - return true; -} - -bool TrilinearInterpOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - return BicubicInterpOpInferSymbolicShape(op, infer_context); -} - -bool WhereOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - infer_context->SetShapeOrDataForValue( - op->result(0), - infer_context->GetShapeOrDataForValue(op->operand_source(0))); - - const std::vector &operands = {op->operand_source(0), - op->operand_source(1)}; - - size_t rank = infer_context->GetShapeOrDataForValue(op->operand_source(0)) - .shape() - .size(); - - for (size_t i = 0; i < rank; ++i) { - paddle::dialect::details::BuildCstrEqForTensorListAlongAxis( - infer_context, operands, i); - } - - return true; -} - -bool Where_OpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - return WhereOpInferSymbolicShape(op, infer_context); -} - -bool YoloLossOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const auto &dim_x = - infer_context->GetShapeOrDataForValue(op->operand_source(0)).shape(); - const auto &dim_gtbox = - infer_context->GetShapeOrDataForValue(op->operand_source(1)).shape(); - const auto &dim_gtlabel = - infer_context->GetShapeOrDataForValue(op->operand_source(2)).shape(); - std::vector anchors_mask = - paddle::dialect::details::GetVectorAttr(op, "anchors_mask"); - int mask_num = static_cast(anchors_mask.size()); - int class_num = op->attribute("class_num").data(); - - PADDLE_ENFORCE_EQ(dim_x.size(), - 4, - 
phi::errors::InvalidArgument( - "Input(X) should be a 4-D tensor. But received " - "X dimension size(%s).", - dim_x.size())); - PADDLE_ENFORCE_EQ( - dim_gtbox.size(), - 3, - phi::errors::InvalidArgument("Input(GTBox) should be a 3-D tensor, but " - "received gtbox dimension size(%s).", - dim_gtbox.size())); - PADDLE_ENFORCE_EQ( - dim_gtbox[2], - 4, - phi::errors::InvalidArgument("Input(GTBox) dim[2] should be 4. " - "But received dim[2](%s) != 4.", - dim_gtbox[2])); - PADDLE_ENFORCE_EQ(dim_gtlabel.size(), - 2, - phi::errors::InvalidArgument( - "Input(GTLabel) should be a 2-D tensor. " - "But received Input(GTLabel) dimension size(%s) != 2.", - dim_gtlabel.size())); - infer_context->AddEqualCstr(dim_x[2], dim_x[3]); - infer_context->AddEqualCstr(dim_x[1], mask_num * (5 + class_num)); - infer_context->AddEqualCstr(dim_gtlabel[0], dim_gtbox[0]); - infer_context->AddEqualCstr(dim_gtlabel[1], dim_gtbox[1]); - - const auto &dim_gtscore = - infer_context->GetShapeOrDataForValue(op->operand_source(3)).shape(); - PADDLE_ENFORCE_EQ( - dim_gtscore.size(), - 2, - phi::errors::InvalidArgument("Input(GTScore) should be a 2-D tensor. " - "But received GTScore dimension(%s).", - dim_gtscore.size())); - infer_context->AddEqualCstr(dim_gtscore[0], dim_gtbox[0]); - infer_context->AddEqualCstr(dim_gtscore[1], dim_gtbox[1]); - - std::vector dim_out = {dim_x[0]}; - infer_context->SetShapeOrDataForValue( - op->result(0), - symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs(dim_out)}); - - std::vector dim_obj_mask = { - dim_x[0], mask_num, dim_x[2], dim_x[3]}; - infer_context->SetShapeOrDataForValue( - op->result(1), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(dim_obj_mask)}); - - std::vector dim_gt_match_mask = {dim_gtbox[0], dim_gtbox[1]}; - infer_context->SetShapeOrDataForValue( - op->result(2), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(dim_gt_match_mask)}); - - return true; -} - -bool FakeChannelWiseDequantizeMaxAbsOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const auto &x_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - - int quant_axis = op->attribute("quant_axis").data(); - int x_num_col_dims = - op->attribute("x_num_col_dims").data(); - - PADDLE_ENFORCE_EQ( - quant_axis == 0 || quant_axis == 1, - true, - common::errors::InvalidArgument("'quant_axis' should be 0 or 1, but " - "the received is %d", - quant_axis)); - PADDLE_ENFORCE_EQ(x_num_col_dims == 0, - false, - common::errors::InvalidArgument( - "'x_num_col_dims' should be larger than 0, but " - "the received is %d", - x_num_col_dims)); - - infer_context->SetShapeOrDataForValue( - op->result(0), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(x_shape_or_data.shape())}); - - return true; -} - -} // namespace paddle::dialect diff --git a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/multiary_infer_sym-checkpoint.h b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/multiary_infer_sym-checkpoint.h deleted file mode 100644 index 2c347e4cd9ca4..0000000000000 --- a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/multiary_infer_sym-checkpoint.h +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#pragma once - -#include "paddle/pir/include/dialect/shape/utils/shape_analysis.h" - -namespace paddle::dialect { - -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Accuracy) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Addmm) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Addmm_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(AddN) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Auc) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(BatchNorm) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(BatchNorm_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(BicubicInterp) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Bilinear) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(BilinearInterp) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Concat) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(CrossEntropyWithSoftmax) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(CrossEntropyWithSoftmax_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(FullWithTensor) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(FlashAttn) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(GroupNorm) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Lerp) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Lerp_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(LayerNorm) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Linspace) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(LinearInterp) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Logspace) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(MemoryEfficientAttention) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Meshgrid) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(NearestInterp) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(RoiAlign) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Stack) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(TrilinearInterp) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Where) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Where_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(YoloLoss) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(FakeChannelWiseDequantizeMaxAbs) - -} // namespace paddle::dialect diff --git a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/same_operands_result-checkpoint.cc b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/same_operands_result-checkpoint.cc deleted file mode 100644 index 22d202775eb17..0000000000000 --- a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/same_operands_result-checkpoint.cc +++ /dev/null @@ -1,215 +0,0 @@ -// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
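The multiary_infer_sym-checkpoint.h removal above declares one inference entry point per operator through OP_DECLARE_INFER_SYMBOLIC_SHAPE. The macro itself is defined in paddle/pir/include/dialect/shape/utils/shape_analysis.h, which this patch does not show; judging from the function definitions in the matching -checkpoint.cc file, it presumably expands to a plain declaration, roughly as sketched here (a hedged reconstruction, not text from this patch):

// Presumed expansion of OP_DECLARE_INFER_SYMBOLIC_SHAPE(Accuracy); the actual
// macro lives in shape_analysis.h and may differ in detail.
bool AccuracyOpInferSymbolicShape(
    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context);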
- -#include "paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/same_operands_result.h" - -#define OP_SAME_OPERANDS_AND_RESULT(name) \ - bool name##OpInferSymbolicShape( \ - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { \ - const auto &operand_shape = \ - infer_context->GetShapeOrDataForValue(op->operand_source(0)).shape(); \ - infer_context->SetShapeOrDataForValue( \ - op->result(0), \ - symbol::ShapeOrDataDimExprs{ \ - symbol::TensorShapeOrDataDimExprs(operand_shape)}); \ - return true; \ - } - -namespace paddle::dialect { - -OP_SAME_OPERANDS_AND_RESULT(Abs) -OP_SAME_OPERANDS_AND_RESULT(Abs_) -OP_SAME_OPERANDS_AND_RESULT(Acos) -OP_SAME_OPERANDS_AND_RESULT(Acos_) -OP_SAME_OPERANDS_AND_RESULT(Acosh) -OP_SAME_OPERANDS_AND_RESULT(Acosh_) -OP_SAME_OPERANDS_AND_RESULT(Angle) -OP_SAME_OPERANDS_AND_RESULT(Asin) -OP_SAME_OPERANDS_AND_RESULT(Asin_) -OP_SAME_OPERANDS_AND_RESULT(Asinh) -OP_SAME_OPERANDS_AND_RESULT(Asinh_) -OP_SAME_OPERANDS_AND_RESULT(Atan) -OP_SAME_OPERANDS_AND_RESULT(Atan_) -OP_SAME_OPERANDS_AND_RESULT(Atanh) -OP_SAME_OPERANDS_AND_RESULT(Atanh_) -OP_SAME_OPERANDS_AND_RESULT(AsStrided) -OP_SAME_OPERANDS_AND_RESULT(Bernoulli) -OP_SAME_OPERANDS_AND_RESULT(BitwiseNot) -OP_SAME_OPERANDS_AND_RESULT(BitwiseNot_) -OP_SAME_OPERANDS_AND_RESULT(Ceil) -OP_SAME_OPERANDS_AND_RESULT(Ceil_) -OP_SAME_OPERANDS_AND_RESULT(Celu) -OP_SAME_OPERANDS_AND_RESULT(Clip) -OP_SAME_OPERANDS_AND_RESULT(Clip_) -OP_SAME_OPERANDS_AND_RESULT(Conj) -OP_SAME_OPERANDS_AND_RESULT(CopyTo) -OP_SAME_OPERANDS_AND_RESULT(Cos) -OP_SAME_OPERANDS_AND_RESULT(Cos_) -OP_SAME_OPERANDS_AND_RESULT(Cosh) -OP_SAME_OPERANDS_AND_RESULT(Cosh_) -OP_SAME_OPERANDS_AND_RESULT(DequantizeLog) -OP_SAME_OPERANDS_AND_RESULT(Digamma) -OP_SAME_OPERANDS_AND_RESULT(Digamma_) -OP_SAME_OPERANDS_AND_RESULT(Dirichlet) -OP_SAME_OPERANDS_AND_RESULT(EmptyLike) -OP_SAME_OPERANDS_AND_RESULT(Erf) -OP_SAME_OPERANDS_AND_RESULT(Erf_) -OP_SAME_OPERANDS_AND_RESULT(Erfinv) -OP_SAME_OPERANDS_AND_RESULT(Erfinv_) -OP_SAME_OPERANDS_AND_RESULT(Exp) -OP_SAME_OPERANDS_AND_RESULT(Exp_) -OP_SAME_OPERANDS_AND_RESULT(Expm1) -OP_SAME_OPERANDS_AND_RESULT(Expm1_) -OP_SAME_OPERANDS_AND_RESULT(Exponential_) -OP_SAME_OPERANDS_AND_RESULT(Fill) -OP_SAME_OPERANDS_AND_RESULT(Fill_) -OP_SAME_OPERANDS_AND_RESULT(Fetch) -OP_SAME_OPERANDS_AND_RESULT(Flip) -OP_SAME_OPERANDS_AND_RESULT(Floor) -OP_SAME_OPERANDS_AND_RESULT(Floor_) -OP_SAME_OPERANDS_AND_RESULT(FullLike) -OP_SAME_OPERANDS_AND_RESULT(Imag) -OP_SAME_OPERANDS_AND_RESULT(Increment) -OP_SAME_OPERANDS_AND_RESULT(Increment_) -OP_SAME_OPERANDS_AND_RESULT(Isfinite) -OP_SAME_OPERANDS_AND_RESULT(IsfiniteSr) -OP_SAME_OPERANDS_AND_RESULT(Isinf) -OP_SAME_OPERANDS_AND_RESULT(IsinfSr) -OP_SAME_OPERANDS_AND_RESULT(Isnan) -OP_SAME_OPERANDS_AND_RESULT(IsnanSr) -OP_SAME_OPERANDS_AND_RESULT(I0) -OP_SAME_OPERANDS_AND_RESULT(I0_) -OP_SAME_OPERANDS_AND_RESULT(I0e) -OP_SAME_OPERANDS_AND_RESULT(I1) -OP_SAME_OPERANDS_AND_RESULT(I1e) -OP_SAME_OPERANDS_AND_RESULT(Lgamma) -OP_SAME_OPERANDS_AND_RESULT(Lgamma_) -OP_SAME_OPERANDS_AND_RESULT(Log1p) -OP_SAME_OPERANDS_AND_RESULT(Log1p_) -OP_SAME_OPERANDS_AND_RESULT(Log) -OP_SAME_OPERANDS_AND_RESULT(Log_) -OP_SAME_OPERANDS_AND_RESULT(LogicalNot) -OP_SAME_OPERANDS_AND_RESULT(LogicalNot_) -OP_SAME_OPERANDS_AND_RESULT(Logit) -OP_SAME_OPERANDS_AND_RESULT(Logit_) -OP_SAME_OPERANDS_AND_RESULT(Logsigmoid) -OP_SAME_OPERANDS_AND_RESULT(Logsigmoid_) -OP_SAME_OPERANDS_AND_RESULT(Pow) -OP_SAME_OPERANDS_AND_RESULT(Poisson) -OP_SAME_OPERANDS_AND_RESULT(Pow_) -OP_SAME_OPERANDS_AND_RESULT(Prelu) 
-OP_SAME_OPERANDS_AND_RESULT(Print) -OP_SAME_OPERANDS_AND_RESULT(PutAlongAxis) -OP_SAME_OPERANDS_AND_RESULT(PutAlongAxis_) -OP_SAME_OPERANDS_AND_RESULT(Real) -OP_SAME_OPERANDS_AND_RESULT(Reciprocal) -OP_SAME_OPERANDS_AND_RESULT(Reciprocal_) -OP_SAME_OPERANDS_AND_RESULT(Relu) -OP_SAME_OPERANDS_AND_RESULT(Relu6) -OP_SAME_OPERANDS_AND_RESULT(Relu_) -OP_SAME_OPERANDS_AND_RESULT(Reverse) -OP_SAME_OPERANDS_AND_RESULT(Roll) -OP_SAME_OPERANDS_AND_RESULT(Round) -OP_SAME_OPERANDS_AND_RESULT(Round_) -OP_SAME_OPERANDS_AND_RESULT(RowConv) -OP_SAME_OPERANDS_AND_RESULT(Rsqrt) -OP_SAME_OPERANDS_AND_RESULT(Rsqrt_) -OP_SAME_OPERANDS_AND_RESULT(ScaleSr) -OP_SAME_OPERANDS_AND_RESULT(ScaleSr_) -OP_SAME_OPERANDS_AND_RESULT(Scale_) -OP_SAME_OPERANDS_AND_RESULT(ScatterNdAdd) -OP_SAME_OPERANDS_AND_RESULT(Scatter) -OP_SAME_OPERANDS_AND_RESULT(Scatter_) -OP_SAME_OPERANDS_AND_RESULT(Select) -OP_SAME_OPERANDS_AND_RESULT(Sign) -OP_SAME_OPERANDS_AND_RESULT(Sin) -OP_SAME_OPERANDS_AND_RESULT(Sin_) -OP_SAME_OPERANDS_AND_RESULT(Sinh) -OP_SAME_OPERANDS_AND_RESULT(Sinh_) -OP_SAME_OPERANDS_AND_RESULT(Softmax) -OP_SAME_OPERANDS_AND_RESULT(Softmax_) -OP_SAME_OPERANDS_AND_RESULT(Swish) -OP_SAME_OPERANDS_AND_RESULT(Tan) -OP_SAME_OPERANDS_AND_RESULT(Tan_) -OP_SAME_OPERANDS_AND_RESULT(Tanh) -OP_SAME_OPERANDS_AND_RESULT(Tanh_) -OP_SAME_OPERANDS_AND_RESULT(Tril) -OP_SAME_OPERANDS_AND_RESULT(Tril_) -OP_SAME_OPERANDS_AND_RESULT(Triu) -OP_SAME_OPERANDS_AND_RESULT(Triu_) -OP_SAME_OPERANDS_AND_RESULT(Trunc) -OP_SAME_OPERANDS_AND_RESULT(Trunc_) -OP_SAME_OPERANDS_AND_RESULT(Sigmoid) -OP_SAME_OPERANDS_AND_RESULT(Sigmoid_) -OP_SAME_OPERANDS_AND_RESULT(LeakyRelu) -OP_SAME_OPERANDS_AND_RESULT(LeakyRelu_) -OP_SAME_OPERANDS_AND_RESULT(ThresholdedRelu) -OP_SAME_OPERANDS_AND_RESULT(ThresholdedRelu_) -OP_SAME_OPERANDS_AND_RESULT(SquareSr) -OP_SAME_OPERANDS_AND_RESULT(Square) -OP_SAME_OPERANDS_AND_RESULT(Polygamma) -OP_SAME_OPERANDS_AND_RESULT(Polygamma_) -OP_SAME_OPERANDS_AND_RESULT(EnableCheckModelNanInf) -OP_SAME_OPERANDS_AND_RESULT(ViewShape) - -bool ScaleOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - pir::Value operand_source = op->operand_source(0); - const symbol::ShapeOrDataDimExprs &operand_shape_or_data = - infer_context->GetShapeOrDataForValue(operand_source); - std::vector shape(operand_shape_or_data.shape()); - - if (operand_shape_or_data.data()) { - const std::vector data = [&] { - const symbol::DimExpr scale = [&]() -> symbol::DimExpr { - if (op->num_operands() == 2) { - return infer_context->GetShapeOrDataForValue(op->operand_source(1)) - .data() - ->at(0); - } - return static_cast( - op->attribute("scale").dyn_cast().data()); - }(); - int bias = op->attribute("bias").dyn_cast().data(); - - std::vector data; - for (auto &val : *(operand_shape_or_data.data())) { - data.push_back(val * scale + bias); - } - return data; - }(); - - infer_context->SetShapeOrDataForValue( - op->result(0), symbol::TensorShapeOrDataDimExprs(shape, data)); - } else { - infer_context->SetShapeOrDataForValue(op->result(0), operand_shape_or_data); - } - - return true; -} - -bool ArgsortOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const symbol::ShapeOrDataDimExprs &operand_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - infer_context->SetShapeOrDataForValue(op->result(0), operand_shape_or_data); - infer_context->SetShapeOrDataForValue(op->result(1), operand_shape_or_data); - return true; -} - -} // namespace paddle::dialect - -namespace 
cinn::dialect {} // namespace cinn::dialect - -#undef OP_SAME_OPERANDS_AND_RESULT diff --git a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/same_operands_result-checkpoint.h b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/same_operands_result-checkpoint.h deleted file mode 100644 index ed3565456c841..0000000000000 --- a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/same_operands_result-checkpoint.h +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#pragma once - -#include "paddle/pir/include/dialect/shape/utils/shape_analysis.h" - -namespace paddle::dialect { -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Abs) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Abs_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Acos) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Acos_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Acosh) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Acosh_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Angle) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Argsort) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Asin) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Asin_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Asinh) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Asinh_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Assign) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Assign_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(AsStrided) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Atan) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Atan_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Atanh) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Atanh_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Bernoulli) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(BitwiseNot) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(BitwiseNot_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Ceil) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Ceil_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Celu) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Clip) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Clip_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Conj) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(CopyTo) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cos) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cos_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cosh) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cosh_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(DequantizeLog) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Digamma) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Digamma_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Dirichlet) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(EmptyLike) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Erf) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Erf_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Erfinv) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Erfinv_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Exp) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Exp_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Expm1) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Expm1_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Exponential_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Fetch) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Fill) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Fill_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Flip) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Floor) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Floor_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(FullLike) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Imag) 
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Increment) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Increment_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Isfinite) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(IsfiniteSr) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Isinf) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(IsinfSr) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Isnan) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(IsnanSr) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(I0) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(I0_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(I0e) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(I1) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(I1e) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Lgamma) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Lgamma_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Log1p) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Log1p_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Log) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Log_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(LogicalNot) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(LogicalNot_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Logit) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Logit_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Logsigmoid) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Logsigmoid_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Poisson) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Pow) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Pow_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Prelu) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Print) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(PutAlongAxis) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(PutAlongAxis_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Real) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Reciprocal) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Reciprocal_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Relu) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Relu6) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Relu_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Reverse) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Roll) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Round) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Round_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(RowConv) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Rsqrt) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Rsqrt_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Scale) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(ScaleSr) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(ScaleSr_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Scale_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(ScatterNdAdd) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Scatter) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Scatter_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Select) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Sign) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Sin) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Sin_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Sinh) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Sinh_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Softmax) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Softmax_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Swish) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Tan) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Tan_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Tanh) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Tanh_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Tril) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Tril_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Triu) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Triu_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Trunc) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Trunc_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Sigmoid) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Sigmoid_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(LeakyRelu) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(LeakyRelu_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(ThresholdedRelu) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(ThresholdedRelu_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(SquareSr) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Square) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Polygamma) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Polygamma_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(EnableCheckModelNanInf) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(ViewShape) - -} // namespace paddle::dialect - -namespace cinn::dialect { -using 
paddle::dialect::ReverseOpInferSymbolicShape; -using paddle::dialect::ScaleOpInferSymbolicShape; -using paddle::dialect::SelectOpInferSymbolicShape; -} // namespace cinn::dialect diff --git a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/unary_infer_sym-checkpoint.cc b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/unary_infer_sym-checkpoint.cc deleted file mode 100644 index 8781161564eeb..0000000000000 --- a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/unary_infer_sym-checkpoint.cc +++ /dev/null @@ -1,2138 +0,0 @@ -// Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/infer_sym_slice_utils.h" -#include "paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/infer_sym_utils.h" -#include "paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/unary_infer_sym.h" - -namespace { -std::vector GetRealPadding( - const std::vector &origin_paddings, - const bool global_pooling, - const bool adaptive, - const std::string padding_algorithm, - const std::vector data_dims, - const std::vector &strides, - const std::vector &kernel_size) { - const auto &GetInitPadding = [&]() -> std::vector { - std::vector res; - // set padding size == data_dims.size() * 2 - if (origin_paddings.size() == data_dims.size()) { - for (std::size_t i = 0; i < origin_paddings.size(); ++i) { - res.emplace_back(symbol::DimExpr{origin_paddings.at(i)}); - res.emplace_back(symbol::DimExpr{origin_paddings.at(i)}); - } - } else { - PADDLE_ENFORCE_EQ( - data_dims.size() * 2, - origin_paddings.size(), - phi::errors::InvalidArgument( - "Paddings size %d should be the same or twice as the " - "pooling size %d.", - origin_paddings.size(), - data_dims.size() * 2)); - for (std::size_t i = 0; i < origin_paddings.size(); ++i) { - res.emplace_back(symbol::DimExpr{origin_paddings.at(i)}); - } - } - return res; - }; - - std::vector real_padding = GetInitPadding(); - - const auto &UpdataPadding = [&]() { - symbol::DimExpr one_dimexpr{1}; - symbol::DimExpr zero_dimexpr{0}; - // when padding_algorithm is "VALID" or "SAME" - if (padding_algorithm == "SAME") { - for (std::size_t i = 0; i < data_dims.size(); ++i) { - symbol::DimExpr stride_dimexpr = symbol::DimExpr{strides[i]}; - - symbol::DimExpr out_size = - (data_dims[i] + stride_dimexpr - one_dimexpr) / stride_dimexpr; - symbol::DimExprBuilder builder; - symbol::DimExpr pad_sum = - builder.Max((out_size - one_dimexpr) * stride_dimexpr + - kernel_size[i] - data_dims[i], - zero_dimexpr); - symbol::DimExpr pad_0 = pad_sum / symbol::DimExpr{2}; - symbol::DimExpr pad_1 = pad_sum - pad_0; - real_padding[i * 2] = pad_0; - real_padding[i * 2 + 1] = pad_1; - } - } else if (padding_algorithm == "VALID") { - real_padding.assign(real_padding.size(), zero_dimexpr); - } - - // if global_pooling == true or adaptive == true, padding will be 
ignore - if (global_pooling || adaptive) { - real_padding.assign(real_padding.size(), zero_dimexpr); - } - }; - - UpdataPadding(); - return real_padding; -} - -symbol::ShapeOrDataDimExprs Pool2dRawInferSymbolicShape( - pir::Operation *op, - const std::vector &kernel_size, - pir::InferSymbolicShapeContext *infer_context) { - const auto &x_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - - const auto &x_dims = x_shape_or_data.shape(); - PADDLE_ENFORCE_EQ( - x_dims.size() == 4 || x_dims.size() == 5, - true, - phi::errors::InvalidArgument( - "the input of Op(pool) should be 4-D or 5-D Tensor. But " - "received: %u-D Tensor.", - x_dims.size())); - - PADDLE_ENFORCE_EQ(x_dims.size() - kernel_size.size(), - 2U, - phi::errors::InvalidArgument( - "the rank of input minus the size of kernel_size " - "must be equal to 2 in Op(pool). " - "But received: the rank of input is %d and the " - "rank of kernel_size is %d.", - x_dims.size(), - kernel_size.size())); - - std::vector strides = [&]() { - std::vector res; - const auto &stride_attr = - op->attributes().at("strides").dyn_cast(); - for (size_t i = 0; i < stride_attr.size(); i++) { - res.emplace_back( - stride_attr.at(i).dyn_cast().data()); - } - return res; - }(); - - PADDLE_ENFORCE_EQ( - kernel_size.size(), - strides.size(), - phi::errors::InvalidArgument( - "the rank of kernel_size and strides in Op(pool) must be equal. " - "But received: the rank of kernel_size is %d and the rank of stride " - "is %d.", - kernel_size.size(), - strides.size())); - - const std::string &data_format = - op->attribute("data_format").AsString(); - const bool channel_last = data_format == "NHWC" || data_format == "NDHWC"; - - const auto &data_dims = [&]() -> std::vector { - if (channel_last) { - return std::vector(x_dims.begin() + 1, x_dims.end() - 1); - } else { - return std::vector(x_dims.begin() + 2, x_dims.end()); - } - }(); - - bool global_pooling = - op->attribute("global_pooling").data(); - bool adaptive = op->attribute("adaptive").data(); - std::string padding_algorithm = - op->attribute("padding_algorithm").AsString(); - - const auto &real_paddings = [&]() -> std::vector { - std::vector paddings; - const auto &padding_attr = - op->attributes().at("paddings").dyn_cast(); - for (size_t i = 0; i < padding_attr.size(); i++) { - paddings.emplace_back( - padding_attr.at(i).dyn_cast().data()); - } - return GetRealPadding(paddings, - global_pooling, - adaptive, - padding_algorithm, - data_dims, - strides, - kernel_size - - ); - }(); - - const auto &real_kernel_size = [&]() -> std::vector { - if (global_pooling) { - return data_dims; - } - return kernel_size; - }(); - - const auto &output_shape_or_data = [&]() -> symbol::ShapeOrDataDimExprs { - std::vector output_shape; - bool ceil_mode = op->attribute("ceil_mode").data(); - if (adaptive) { - output_shape.insert( - output_shape.end(), real_kernel_size.begin(), real_kernel_size.end()); - } else { - for (size_t i = 0; i < data_dims.size(); ++i) { - symbol::DimExpr stride_dimexpr{strides[i]}; - symbol::DimExpr one_dimexpr{1}; - if (!ceil_mode) { - output_shape.emplace_back((data_dims[i] - real_kernel_size[i] + - real_paddings[2 * i] + - real_paddings[2 * i + 1]) / - stride_dimexpr + - one_dimexpr); - } else { - output_shape.emplace_back( - (data_dims[i] - real_kernel_size[i] + real_paddings[2 * i] + - real_paddings[2 * i + 1] + stride_dimexpr - one_dimexpr) / - stride_dimexpr + - one_dimexpr); - } - } - } - - // output_N = input_N - output_shape.insert(output_shape.begin(), x_dims[0]); - // 
output_C = input_C - if (channel_last) { - output_shape.push_back(x_dims[x_dims.size() - 1]); - } else { - output_shape.insert(output_shape.begin() + 1, x_dims[1]); - } - return symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(output_shape)}; - }(); - - return output_shape_or_data; -} -} // namespace - -namespace paddle::dialect { -using paddle::dialect::details::CreateShapeOrDataForXShape; - -bool AllOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - const auto &axis = details::GetVectorAttr(op, "axis"); - return details::ReduceInferDim(op, - infer_context, - axis, - GetBoolAttr(op, "keepdim"), /*keepdim*/ - axis.size() == 0 /*reduce_all*/); -} - -bool AmaxOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - const auto &axis = details::GetVectorAttr(op, "axis"); - return details::ReduceInferDim(op, - infer_context, - axis, - GetBoolAttr(op, "keepdim"), /*keepdim*/ - axis.size() == 0 /*reduce_all*/); -} - -bool AminOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - const auto &axis = details::GetVectorAttr(op, "axis"); - return details::ReduceInferDim(op, - infer_context, - axis, - GetBoolAttr(op, "keepdim"), /*keepdim*/ - axis.size() == 0 /*reduce_all*/); -} - -bool AnyOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - const auto &axis = details::GetVectorAttr(op, "axis"); - return details::ReduceInferDim(op, - infer_context, - axis, - GetBoolAttr(op, "keepdim"), /*keepdim*/ - axis.size() == 0 /*reduce_all*/); -} - -bool ArgmaxOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - bool flatten = GetBoolAttr(op, "flatten"); - bool keepdims = GetBoolAttr(op, "keepdims"); - - const auto &input_sym_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(0)).shape(); - int rank = input_sym_shape.size(); - - const auto &axis_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(1)); - int axis = - static_cast(axis_shape_or_data.data().value().at(0).Get()); - if (axis < 0) axis += rank; - - const auto &out_sym_shape = [&] { - std::vector out_sym_shape; - if (flatten) { - if (keepdims) { - out_sym_shape.emplace_back(std::int64_t(rank)); - } else { - out_sym_shape.emplace_back(std::int64_t(0)); - } - } else { - for (int i = 0; i < axis; i++) { - out_sym_shape.emplace_back(input_sym_shape.at(i)); - } - if (keepdims) { - out_sym_shape.emplace_back(std::int64_t(1)); - } - - for (int i = axis + 1; i < rank; i++) { - out_sym_shape.emplace_back(input_sym_shape.at(i)); - } - } - return out_sym_shape; - }(); - - symbol::ShapeOrDataDimExprs shape_data{ - symbol::TensorShapeOrDataDimExprs(out_sym_shape)}; - - infer_context->SetShapeOrDataForValue(op->result(0), shape_data); - return true; -} - -bool ArgminOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - return ArgmaxOpInferSymbolicShape(op, infer_context); -} - -bool AsComplexOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - pir::Value operand_source = op->operand_source(0); - const symbol::ShapeOrDataDimExprs &operand_shape_or_data = - infer_context->GetShapeOrDataForValue(operand_source); - - const std::vector out_dims = [&] { - std::vector out_dims = operand_shape_or_data.shape(); - out_dims.pop_back(); - return out_dims; - }(); - - symbol::ShapeOrDataDimExprs shape_data{ - 
symbol::TensorShapeOrDataDimExprs(out_dims)}; - - infer_context->SetShapeOrDataForValue(op->result(0), shape_data); - return true; -} -bool AsRealOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - pir::Value operand_source = op->operand_source(0); - const symbol::ShapeOrDataDimExprs &operand_shape_or_data = - infer_context->GetShapeOrDataForValue(operand_source); - - const std::vector out_dims = [&] { - std::vector out_dims = operand_shape_or_data.shape(); - out_dims.push_back(symbol::DimExpr(2)); - return out_dims; - }(); - - symbol::ShapeOrDataDimExprs shape_data{ - symbol::TensorShapeOrDataDimExprs(out_dims)}; - - infer_context->SetShapeOrDataForValue(op->result(0), shape_data); - return true; -} - -bool AssignOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - infer_context->SetShapeOrDataForValue( - op->result(0), - infer_context->GetShapeOrDataForValue(op->operand_source(0))); - return true; -} - -bool Assign_OpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - return AssignOpInferSymbolicShape(op, infer_context); -} - -bool BipartiteMatchOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const auto &dist_mat_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - const auto &dims = dist_mat_shape_or_data.shape(); - - PADDLE_ENFORCE_EQ( - dims.size(), - 2, - phi::errors::InvalidArgument("The rank of Input(DistMat) must be 2.")); - - infer_context->SetShapeOrDataForValue(op->result(0), dist_mat_shape_or_data); - - infer_context->SetShapeOrDataForValue(op->result(1), dist_mat_shape_or_data); - - return true; -} - -bool CastOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - infer_context->SetShapeOrDataForValue( - op->result(0), - infer_context->GetShapeOrDataForValue(op->operand_source(0))); - return true; -} - -bool Cast_OpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - return CastOpInferSymbolicShape(op, infer_context); -} - -bool CholeskyOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const auto &x_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - - auto rank = x_shape.shape().size(); - PADDLE_ENFORCE_GE(rank, - 2, - common::errors::InvalidArgument( - "The Input(X) should have at least 2 dimensions. But " - "received a %d dimension tensor.", - rank)); - - infer_context->AddEqualCstr(x_shape.shape()[rank - 2], - x_shape.shape()[rank - 1]); - - infer_context->SetShapeOrDataForValue(op->result(0), x_shape); - - return true; -} - -bool ClipByNormOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const auto &input_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - float max_norm = op->attribute("max_norm").data(); - PADDLE_ENFORCE_GT( - max_norm, - 0, - phi::errors::InvalidArgument("max_norm should be greater than 0. 
" - "Received max_norm is %f.", - max_norm)); - - infer_context->SetShapeOrDataForValue(op->result(0), input_shape); - return true; -} - -bool ClipByNormSrOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - return ClipByNormOpInferSymbolicShape(op, infer_context); -} - -bool CummaxOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - pir::Value operand_source = op->operand_source(0); - const symbol::ShapeOrDataDimExprs &operand_shape_or_data = - infer_context->GetShapeOrDataForValue(operand_source); - - infer_context->SetShapeOrDataForValue(op->result(0), operand_shape_or_data); - infer_context->SetShapeOrDataForValue(op->result(1), operand_shape_or_data); - return true; -} -bool CumminOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - return CummaxOpInferSymbolicShape(op, infer_context); -} -bool CumprodOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - pir::Value operand_source = op->operand_source(0); - const symbol::ShapeOrDataDimExprs &operand_shape_or_data = - infer_context->GetShapeOrDataForValue(operand_source); - infer_context->SetShapeOrDataForValue(op->result(0), operand_shape_or_data); - return true; -} -bool Cumprod_OpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - return CumprodOpInferSymbolicShape(op, infer_context); -} -bool CumsumOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - pir::Value operand_source = op->operand_source(0); - - const symbol::ShapeOrDataDimExprs &operand_shape_or_data = - infer_context->GetShapeOrDataForValue(operand_source); - - bool flatten = GetBoolAttr(op, "flatten"); - if (flatten) { - symbol::DimExpr product{1}; - const auto &dim_exprs = operand_shape_or_data.shape(); - for (const auto &dim_expr : dim_exprs) { - product = product * dim_expr; - } - const std::vector out_dims = {product}; - symbol::ShapeOrDataDimExprs shape_data{ - symbol::TensorShapeOrDataDimExprs(out_dims)}; - infer_context->SetShapeOrDataForValue(op->result(0), shape_data); - - } else { - infer_context->SetShapeOrDataForValue(op->result(0), operand_shape_or_data); - } - return true; -} -bool Cumsum_OpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - return CumsumOpInferSymbolicShape(op, infer_context); -} -bool ChannelShuffleOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const auto &x_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - const std::vector &input_dims = x_shape_or_data.shape(); - - int groups = op->attribute("groups").data(); - std::string data_format = - op->attribute("data_format").AsString(); - - PADDLE_ENFORCE_EQ( - input_dims.size(), - 4, - phi::errors::InvalidArgument("Input should be a 4-D tensor of format [N, " - "C, H, W] or [N, H, W, C], but got %u.", - input_dims.size())); - PADDLE_ENFORCE_GE( - groups, - 1, - phi::errors::InvalidArgument("groups should be larger than 0.")); - PADDLE_ENFORCE_EQ( - data_format == "NCHW" || data_format == "NHWC", - true, - phi::errors::InvalidArgument("data_format must be one of NCHW and NHWC. 
" - "But received data_format: %s", - data_format)); - - const bool channel_last = (data_format == "NHWC"); - - symbol::DimExpr channels; - if (!channel_last) { - channels = input_dims[1]; - } else { - channels = input_dims[3]; - } - - symbol::DimExpr groups_expr = symbol::DimExpr(groups); - symbol::DimExpr expected_channels = groups_expr * (channels / groups_expr); - - infer_context->AddEqualCstr(channels, expected_channels); - - infer_context->SetShapeOrDataForValue(op->result(0), x_shape_or_data); - - return true; -} - -bool DiagEmbedOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - pir::Value operand_source = op->operand_source(0); - const symbol::ShapeOrDataDimExprs &operand_shape_or_data = - infer_context->GetShapeOrDataForValue(operand_source); - const auto &attributes = op->attributes(); - int dim1 = attributes.at("dim1").dyn_cast().data(); - int dim2 = attributes.at("dim2").dyn_cast().data(); - int offset = attributes.at("offset").dyn_cast().data(); - - const auto &x_dims = operand_shape_or_data.shape(); - int dim1_ = dim1 < 0 ? x_dims.size() + dim1 + 1 : dim1; - int dim2_ = dim2 < 0 ? x_dims.size() + dim2 + 1 : dim2; - int64_t offset_ = static_cast(std::abs(offset)); - symbol::DimExpr new_dim_len = - symbol::DimExpr(offset_) + x_dims.at(x_dims.size() - 1); - - const auto &out_dims = [&] { - std::vector out_dims = x_dims; - out_dims.pop_back(); - out_dims.insert(out_dims.begin() + std::min(dim1_, dim2_), new_dim_len); - out_dims.insert(out_dims.begin() + std::max(dim1_, dim2_), new_dim_len); - return out_dims; - }(); - symbol::ShapeOrDataDimExprs shape_data{ - symbol::TensorShapeOrDataDimExprs(out_dims)}; - infer_context->SetShapeOrDataForValue(op->result(0), shape_data); - return true; -} -bool DiagonalOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - pir::Value operand_source = op->operand_source(0); - const symbol::ShapeOrDataDimExprs &operand_shape_or_data = - infer_context->GetShapeOrDataForValue(operand_source); - const auto &attributes = op->attributes(); - int axis1 = attributes.at("axis1").dyn_cast().data(); - int axis2 = attributes.at("axis2").dyn_cast().data(); - int offset = attributes.at("offset").dyn_cast().data(); - - const auto &x_dims = operand_shape_or_data.shape(); - int axis1_ = axis1 < 0 ? x_dims.size() + axis1 : axis1; - int axis2_ = axis2 < 0 ? x_dims.size() + axis2 : axis2; - - auto out_dims = x_dims; - auto axis1_size = out_dims.at(axis1_); - auto axis2_size = out_dims.at(axis2_); - out_dims.erase(out_dims.begin() + std::max(axis1_, axis2_)); - out_dims.erase(out_dims.begin() + std::min(axis1_, axis2_)); - - symbol::DimExprBuilder builder; - symbol::DimExpr zero{0}; - symbol::DimExpr res_shape; - symbol::DimExpr offset_sym{offset}; - if (offset == 0) { - res_shape = builder.Min(axis1_size, axis2_size); - } else if (offset > 0) { - if (axis2_size.isa()) { - res_shape = (axis2_size.dyn_cast() - offset) > 0 - ? builder.Min(axis1_size, axis2_size - offset_sym) - : zero; - } else { - res_shape = infer_context->GetNextSymName(); - } - } else { - if (axis1_size.isa()) { - res_shape = (axis1_size.dyn_cast() + offset) > 0 - ? 
builder.Min(axis1_size + offset_sym, axis2_size) - : zero; - } else { - res_shape = infer_context->GetNextSymName(); - } - } - out_dims.push_back(symbol::SimplifyDimExpr(res_shape)); - - symbol::ShapeOrDataDimExprs shape_data{ - symbol::TensorShapeOrDataDimExprs(out_dims)}; - infer_context->SetShapeOrDataForValue(op->result(0), shape_data); - return true; -} - -bool DistributeFpnProposalsOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const auto &attributes = op->attributes(); - int32_t min_level = - attributes.at("min_level").dyn_cast().data(); - int32_t max_level = - attributes.at("max_level").dyn_cast().data(); - int32_t num_levels = max_level - min_level + 1; - int64_t batch_size = 1; - - symbol::DimExpr num_rois = - infer_context->GetShapeOrDataForValue(op->operand_source(0)) - .shape() - .at(0); - - const auto &multi_rois_out_shape = [&]() { - symbol::TensorListShapeOrDataDimExprs multi_rois_out_shape; - if (num_levels == 1) { - multi_rois_out_shape.emplace_back( - symbol::TensorShapeOrDataDimExprs({num_rois, 4})); - } else { - symbol::DimExpr last_dim = num_rois; - for (int i = 0; i < num_levels - 1; i++) { - const auto &next_sym_name = infer_context->GetNextSymName(); - std::vector level_dim = {next_sym_name, 4}; - multi_rois_out_shape.emplace_back( - symbol::TensorShapeOrDataDimExprs(level_dim)); - last_dim = last_dim - level_dim.at(0); - } - multi_rois_out_shape.emplace_back(symbol::TensorShapeOrDataDimExprs( - {infer_context->GetNextSymName(), 4})); - } - - return multi_rois_out_shape; - }(); - - const auto &rois_num_per_level_out_shape = [&]() { - symbol::TensorListShapeOrDataDimExprs rois_num_per_level_out_shape; - rois_num_per_level_out_shape.resize( - num_levels, symbol::TensorShapeOrDataDimExprs({batch_size})); - return rois_num_per_level_out_shape; - }(); - - const auto &restore_ind = [&]() { - if (op->operand_source(1)) { - return symbol::TensorShapeOrDataDimExprs( - {infer_context->GetNextSymName(), 1}); - } - return symbol::TensorShapeOrDataDimExprs({num_rois, 1}); - }(); - - infer_context->SetShapeOrDataForValue(op->result(0), multi_rois_out_shape); - infer_context->SetShapeOrDataForValue(op->result(1), - rois_num_per_level_out_shape); - infer_context->SetShapeOrDataForValue(op->result(2), restore_ind); - return true; -} - -bool EighOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - const auto &x_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(0)).shape(); - std::vector out_shape; - for (size_t i = 0; i < x_shape.size() - 1; ++i) { - out_shape.push_back(x_shape.at(i)); - } - infer_context->SetShapeOrDataForValue( - op->result(0), symbol::TensorShapeOrDataDimExprs(out_shape)); - infer_context->SetShapeOrDataForValue( - op->result(1), symbol::TensorShapeOrDataDimExprs(x_shape)); - return true; -} - -bool EigvalshOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - return EighOpInferSymbolicShape(op, infer_context); -} - -bool FakeChannelWiseQuantizeAbsMaxOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const auto &x_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - - int bit_length = op->attribute("bit_length").data(); - int quant_axis = op->attribute("quant_axis").data(); - - PADDLE_ENFORCE_EQ(bit_length >= 1 && bit_length <= 16, - true, - common::errors::InvalidArgument( - "'bit_length' should be between 1 and 16, but " - "the received is %d", 
- bit_length)); - PADDLE_ENFORCE_EQ( - quant_axis == 0 || quant_axis == 1, - true, - common::errors::InvalidArgument("'quant_axis' should be 0 or 1, but " - "the received is %d", - quant_axis)); - - infer_context->SetShapeOrDataForValue( - op->result(0), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(x_shape_or_data.shape())}); - - std::vector out_scale_shape = { - x_shape_or_data.shape()[quant_axis]}; - infer_context->SetShapeOrDataForValue( - op->result(1), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(out_scale_shape)}); - - return true; -} - -bool FftC2cOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - const auto &x_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - std::vector x_dims = x_shape_or_data.shape(); - - // Set the output shape to be the same as the input shape - infer_context->SetShapeOrDataForValue( - op->result(0), - symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs(x_dims)}); - - return true; -} - -bool FftC2rOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - const auto &x_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - std::vector x_dims = x_shape_or_data.shape(); - - auto axes = paddle::dialect::details::GetVectorAttr(op, "axes"); - int64_t last_dim_size = - op->attribute("last_dim_size").data(); - int last_fft_axis = static_cast(axes.back()); - - std::vector out_dims = x_dims; - - if (last_dim_size > 0) { - out_dims[last_fft_axis] = symbol::DimExpr(last_dim_size); - } else { - symbol::DimExprBuilder builder; - out_dims[last_fft_axis] = - builder.Mul(x_dims[last_fft_axis], 2) - symbol::DimExpr{1}; - } - - infer_context->SetShapeOrDataForValue( - op->result(0), - symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs(out_dims)}); - - return true; -} - -bool FftR2cOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - const auto &x_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - std::vector x_dims = x_shape_or_data.shape(); - - auto axes = paddle::dialect::details::GetVectorAttr(op, "axes"); - bool onesided = op->attribute("onesided").data(); - - std::vector out_dims = x_dims; - - if (onesided) { - int last_fft_axis = static_cast(axes.back()); - symbol::DimExprBuilder builder; - out_dims[last_fft_axis] = - builder.Add(builder.Div(x_dims[last_fft_axis], 2), 1); - } - - infer_context->SetShapeOrDataForValue( - op->result(0), - symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs(out_dims)}); - - return true; -} - -bool FillDiagonalOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const auto &x_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - std::vector x_dims = x_shape_or_data.shape(); - - infer_context->SetShapeOrDataForValue( - op->result(0), - symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs(x_dims)}); - - return true; -} - -bool FillDiagonal_OpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - return FillDiagonalOpInferSymbolicShape(op, infer_context); -} - -bool FlattenOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const auto &attributes = op->attributes(); - int start_axis = - attributes.at("start_axis").dyn_cast().data(); - int stop_axis = - attributes.at("stop_axis").dyn_cast().data(); - - const 
auto &x_shape =
-      infer_context->GetShapeOrDataForValue(op->operand_source(0)).shape();
-  int in_dims_size = x_shape.size();
-
-  if (in_dims_size == 0) {
-    PADDLE_ENFORCE_EQ(
-        start_axis == 0 || start_axis == -1,
-        true,
-        common::errors::InvalidArgument("The start_axis should be 0 or -1 when "
-                                        "the input tensor is a 0D-Tensor"));
-    PADDLE_ENFORCE_EQ(stop_axis == 0 || stop_axis == -1,
-                      true,
-                      common::errors::InvalidArgument(
-                          "The stop_axis should be 0 or -1 when the "
-                          "input tensor is a 0D-Tensor"));
-    // this ensures the output shape is {1}
-    start_axis = 0;
-    stop_axis = -1;
-  }
-
-  if (start_axis < 0) {
-    start_axis = start_axis + in_dims_size;
-  }
-  if (stop_axis < 0) {
-    stop_axis = stop_axis + in_dims_size;
-  }
-  if (in_dims_size > 0) {
-    PADDLE_ENFORCE_GE(
-        stop_axis,
-        start_axis,
-        common::errors::InvalidArgument("The stop_axis should be greater "
-                                        "than or equal to start_axis."));
-  }
-
-  symbol::DimExpr outer{1};
-  std::vector<symbol::DimExpr> out_shape;
-  out_shape.reserve(in_dims_size - stop_axis + start_axis + 1);
-  for (int i = 0; i < start_axis; ++i) {
-    out_shape.push_back(x_shape.at(i));
-  }
-  for (int i = start_axis; i <= stop_axis; i++) {
-    outer = outer * x_shape.at(i);
-  }
-  out_shape.push_back(outer);
-  for (int i = stop_axis + 1; i < in_dims_size; i++) {
-    out_shape.push_back(x_shape.at(i));
-  }
-
-  symbol::ShapeOrDataDimExprs out_shape_data{
-      symbol::TensorShapeOrDataDimExprs(out_shape)};
-  infer_context->SetShapeOrDataForValue(op->result(0), out_shape_data);
-
-  std::vector<symbol::DimExpr> xshape_shape = x_shape;
-  xshape_shape.insert(xshape_shape.begin(), symbol::DimExpr{0});
-  symbol::ShapeOrDataDimExprs xshape_shape_data{
-      symbol::TensorShapeOrDataDimExprs(xshape_shape)};
-  infer_context->SetShapeOrDataForValue(op->result(1), xshape_shape_data);
-  return true;
-}
-
-bool Flatten_OpInferSymbolicShape(
-    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
-  return FlattenOpInferSymbolicShape(op, infer_context);
-}
-
-bool FoldOpInferSymbolicShape(pir::Operation *op,
-                              pir::InferSymbolicShapeContext *infer_context) {
-  const auto &in_dims =
-      infer_context->GetShapeOrDataForValue(op->operand_source(0)).shape();
-
-  std::vector<symbol::DimExpr> out_dims;
-  out_dims.push_back(in_dims[0]);
-  std::vector<int32_t> kernel_sizes =
-      paddle::dialect::details::GetVectorAttr<int32_t>(op, "kernel_sizes");
-  out_dims.push_back(in_dims[1] / (kernel_sizes[0] * kernel_sizes[1]));
-
-  infer_context->SetShapeOrDataForValue(
-      op->result(0),
-      symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs(out_dims)});
-
-  return true;
-}
-
-bool IdentityLossOpInferSymbolicShape(
-    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
-  const auto &input_shape =
-      infer_context->GetShapeOrDataForValue(op->operand_source(0));
-  int reduction = op->attribute<pir::Int32Attribute>("reduction").data();
-  if (reduction == 2) {
-    infer_context->SetShapeOrDataForValue(op->result(0), input_shape);
-  } else {
-    std::vector<symbol::DimExpr> out_shape = {};
-    infer_context->SetShapeOrDataForValue(
-        op->result(0),
-        symbol::ShapeOrDataDimExprs{
-            symbol::TensorShapeOrDataDimExprs(out_shape)});
-  }
-
-  return true;
-}
-
-bool IdentityLoss_OpInferSymbolicShape(
-    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
-  return IdentityLossOpInferSymbolicShape(op, infer_context);
-}
-
-bool KthvalueOpInferSymbolicShape(
-    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
-  pir::Value operand_source = op->operand_source(0);
-  const symbol::ShapeOrDataDimExprs &operand_shape_or_data =
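
// FlattenOpInferSymbolicShape above copies the dims before start_axis,
// multiplies the dims in [start_axis, stop_axis] into a single extent, then
// copies the rest. The same logic on concrete extents, as a hedged standalone
// sketch (int64_t stands in for symbol::DimExpr; the name is illustrative):

#include <cstdint>
#include <vector>

std::vector<int64_t> FlattenDims(const std::vector<int64_t>& dims,
                                 int start_axis, int stop_axis) {
  std::vector<int64_t> out;
  for (int i = 0; i < start_axis; ++i) out.push_back(dims[i]);
  int64_t collapsed = 1;
  for (int i = start_axis; i <= stop_axis; ++i) collapsed *= dims[i];
  out.push_back(collapsed);
  for (int i = stop_axis + 1; i < static_cast<int>(dims.size()); ++i)
    out.push_back(dims[i]);
  return out;  // e.g. {2, 3, 4, 5} with [1, 2] -> {2, 12, 5}
}
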
infer_context->GetShapeOrDataForValue(operand_source); - const auto &attributes = op->attributes(); - int axis = attributes.at("axis").dyn_cast().data(); - bool keepdim = GetBoolAttr(op, "keepdim"); - - const auto &input_dims = operand_shape_or_data.shape(); - const int &dim_size = input_dims.size(); - if (axis < 0) axis += dim_size; - std::vector out_dims; - for (int i = 0; i < axis; i++) { - out_dims.emplace_back(input_dims.at(i)); - } - if (keepdim && dim_size > 0) { - out_dims.emplace_back(symbol::DimExpr(1)); - } - for (int i = axis + 1; i < dim_size; i++) { - out_dims.emplace_back(input_dims.at(i)); - } - symbol::ShapeOrDataDimExprs shape_data{ - symbol::TensorShapeOrDataDimExprs(out_dims)}; - infer_context->SetShapeOrDataForValue(op->result(0), shape_data); - infer_context->SetShapeOrDataForValue(op->result(1), shape_data); - return true; -} - -bool LpPool2dOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const auto &kernel_size = [&]() -> std::vector { - std::vector kernel_size_int_vec = - op->attribute("kernel_size") - .data() - .GetData(); - return details::VecInt642Expr(kernel_size_int_vec); - }(); - infer_context->SetShapeOrDataForValue( - op->result(0), - Pool2dRawInferSymbolicShape(op, kernel_size, infer_context)); - return true; -} - -bool LogcumsumexpOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - // same as CumsumOpInferSymbolicShape - return CumsumOpInferSymbolicShape(op, infer_context); -} - -bool LogsumexpOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - bool keepdim = GetBoolAttr(op, "keepdim"); - std::vector axis_in = details::GetVectorAttr(op, "axis"); - std::vector axis; - axis.reserve(axis_in.size()); - std::for_each(axis_in.begin(), axis_in.end(), [&axis](const int &t) { - axis.push_back(static_cast(t)); - }); - bool reduce_all = axis.size() == 0 ? true : false; - return details::ReduceInferDim(op, infer_context, axis, keepdim, reduce_all); -} - -bool MaxOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - bool keepdim = GetBoolAttr(op, "keepdim"); - - const std::vector axis = [&] { - pir::Operation *axis_gen_op = op->operand_source(1).defining_op(); - std::vector axis_vec; - if (axis_gen_op->isa()) { - axis_vec = details::GetVectorAttr( - axis_gen_op->dyn_cast(), "value"); - } else { - // TODO(lanxianghit): there's other source: pir::VectorType, - // paddle::dialect::DenseTensorType, but after PRIM, maybe always - // FullIntArrayOp, to be confirmed - PADDLE_THROW(common::errors::Unimplemented( - "MaxOpInferSymbolicShape: 'axis' only " - "support FullIntArrayOp's result now.")); - } - return axis_vec; - }(); - - bool reduce_all = axis.size() == 0 ? 
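
// details::ReduceInferDim, which the reduce ops above (logsumexp, max, and
// later min/sum) delegate to, follows the usual reduction rule: reduced axes
// become 1 when keepdim is true and are dropped otherwise, and an empty axis
// list means reduce over everything. A hedged standalone model of that rule,
// not the Paddle helper itself:

#include <cstdint>
#include <set>
#include <vector>

std::vector<int64_t> ReduceDims(const std::vector<int64_t>& dims,
                                std::vector<int64_t> axes, bool keepdim) {
  const int64_t rank = static_cast<int64_t>(dims.size());
  if (axes.empty()) {  // reduce_all
    for (int64_t i = 0; i < rank; ++i) axes.push_back(i);
  }
  std::set<int64_t> reduced;
  for (int64_t a : axes) reduced.insert(a < 0 ? a + rank : a);
  std::vector<int64_t> out;
  for (int64_t i = 0; i < rank; ++i) {
    if (!reduced.count(i)) out.push_back(dims[i]);
    else if (keepdim) out.push_back(1);
  }
  return out;  // {2, 3, 4} with axes {1}, keepdim=false -> {2, 4}
}
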
true : false; - - return details::ReduceInferDim(op, infer_context, axis, keepdim, reduce_all); -} - -bool MaxoutOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - const auto &x_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - const std::vector &in_x_dims = x_shape_or_data.shape(); - - int groups = op->attribute("groups").data(); - int axis = op->attribute("axis").data(); - - if (axis < 0) { - axis += in_x_dims.size(); - } - - std::vector output_shape = in_x_dims; - output_shape[axis] = in_x_dims[axis] / groups; - infer_context->SetShapeOrDataForValue( - op->result(0), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(output_shape)}); - - return true; -} - -bool MinOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - return MaxOpInferSymbolicShape(op, infer_context); -} - -bool MeanAllOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const auto &x_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - const std::vector &x_dims = x_shape_or_data.shape(); - - PADDLE_ENFORCE_GT( - x_dims.size(), - 0, - phi::errors::InvalidArgument("Input(x) of MeanAllOp must have rank " - "greater than 0, but received rank 0.")); - - std::vector output_shape = {}; - - infer_context->SetShapeOrDataForValue( - op->result(0), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(output_shape)}); - - return true; -} - -bool NonzeroOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const auto &x_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - const auto &x_shape = x_shape_or_data.shape(); - int rank = x_shape.size(); - - PADDLE_ENFORCE_GE( - rank, - 1UL, - common::errors::InvalidArgument( - "Input(x) should have number of dimension at least 1.")); - - std::string sym_name = infer_context->GetNextSymName(); - std::vector out_shape{symbol::DimExpr{sym_name}, - symbol::DimExpr{rank}}; - - symbol::ShapeOrDataDimExprs shape_data{ - symbol::TensorShapeOrDataDimExprs(out_shape)}; - infer_context->SetShapeOrDataForValue(op->result(0), shape_data); - return true; -} - -bool NumelOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - std::vector out_shape = {}; - infer_context->SetShapeOrDataForValue( - op->result(0), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(out_shape)}); - - return true; -} - -bool PadOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - // input(0): Tensor x - const auto &x_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - PADDLE_ENFORCE_EQ(x_shape_or_data.data().has_value(), - false, - common::errors::InvalidArgument( - "InferSymbolicShape of PadOp only support input with " - "value now.")); - const auto &x_dims_sym = x_shape_or_data.shape(); - const size_t rank = x_dims_sym.size(); - - // input(1): int[] paddings - std::vector paddings = - paddle::dialect::details::GetVectorAttr(op, "paddings"); - PADDLE_ENFORCE_EQ(rank * 2, - paddings.size(), - common::errors::InvalidArgument( - "The size of paddings should be 2 * input's rank. 
But " - "got paddings.size() = %d, input's rank = %d.", - paddings.size(), - rank)); - - // output - const auto &out_dims = [&] { - std::vector out_dims; - out_dims.reserve(rank); - for (size_t i = 0; i < rank; ++i) { - out_dims.push_back(x_dims_sym.at(i) + paddings.at(2 * i) + - paddings.at(2 * i + 1)); - } - return out_dims; - }(); - - infer_context->SetShapeOrDataForValue( - op->result(0), symbol::TensorShapeOrDataDimExprs(out_dims)); - - return true; -} - -bool Pad3dOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - const auto &x_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(0)).shape(); - PADDLE_ENFORCE_EQ(x_shape.size(), - 5, - common::errors::InvalidArgument( - "The size of Input(X)'s dimension should be equal to " - "5, but received %d. ", - x_shape.size())); - const auto &paddings_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(1)); - if (!paddings_shape.data().has_value()) { - std::stringstream ss; - ss << paddings_shape; - PADDLE_THROW( - common::errors::InvalidArgument("The data of paddings's symbol shape " - "should have value, but now got [%s].", - ss.str())); - } - const std::string &data_format = - op->attribute("data_format").AsString(); - - const std::vector &out_dims = [&] { - std::vector out_dims = x_shape; - const auto &paddings = paddings_shape.data().value(); - PADDLE_ENFORCE_EQ(paddings.size(), - 6, - common::errors::InvalidArgument( - "Shape of Input(Paddings) should be equal to " - "[6], but received [%d].", - paddings.size())); - if (data_format == "NCDHW") { - out_dims.at(1) = x_shape.at(1); - out_dims.at(2) = x_shape.at(2) + paddings.at(4) + paddings.at(5); - out_dims.at(3) = x_shape.at(3) + paddings.at(2) + paddings.at(3); - out_dims.at(4) = x_shape.at(4) + paddings.at(0) + paddings.at(1); - } else { - out_dims.at(1) = x_shape.at(1) + paddings.at(4) + paddings.at(5); - out_dims.at(2) = x_shape.at(2) + paddings.at(2) + paddings.at(3); - out_dims.at(3) = x_shape.at(3) + paddings.at(0) + paddings.at(1); - out_dims.at(4) = x_shape.at(4); - } - return out_dims; - }(); - - infer_context->SetShapeOrDataForValue( - op->result(0), symbol::TensorShapeOrDataDimExprs(out_dims)); - - return true; -} - -bool Pool2dOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - const auto &kernel_size_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(1)); - const auto &kernel_size = - details::GetExprVecFromData(kernel_size_shape_or_data); - infer_context->SetShapeOrDataForValue( - op->result(0), - Pool2dRawInferSymbolicShape(op, kernel_size, infer_context)); - return true; -} - -bool ProdOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - bool keepdim = GetBoolAttr(op, "keepdim"); - bool reduce_all = GetBoolAttr(op, "reduce_all"); - - auto axis_gen_op = op->operand_source(1).defining_op(); - if (axis_gen_op->isa()) { - std::vector axis = details::GetVectorAttr( - axis_gen_op->dyn_cast(), "value"); - return details::ReduceInferDim( - op, infer_context, axis, keepdim, reduce_all); - } else { - // TODO(lanxianghit): deal with other source: pir::VectorType, - // paddle::dialect::DenseTensorType - PADDLE_THROW( - common::errors::Unimplemented("ProdOpInferSymbolicShape: 'axis' only " - "support FullIntArrayOp's result now.")); - } - - return true; -} - -bool RepeatInterleaveOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - pir::Value operand_source = 
op->operand_source(0); - const symbol::ShapeOrDataDimExprs &operand_shape_or_data = - infer_context->GetShapeOrDataForValue(operand_source); - - const auto &attributes = op->attributes(); - int repeats = attributes.at("repeats").dyn_cast().data(); - // what should I do if axis is null - int axis = attributes.at("axis").dyn_cast().data(); - - const std::vector &in_dims_sym = [&] { - std::vector dims; - if (operand_shape_or_data.data().has_value()) { - dims = operand_shape_or_data.data().value(); - } else { - dims = operand_shape_or_data.shape(); - } - return dims; - }(); - - int x_rank = in_dims_sym.size(); - if (axis < 0) axis += x_rank; - - const auto &out_sym_shape = [&] { - std::vector out_sym_shape; - for (int i = 0; i < x_rank; i++) { - if (i == axis) { - out_sym_shape.push_back(in_dims_sym.at(i) * repeats); - } else { - out_sym_shape.push_back(in_dims_sym.at(i)); - } - } - return out_sym_shape; - }(); - - infer_context->SetShapeOrDataForValue( - op->result(0), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(out_sym_shape)}); - - return true; -} - -bool ReshapeOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const symbol::ShapeOrDataDimExprs &x_dim_expr = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - const symbol::ShapeOrDataDimExprs &shape_dim_expr = - infer_context->GetShapeOrDataForValue(op->operand_source(1)); - - const auto &GetProduct = [&](const auto &dim_exprs, const auto &Filter) { - symbol::DimExpr product{1}; - for (const auto &dim_expr : dim_exprs) { - if (Filter(dim_expr)) { - product = product * dim_expr; - } - } - return product; - }; - - const auto &IsNotMinusOne = [&](const symbol::DimExpr &dim_expr) { - if (dim_expr.isa()) { - return dim_expr.dyn_cast() != static_cast(-1); - } - return true; - }; - - const auto &IsPositiveInteger = [&](const symbol::DimExpr &dim_expr) { - if (dim_expr.isa()) { - return dim_expr.dyn_cast() > static_cast(0); - } - return true; - }; - - const auto &IsZero = [&](const symbol::DimExpr &dim_expr) { - if (dim_expr.isa()) { - return dim_expr.dyn_cast() == static_cast(0); - } - return false; - }; - - const std::vector out_dims = [&] { - const auto &original_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(0)).shape(); - ExprVec target_shape = details::GetExprVecFromData(shape_dim_expr); - - // replace '0' with original shape - for (size_t i = 0; i < target_shape.size(); i++) { - if (i < original_shape.size() && IsZero(target_shape.at(i))) { - target_shape.at(i) = original_shape.at(i); - } - } - - // replace '-1' with infered shape - const auto &numel = - GetProduct(original_shape, [](const auto &) { return true; }); - const auto &product_exclude_minus_one = - GetProduct(target_shape, IsPositiveInteger); - const auto &input_dims = target_shape; - - std::vector out_dims; - out_dims.reserve(input_dims.size()); - for (size_t i = 0; i < input_dims.size(); ++i) { - auto out_dim_expr = IsNotMinusOne(input_dims.at(i)) - ? 
input_dims.at(i) - : (numel / product_exclude_minus_one); - out_dims.emplace_back(out_dim_expr); - } - return out_dims; - }(); - - symbol::ShapeOrDataDimExprs shape_data = [&] { - if (x_dim_expr.data().has_value()) { - return symbol::TensorShapeOrDataDimExprs(out_dims, - x_dim_expr.data().value()); - } - return symbol::TensorShapeOrDataDimExprs(out_dims); - }(); - - infer_context->SetShapeOrDataForValue(op->result(0), shape_data); - return true; -} - -bool Reshape_OpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - return ReshapeOpInferSymbolicShape(op, infer_context); -} - -bool ShapeOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - const symbol::ShapeOrDataDimExprs &operand_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - const auto &out_data = operand_shape_or_data.shape(); - const std::vector shape{std::int64_t(out_data.size())}; - symbol::ShapeOrDataDimExprs shape_or_data{ - symbol::TensorShapeOrDataDimExprs(shape, out_data)}; - - infer_context->SetShapeOrDataForValue(op->result(0), shape_or_data); - return true; -} - -bool ShapeSrOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - return ShapeOpInferSymbolicShape(op, infer_context); -} - -bool SliceOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - pir::Value operand_source = op->operand_source(0); - pir::Value operand_starts = op->operand_source(1); - pir::Value operand_ends = op->operand_source(2); - pir::Value res = op->result(0); - - const symbol::ShapeOrDataDimExprs &starts_shape_data = - infer_context->GetShapeOrDataForValue(operand_starts); - const symbol::ShapeOrDataDimExprs &ends_shape_data = - infer_context->GetShapeOrDataForValue(operand_ends); - - std::vector axes_vec = details::GetVectorAttr(op, "axes"); - - ExprVec starts = slice_utils::GetExprVecFromData(starts_shape_data); - ExprVec ends = slice_utils::GetExprVecFromData(ends_shape_data); - - std::vector infer_flags = details::GetVectorAttr(op, "infer_flags"); - const std::vector decrease_axis = - details::GetVectorAttr(op, "decrease_axis"); - - infer_context->SetShapeOrDataForValue( - res, - slice_utils::SliceRawInferSymbolicShape(operand_source, - res, - starts, - ends, - axes_vec, - infer_flags, - decrease_axis, - infer_context)); - - return true; -} - -bool SplitOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - // input - const auto &x_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - PADDLE_ENFORCE_EQ(x_shape_or_data.data().has_value(), - false, - common::errors::InvalidArgument( - "InferSymbolicShape of SplitOp only support input with " - "value now.")); - const auto &x_dims_sym = x_shape_or_data.shape(); - - // axis - CHECK(op->operand_source(2).defining_op()->isa()); - - int64_t axis = op->operand_source(2) - .defining_op() - .attributes() - .at("value") - .dyn_cast() - .data() - .to(); - size_t rank = x_dims_sym.size(); - axis = axis >= 0 ? 
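
// The '0' and '-1' handling in ReshapeOpInferSymbolicShape above reduces to:
// a 0 copies the corresponding input extent, and a single -1 absorbs whatever
// factor of the element count remains. A hedged standalone model on concrete
// int64_t extents, without the symbolic-dim machinery:

#include <cstdint>
#include <vector>

std::vector<int64_t> ReshapeDims(const std::vector<int64_t>& in,
                                 std::vector<int64_t> target) {
  int64_t numel = 1;
  for (int64_t d : in) numel *= d;
  int64_t known = 1;
  int minus_one_pos = -1;
  for (size_t i = 0; i < target.size(); ++i) {
    if (target[i] == 0 && i < in.size()) target[i] = in[i];  // copy input dim
    if (target[i] == -1) minus_one_pos = static_cast<int>(i);
    else known *= target[i];
  }
  if (minus_one_pos >= 0) target[minus_one_pos] = numel / known;
  return target;  // {2, 3, 4} with {0, -1} -> {2, 12}
}
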
axis : std::max(int64_t(0), int64_t(axis + rank)); - - // sections - const std::vector §ions_sym = - details::GetExprVecFromData( - infer_context->GetShapeOrDataForValue(op->operand_source(1))); - - // output - const symbol::TensorListShapeOrDataDimExprs &output_shape_data_list = [&] { - const auto &GetSum = [&](const auto &dim_exprs, const auto &Filter) { - symbol::DimExpr sum{0}; - for (const auto &dim_expr : dim_exprs) { - if (Filter(dim_expr)) { - sum = sum + dim_expr; - } - } - return sum; - }; - const auto &All = [&](const auto &dim_exprs, const auto &Cond) { - for (const auto &dim_expr : dim_exprs) { - if (!Cond(dim_expr)) { - return false; - } - } - return true; - }; - const auto &IsNotMinusOne = [&](const symbol::DimExpr &dim_expr) { - if (dim_expr.isa()) { - return dim_expr.dyn_cast() != static_cast(-1); - } - return true; - }; - const auto &sum_exclude_minus_one = GetSum(sections_sym, IsNotMinusOne); - - const bool &all_sections_sym_not_minus_one = - All(sections_sym, IsNotMinusOne); - if (all_sections_sym_not_minus_one) { - infer_context->AddEqualCstr(x_dims_sym.at(axis), sum_exclude_minus_one); - } - - symbol::TensorListShapeOrDataDimExprs shape_data_list; - std::vector output_dims_sym = x_dims_sym; - if (!all_sections_sym_not_minus_one && sections_sym.size() == 1) { - VLOG(3) << "[SplitOp]-1 is the only split section. The output shape is " - "identical to the input shape."; - shape_data_list.push_back( - symbol::TensorShapeOrDataDimExprs(output_dims_sym)); - return shape_data_list; - } - for (uint32_t idx = 0; idx < sections_sym.size(); idx++) { - const auto §ion_sym = sections_sym.at(idx); - output_dims_sym.at(axis) = - IsNotMinusOne(section_sym) - ? section_sym - : x_dims_sym.at(axis) - sum_exclude_minus_one; - - shape_data_list.push_back( - symbol::TensorShapeOrDataDimExprs(output_dims_sym)); - } - return shape_data_list; - }(); - - infer_context->SetShapeOrDataForValue( - op->result(0), symbol::ShapeOrDataDimExprs{output_shape_data_list}); - - return true; -} - -bool SplitWithNumOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const symbol::ShapeOrDataDimExprs &axis_shape_data = - infer_context->GetShapeOrDataForValue(op->operand_source(1)); - PADDLE_ENFORCE_EQ( - axis_shape_data.data().has_value(), - true, - common::errors::InvalidArgument( - "In InferSymbolicShape, axis of SplitWithNumOp is null")); - const std::vector &axis_data = - axis_shape_data.data().value(); - PADDLE_ENFORCE_EQ( - axis_data.size() == 1, - true, - common::errors::InvalidArgument( - "In SplitWithNumOp, data of axis should be one dimension")); - - const auto &attributes = op->attributes(); - int num = attributes.at("num").dyn_cast().data(); - - const auto &x_s_or_d = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - int rank = x_s_or_d.shape().size(); - - const auto &out_s_d = [&](int64_t split_axis, int64_t res_num) { - symbol::DimExpr input_axis_dim = x_s_or_d.shape().at(split_axis); - symbol::DimExpr axis_shape = input_axis_dim / symbol::DimExpr{res_num}; - - std::vector res_s_d; - for (size_t i = 0; i < x_s_or_d.shape().size(); ++i) { - const auto &sym_dim = split_axis == static_cast(i) - ? axis_shape - : x_s_or_d.shape().at(i); - res_s_d.push_back(sym_dim); - } - return symbol::TensorShapeOrDataDimExprs(res_s_d); - }; - - if (axis_data.at(0).isa()) { - // case 1: DimExpr of axis is int. 
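
// The sections logic in SplitOpInferSymbolicShape above: at most one section
// may be -1, and it is inferred as the axis length minus the sum of the known
// sections (when all sections are known, their sum is constrained equal to
// the axis length instead). A hedged standalone model on concrete extents:

#include <cstdint>
#include <vector>

std::vector<int64_t> ResolveSections(std::vector<int64_t> sections,
                                     int64_t axis_len) {
  int64_t known_sum = 0;
  int minus_one_pos = -1;
  for (size_t i = 0; i < sections.size(); ++i) {
    if (sections[i] == -1) minus_one_pos = static_cast<int>(i);
    else known_sum += sections[i];
  }
  if (minus_one_pos >= 0) sections[minus_one_pos] = axis_len - known_sum;
  return sections;  // {2, -1, 3} with axis_len 10 -> {2, 5, 3}
}
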
axis_shape_or_data: {shape:{1},
-    // data:{3}} e.g. the axis generator op is full_op or assign_op
-    int64_t axis = axis_data[0].dyn_cast<int64_t>();
-    axis = axis < 0 ? axis + rank : axis;
-    symbol::TensorListShapeOrDataDimExprs res_list_s_d(num, out_s_d(axis, num));
-    infer_context->SetShapeOrDataForValue(
-        op->result(0), symbol::ShapeOrDataDimExprs{res_list_s_d});
-  } else if (axis_data.at(0).isa<std::string>()) {
-    // case 2: DimExpr of axis is a symbol(string). axis_shape_or_data:
-    // {shape:{1}, data:{s0}} e.g. the axis generator op is data_op
-    int candidate_axis = -1;
-    int count = 0;
-    for (size_t i = 0; i < x_s_or_d.shape().size(); ++i) {
-      if (x_s_or_d.shape().at(i).isa<int64_t>()) {
-        if (x_s_or_d.shape().at(i).dyn_cast<int64_t>() % num == 0) {
-          count++;
-          candidate_axis = i;
-        }
-      } else {
-        PADDLE_THROW(common::errors::InvalidArgument(
-            "When axis is a symbol, every dim of X must be a constant "
-            "int64_t."));
-      }
-    }
-    if (count == 1) {
-      // calculate the axis of split_with_num_op
-      symbol::TensorListShapeOrDataDimExprs res_list_s_d(
-          num, out_s_d(candidate_axis, num));
-      infer_context->SetShapeOrDataForValue(
-          op->result(0), symbol::ShapeOrDataDimExprs{res_list_s_d});
-    } else {
-      // create new Symbol
-      std::vector<symbol::DimExpr> res_s;
-      for (size_t i = 0; i < x_s_or_d.shape().size(); ++i) {
-        const auto &s_dim =
-            x_s_or_d.shape().at(i).dyn_cast<int64_t>() % num == 0
-                ? symbol::DimExpr(infer_context->GetNextSymName())
-                : x_s_or_d.shape().at(i);
-        res_s.emplace_back(s_dim);
-      }
-      const symbol::TensorShapeOrDataDimExprs &res_s_d =
-          symbol::TensorShapeOrDataDimExprs(res_s);
-      symbol::TensorListShapeOrDataDimExprs res_list_s_d(num, res_s_d);
-      infer_context->SetShapeOrDataForValue(
-          op->result(0), symbol::ShapeOrDataDimExprs{res_list_s_d});
-    }
-  } else {
-    PADDLE_THROW(common::errors::InvalidArgument(
-        "The type of axis must be int64_t or string."));
-  }
-  return true;
-}
-
-bool SumOpInferSymbolicShape(pir::Operation *op,
-                             pir::InferSymbolicShapeContext *infer_context) {
-  bool keepdim = GetBoolAttr(op, "keepdim");
-  bool reduce_all = false;
-
-  auto axis_gen_op = op->operand_source(1).defining_op();
-  if (axis_gen_op->isa<paddle::dialect::FullIntArrayOp>()) {
-    std::vector<int64_t> axis = details::GetVectorAttr<int64_t>(
-        axis_gen_op->dyn_cast<paddle::dialect::FullIntArrayOp>(), "value");
-    if (axis.size() == 0) {
-      reduce_all = true;
-    }
-    return details::ReduceInferDim(
-        op, infer_context, axis, keepdim, reduce_all);
-  } else {
-    // TODO(lanxianghit): deal with other source: pir::VectorType,
-    // paddle::dialect::DenseTensorType
-    PADDLE_THROW(
-        common::errors::Unimplemented("SumOpInferSymbolicShape: 'axis' only "
-                                      "support FullIntArrayOp's result now."));
-  }
-
-  return true;
-}
-
-bool TileOpInferSymbolicShape(pir::Operation *op,
-                              pir::InferSymbolicShapeContext *infer_context) {
-  pir::Value operand_x = op->operand_source(0);
-  symbol::ShapeOrDataDimExprs x_shape_or_data =
-      infer_context->GetShapeOrDataForValue(operand_x);
-  pir::Value operand_repeat_times = op->operand_source(1);
-  symbol::ShapeOrDataDimExprs repeat_times_shape_or_data =
-      infer_context->GetShapeOrDataForValue(operand_repeat_times);
-
-  std::vector<symbol::DimExpr> x_dimexpr = x_shape_or_data.shape();
-  std::vector<symbol::DimExpr> repeat_times_dimexpr =
-      details::GetExprVecFromData(repeat_times_shape_or_data);
-  if (repeat_times_dimexpr.empty()) {
-    repeat_times_dimexpr = std::vector<symbol::DimExpr>(x_dimexpr.size(), 1);
-  }
-
-  auto out_rank = std::max(static_cast<size_t>(x_dimexpr.size()),
-                           repeat_times_dimexpr.size());
-  std::vector<symbol::DimExpr> out_shape(out_rank);
-  if (x_dimexpr.size() > repeat_times_dimexpr.size()) {
-    auto diff = x_dimexpr.size() - repeat_times_dimexpr.size();
-    repeat_times_dimexpr.insert(repeat_times_dimexpr.begin(),
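
// When the axis of split_with_num is itself symbolic, the code above scans
// the input dims for extents divisible by num: exactly one candidate pins
// the split axis, otherwise every divisible dim gets a fresh symbol. A
// hedged standalone model of that candidate scan, returning -1 when the
// axis stays ambiguous (illustrative name only):

#include <cstdint>
#include <vector>

int GuessSplitAxis(const std::vector<int64_t>& dims, int64_t num) {
  int candidate = -1, count = 0;
  for (size_t i = 0; i < dims.size(); ++i) {
    if (dims[i] % num == 0) {
      ++count;
      candidate = static_cast<int>(i);
    }
  }
  return count == 1 ? candidate : -1;  // {6, 7, 8} with num=3 -> 0
}
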
diff, 1); - } else { - auto diff = repeat_times_dimexpr.size() - x_dimexpr.size(); - x_dimexpr.insert(x_dimexpr.begin(), diff, 1); - } - - for (size_t i = 0; i < repeat_times_dimexpr.size(); ++i) { - out_shape.at(i) = x_dimexpr.at(i) * repeat_times_dimexpr.at(i); - } - - symbol::ShapeOrDataDimExprs shape_data{ - symbol::TensorShapeOrDataDimExprs(out_shape)}; - - pir::Value res = op->result(0); - infer_context->SetShapeOrDataForValue(res, shape_data); - - return true; -} - -bool TopkOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - symbol::ShapeOrDataDimExprs x_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - symbol::ShapeOrDataDimExprs k_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(1)); - const auto &attributes = op->attributes(); - int axis = attributes.at("axis").dyn_cast().data(); - const std::vector &in_dims_sym = [&] { - std::vector dims; - if (x_shape_or_data.data().has_value()) { - dims = x_shape_or_data.data().value(); - } else { - dims = x_shape_or_data.shape(); - } - return dims; - }(); - - int x_rank = in_dims_sym.size(); - - int k = k_shape_or_data.data().value().at(0).Get(); - - if (axis < 0) axis += x_rank; - const auto &out_sym_shape = [&] { - std::vector out_sym_shape; - for (int i = 0; i < x_rank; ++i) { - if (i == axis) { - out_sym_shape.push_back(symbol::DimExpr(k)); - } else { - out_sym_shape.push_back(in_dims_sym.at(i)); - } - } - return out_sym_shape; - }(); - - symbol::ShapeOrDataDimExprs shape_data{ - symbol::TensorShapeOrDataDimExprs(out_sym_shape)}; - - infer_context->SetShapeOrDataForValue(op->result(0), shape_data); - infer_context->SetShapeOrDataForValue(op->result(1), shape_data); - - return true; -} - -bool TopkV1OpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - return TopkOpInferSymbolicShape(op, infer_context); -} - -bool TransposeOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - std::vector perm = - op->attributes().at("perm").dyn_cast().AsVector(); - if (perm.size() == 1) { - // perm must be [0], which means nothing to do with input, just copy the - // info from input - infer_context->SetShapeOrDataForValue( - op->result(0), - infer_context->GetShapeOrDataForValue(op->operand_source(0))); - return true; - } - const std::vector &x_dims = [&] { - std::vector dims; - const auto &x_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - if (x_shape_or_data.data().has_value()) { - dims = x_shape_or_data.data().value(); - } else { - dims = x_shape_or_data.shape(); - } - return dims; - }(); - - int x_rank = x_dims.size(); - - const std::vector formatted_axis = [x_rank, &perm] { - std::vector out(perm.size(), 0); - std::transform(perm.begin(), - perm.end(), - out.begin(), - [](pir::Attribute &p) -> int32_t { - return p.dyn_cast().data(); - }); - - // format the negative axis - std::for_each(out.begin(), out.end(), [x_rank](int32_t &v) { - if (v < 0) { - v += x_rank; - } - }); - return out; - }(); - - int axis_size = static_cast(formatted_axis.size()); - - std::vector out_dims(x_dims); - for (int i = 0; i < axis_size; ++i) { - out_dims.at(i) = x_dims.at(formatted_axis.at(i)); - } - - infer_context->SetShapeOrDataForValue(op->result(0), - ShapeOrData{TensorExprs(out_dims)}); - - return true; -} - -bool Transpose_OpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - return TransposeOpInferSymbolicShape(op, 
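
// TransposeOpInferSymbolicShape above permutes extents, out[i] = in[perm[i]],
// after normalizing negative perm entries by adding the rank. A hedged
// standalone sketch on concrete extents:

#include <cstdint>
#include <vector>

std::vector<int64_t> TransposeDims(const std::vector<int64_t>& dims,
                                   std::vector<int> perm) {
  const int rank = static_cast<int>(dims.size());
  std::vector<int64_t> out(perm.size());
  for (size_t i = 0; i < perm.size(); ++i) {
    if (perm[i] < 0) perm[i] += rank;  // format negative axes
    out[i] = dims[perm[i]];
  }
  return out;  // {2, 3, 4} with perm {2, 0, 1} -> {4, 2, 3}
}
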
infer_context); -} - -bool SqueezeOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - PADDLE_ENFORCE_EQ( - op->num_operands(), - 2, - common::errors::InvalidArgument( - "SqueezeOpInferSymbolicShape ONLY support num_operands() == 2 " - "now, but got %d operands", - op->num_operands())); - - auto x_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - auto axes_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(1)); - - std::vector in_dims_sym; - if (x_shape_or_data.data().has_value()) { - in_dims_sym = x_shape_or_data.data().value(); - } else { - in_dims_sym = x_shape_or_data.shape(); - } - - std::vector squeeze_dims_sym; - if (axes_shape_or_data.data().has_value()) { - squeeze_dims_sym = axes_shape_or_data.data().value(); - } else { - squeeze_dims_sym = axes_shape_or_data.shape(); - } - - std::vector squeeze_dims; - for (auto squeeze_dim : squeeze_dims_sym) { - PADDLE_ENFORCE_EQ( - squeeze_dim.Has(), - true, - common::errors::InvalidArgument( - "in SqueezeOpInferSymbolicShape, axes must be known int type, " - "but got: %s", - symbol::ToString(squeeze_dim))); - squeeze_dims.emplace_back( - static_cast(squeeze_dim.Get())); - } - - // GetOutputSqueezeShape - size_t num_squeeze_dims = squeeze_dims.size(); - std::vector should_squeeze(in_dims_sym.size(), false); - // Mark dimensions need to be squeezed. - if (num_squeeze_dims == 0) { - for (size_t i = 0; i < in_dims_sym.size(); ++i) { - // TODO(lanxianghit): if symbol here, maybe we need the result of dim expr - // simplification - if (in_dims_sym.at(i) == 1) { - should_squeeze.at(i) = true; - } - } - } else { - for (size_t i = 0; i < num_squeeze_dims; ++i) { - if (in_dims_sym.size() == 0) { - continue; - } - int current = squeeze_dims.at(i) < 0 - ? squeeze_dims.at(i) + in_dims_sym.size() - : squeeze_dims.at(i); - - if (!should_squeeze.at(current)) { - // At compile time, dim of SYMBOL is allowed to squeeze? - if (in_dims_sym.at(current) == 1) { - should_squeeze.at(current) = true; - } else if (!in_dims_sym.at(current).Has()) { - should_squeeze.at(current) = true; - } else { - should_squeeze.at(current) = true; - } - } - } - } - - // Make output dimensions - std::vector output_shape_sym; - for (size_t i = 0; i < in_dims_sym.size(); ++i) { - if (!should_squeeze.at(i)) { - output_shape_sym.emplace_back(in_dims_sym.at(i)); - } - } - - symbol::ShapeOrDataDimExprs shape_data{ - symbol::TensorShapeOrDataDimExprs(output_shape_sym)}; - - pir::Value res = op->result(0); - infer_context->SetShapeOrDataForValue(res, shape_data); - infer_context->SetShapeOrDataForValue( - op->result(1), CreateShapeOrDataForXShape(x_shape_or_data)); - - return true; -} -bool Squeeze_OpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - return SqueezeOpInferSymbolicShape(op, infer_context); -} - -bool UnbindOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - // input - const auto &x_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - PADDLE_ENFORCE_EQ( - x_shape_or_data.data().has_value(), - false, - common::errors::InvalidArgument( - "InferSymbolicShape of UnbindOp only support input with " - "value now.")); - const auto &x_dims_sym = x_shape_or_data.shape(); - - // axis - int axis = op->attributes().at("axis").dyn_cast().data(); - int rank = x_dims_sym.size(); - axis = axis >= 0 ? 
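
// SqueezeOpInferSymbolicShape above marks axes to drop, either every size-1
// dim when the axes list is empty or the requested axes, then copies the
// unmarked dims through. A hedged standalone model on concrete extents (the
// symbolic-dim subtleties flagged in the TODO above are out of scope here):

#include <cstdint>
#include <vector>

std::vector<int64_t> SqueezeDims(const std::vector<int64_t>& dims,
                                 const std::vector<int>& axes) {
  const int rank = static_cast<int>(dims.size());
  std::vector<bool> drop(rank, false);
  if (axes.empty()) {
    for (int i = 0; i < rank; ++i) drop[i] = (dims[i] == 1);
  } else {
    for (int a : axes) drop[a < 0 ? a + rank : a] = true;
  }
  std::vector<int64_t> out;
  for (int i = 0; i < rank; ++i)
    if (!drop[i]) out.push_back(dims[i]);
  return out;  // {1, 3, 1, 5} with {} -> {3, 5}; with {0} -> {3, 1, 5}
}
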
axis : axis + rank; - - // output - const symbol::TensorListShapeOrDataDimExprs &output_shape_data_list = [&] { - symbol::TensorListShapeOrDataDimExprs shape_data_list; - std::vector output_dims_sym = x_dims_sym; - - const symbol::DimExpr &unbound_dim = x_dims_sym.at(axis); - PADDLE_ENFORCE_EQ(unbound_dim.isa(), - true, - common::errors::InvalidArgument( - "InferSymbolicShape of UnbindOp only support unbound " - "dim with constant length!")); - output_dims_sym.erase(output_dims_sym.begin() + axis); - const int64_t unbound_dim_length = unbound_dim.dyn_cast(); - - for (uint32_t idx = 0; idx < unbound_dim_length; idx++) { - shape_data_list.push_back( - symbol::TensorShapeOrDataDimExprs(output_dims_sym)); - } - return shape_data_list; - }(); - - infer_context->SetShapeOrDataForValue( - op->result(0), symbol::ShapeOrDataDimExprs{output_shape_data_list}); - - return true; -} - -bool UniqueOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - const auto &x_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - PADDLE_ENFORCE_EQ( - x_shape_or_data.data().has_value(), - false, - common::errors::InvalidArgument( - "InferSymbolicShape of UniqueOp only support input with " - "value now.")); - const auto &x_dims_sym = x_shape_or_data.shape(); - const size_t rank = x_dims_sym.size(); - std::vector axes = - paddle::dialect::details::GetVectorAttr(op, "axis"); - - symbol::DimExpr unique_dim_sym = - infer_context->GetNextSymName(); // unknown until runtime - - const std::vector &counts_dims = [&] { - std::vector out_dims; - out_dims.push_back(unique_dim_sym); - return out_dims; - }(); - - const std::vector &index_dims = counts_dims; - - const std::vector &out_dims = [&] { - if (axes.empty()) { - return counts_dims; - } - std::vector out_dims = x_dims_sym; - int axis = axes.at(0); - axis = axis >= 0 ? axis : axis + rank; - out_dims.at(axis) = unique_dim_sym; - return out_dims; - }(); - - const std::vector &inverse_dims = [&] { - std::vector inverse_dims; - if (axes.empty()) { - // flatten before unique - symbol::DimExpr product{1}; - for (const auto &x_dim : x_dims_sym) { - product = product * x_dim; - } - inverse_dims.push_back(product); - } else { - int axis = axes.at(0); - axis = axis >= 0 ? axis : axis + rank; - inverse_dims.push_back(x_dims_sym.at(axis)); - } - return inverse_dims; - }(); - - bool return_index = GetBoolAttr(op, "return_index"); - bool return_inverse = GetBoolAttr(op, "return_inverse"); - bool return_counts = GetBoolAttr(op, "return_counts"); - - symbol::ShapeOrDataDimExprs empty{symbol::TensorShapeOrDataDimExprs{}}; - infer_context->SetShapeOrDataForValue( - op->result(0), symbol::TensorShapeOrDataDimExprs{out_dims}); - infer_context->SetShapeOrDataForValue( - op->result(1), - return_index ? symbol::TensorShapeOrDataDimExprs{index_dims} : empty); - infer_context->SetShapeOrDataForValue( - op->result(2), - return_inverse ? symbol::TensorShapeOrDataDimExprs{inverse_dims} : empty); - infer_context->SetShapeOrDataForValue( - op->result(3), - return_counts ? 
symbol::TensorShapeOrDataDimExprs{counts_dims} : empty); - - return true; -} - -bool UniqueConsecutiveOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const auto &x_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - PADDLE_ENFORCE_EQ( - x_shape_or_data.data().has_value(), - false, - common::errors::InvalidArgument( - "InferSymbolicShape of UniqueConsecutiveOp only support input with " - "value now.")); - const auto &x_dims_sym = x_shape_or_data.shape(); - const size_t rank = x_dims_sym.size(); - std::vector axes = - paddle::dialect::details::GetVectorAttr(op, "axis"); - - symbol::DimExpr unique_dim_sym = - infer_context->GetNextSymName(); // unknown until runtime - - const std::vector &counts_dims = [&] { - std::vector out_dims; - out_dims.push_back(unique_dim_sym); - return out_dims; - }(); - - const std::vector &out_dims = [&] { - if (axes.empty()) { - return counts_dims; - } - std::vector out_dims = x_dims_sym; - int axis = axes.at(0); - axis = axis >= 0 ? axis : axis + rank; - out_dims.at(axis) = unique_dim_sym; - return out_dims; - }(); - - const std::vector &inverse_dims = [&] { - std::vector inverse_dims; - if (axes.empty()) { - // flatten before unique - symbol::DimExpr product{1}; - for (const auto &x_dim : x_dims_sym) { - product = product * x_dim; - } - inverse_dims.push_back(product); - } else { - int axis = axes.at(0); - axis = axis >= 0 ? axis : axis + rank; - inverse_dims.push_back(x_dims_sym.at(axis)); - } - return inverse_dims; - }(); - - bool return_inverse = GetBoolAttr(op, "return_inverse"); - bool return_counts = GetBoolAttr(op, "return_counts"); - - symbol::ShapeOrDataDimExprs empty{symbol::TensorShapeOrDataDimExprs{}}; - infer_context->SetShapeOrDataForValue( - op->result(0), symbol::TensorShapeOrDataDimExprs{out_dims}); - infer_context->SetShapeOrDataForValue( - op->result(1), - return_inverse ? symbol::TensorShapeOrDataDimExprs{inverse_dims} : empty); - infer_context->SetShapeOrDataForValue( - op->result(2), - return_counts ? symbol::TensorShapeOrDataDimExprs{counts_dims} : empty); - - return true; -} - -bool UnsqueezeOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - PADDLE_ENFORCE_EQ( - op->num_operands(), - 2, - common::errors::InvalidArgument( - "UnsqueezeOp InferSymbolicShape ONLY support num_operands() == 2 " - "now, but got %d operands", - op->num_operands())); - - auto x_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - auto axes_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(1)); - - std::vector x_sym_shape; - if (x_shape_or_data.data().has_value()) { - x_sym_shape = x_shape_or_data.data().value(); - } else { - x_sym_shape = x_shape_or_data.shape(); - } - int x_dims_size = x_sym_shape.size(); - - std::vector axes_sym; - if (axes_shape_or_data.data().has_value()) { - axes_sym = axes_shape_or_data.data().value(); - } else { - axes_sym = axes_shape_or_data.shape(); - } - int axes_sym_size = axes_sym.size(); - - // GetUnsqueezeShape - int output_rank = x_dims_size + axes_sym_size; - std::vector result_sym_dims(output_rank, 0); - - int cur_output_rank = x_dims_size; - for (auto axis_expr : axes_sym) { - PADDLE_ENFORCE_EQ( - axis_expr.Has(), - true, - common::errors::InvalidArgument( - "in UnsqueezeOpInferSymbolicShape, axes must be known int type, " - "but got: %s", - symbol::ToString(axis_expr))); - int axis = static_cast(axis_expr.Get()); - int cur = axis < 0 ? 
axis + cur_output_rank + 1 : axis; - - // Move old axis, and insert new axis - for (int i = cur_output_rank; i >= cur; --i) { - if (result_sym_dims.at(i) == 1) { - // Move axis - result_sym_dims.at(i + 1) = 1; - result_sym_dims.at(i) = 0; - } - } - result_sym_dims.at(cur) = 1; - // Add the output size. - cur_output_rank++; - } - - // Make output shape - for (int in_idx = 0, out_idx = 0; out_idx < output_rank; ++out_idx) { - if (result_sym_dims.at(out_idx) == 0) { - result_sym_dims.at(out_idx) = x_sym_shape.at(in_idx++); - } - } - - symbol::ShapeOrDataDimExprs shape_data{ - symbol::TensorShapeOrDataDimExprs(result_sym_dims)}; - - pir::Value res = op->result(0); - infer_context->SetShapeOrDataForValue(res, shape_data); - infer_context->SetShapeOrDataForValue( - op->result(1), CreateShapeOrDataForXShape(x_shape_or_data)); - - return true; -} -bool Unsqueeze_OpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - return UnsqueezeOpInferSymbolicShape(op, infer_context); -} - -} // namespace paddle::dialect diff --git a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/unary_infer_sym-checkpoint.h b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/unary_infer_sym-checkpoint.h deleted file mode 100644 index 4390c63f99ec4..0000000000000 --- a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/unary_infer_sym-checkpoint.h +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#pragma once - -#include "paddle/pir/include/dialect/shape/utils/shape_analysis.h" - -namespace paddle::dialect { -OP_DECLARE_INFER_SYMBOLIC_SHAPE(All) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Amax) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Amin) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Any) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Argmax) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Argmin) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(AsComplex) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(AsReal) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Assign) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Assign_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(BipartiteMatch) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cast) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cast_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cholesky) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(ClipByNorm) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(ClipByNormSr) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cummax) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cummin) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cumprod) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cumprod_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cumsum) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cumsum_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(ChannelShuffle) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(DiagEmbed) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Diagonal) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(DistributeFpnProposals) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Eigh) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Eigvalsh) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(FakeChannelWiseQuantizeAbsMax) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(FftC2c) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(FftC2r) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(FftR2c) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(FillDiagonal) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(FillDiagonal_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Flatten) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Flatten_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Fold) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(IdentityLoss) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(IdentityLoss_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Kthvalue) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(LpPool2d) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Logcumsumexp) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Logsumexp) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Max) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Maxout) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Min) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(MeanAll) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Nonzero) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Numel) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Pad) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Pad3d) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Pool2d) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Prod) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(RepeatInterleave) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Reshape) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Reshape_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Shape) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(ShapeSr) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Slice) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Split) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(SplitWithNum) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Squeeze) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Squeeze_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Sum) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Tile) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Topk) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(TopkV1) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Transpose) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Transpose_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Unbind) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Unique) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(UniqueConsecutive) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Unsqueeze) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Unsqueeze_) - -} // namespace paddle::dialect From 84c12b844a4a599a693767c0e13ea54b4076839d Mon Sep 17 00:00:00 2001 From: Fripping <15010770306@163.com> Date: Tue, 13 Aug 2024 14:23:09 +0800 Subject: [PATCH 5/8] update cinn api --- .../multiary_infer_sym-checkpoint.cc | 1307 
++++++++++ .../same_operands_result-checkpoint.cc | 214 ++ .../same_operands_result-checkpoint.h | 164 ++ .../unary_infer_sym-checkpoint.cc | 2157 +++++++++++++++++ .../unary_infer_sym-checkpoint.h | 96 + .../multiary_infer_sym.cc | 79 +- .../same_operands_result.cc | 1 - .../same_operands_result.h | 1 - .../infer_symbolic_shape/unary_infer_sym.cc | 90 +- .../infer_symbolic_shape/unary_infer_sym.h | 1 + 10 files changed, 4062 insertions(+), 48 deletions(-) create mode 100644 paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/multiary_infer_sym-checkpoint.cc create mode 100644 paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/same_operands_result-checkpoint.cc create mode 100644 paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/same_operands_result-checkpoint.h create mode 100644 paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/unary_infer_sym-checkpoint.cc create mode 100644 paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/unary_infer_sym-checkpoint.h diff --git a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/multiary_infer_sym-checkpoint.cc b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/multiary_infer_sym-checkpoint.cc new file mode 100644 index 0000000000000..8045ab2210f71 --- /dev/null +++ b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/multiary_infer_sym-checkpoint.cc @@ -0,0 +1,1307 @@ +// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/common/ddim.h" +#include "paddle/common/layout.h" +#include "paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/infer_sym_slice_utils.h" +#include "paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/infer_sym_utils.h" +#include "paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/multiary_infer_sym.h" +#include "paddle/fluid/pir/dialect/operator/ir/op_attribute.h" + +namespace paddle::dialect { + +bool AccuracyOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const symbol::ShapeOrDataDimExprs &out_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + const symbol::ShapeOrDataDimExprs &label_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(2)); + + // Assume indices has same shape as inference, because + // it's the output of topk. + PADDLE_ENFORCE_EQ( + label_shape.shape().size(), + 2UL, + common::errors::InvalidArgument( + "ShapeError: label's dimensions of AccuracyOp must be 2. 
" + "But received label's dimensions = %d", + label_shape.shape().size())); + + infer_context->AddEqualCstr(label_shape.shape()[1], symbol::DimExpr{1}); + infer_context->AddEqualCstr(out_shape.shape()[0], label_shape.shape()[0]); + + std::vector accuracy_shape = {}; + infer_context->SetShapeOrDataForValue( + op->result(0), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(accuracy_shape)}); + + std::vector correct_shape = {}; + infer_context->SetShapeOrDataForValue( + op->result(1), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(correct_shape)}); + + std::vector total_shape = {}; + infer_context->SetShapeOrDataForValue( + op->result(2), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(total_shape)}); + + return true; +} + +bool AddNOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + const auto &input_list_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + PADDLE_ENFORCE_EQ( + input_list_shape.isa(), + true, + common::errors::InvalidArgument( + "The type of inputs shape should be TensorListShapeOrDataDimExprs")); + const auto &inputs_shape = + input_list_shape.dyn_cast(); + PADDLE_ENFORCE_GT( + inputs_shape.size(), + 0, + common::errors::InvalidArgument( + "The input tensor X's dimensions of AddNOp " + "should be larger than 0. But received X's dimensions %d.", + inputs_shape.size())); + symbol::TensorShapeOrDataDimExprs candidate_shape = inputs_shape.front(); + for (size_t i = 1; i < inputs_shape.size(); ++i) { + // 0D tensor + if (inputs_shape[i].shape().size() == 0) { + continue; + } + if (candidate_shape.shape().size() == 0) { + candidate_shape = inputs_shape[i]; + continue; + } + for (size_t j = 0; j < candidate_shape.shape().size(); ++j) { + infer_context->AddEqualCstr(candidate_shape.shape()[j], + inputs_shape[i].shape()[j]); + } + } + infer_context->SetShapeOrDataForValue( + op->result(0), symbol::ShapeOrDataDimExprs{candidate_shape}); + + return true; +} + +bool AddmmOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + const auto &input_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + const auto &x_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(1)); + const auto &y_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(2)); + + auto ndim_input = input_shape.shape().size(); + auto ndim_x = x_shape.shape().size(); + auto ndim_y = y_shape.shape().size(); + + PADDLE_ENFORCE_EQ(ndim_input == 2 || ndim_input == 1, + true, + common::errors::InvalidArgument( + "The input tensor input's dimension must be 2 or 1. " + "But received input's dimension = [%d].", + ndim_input)); + PADDLE_ENFORCE_EQ(ndim_x, + 2, + common::errors::InvalidArgument( + "The input tensor x's dimension must be 2. " + "But received x's dimension = [%d].", + ndim_x)); + PADDLE_ENFORCE_EQ(ndim_y, + 2, + common::errors::InvalidArgument( + "The input tensor y's dimension must be 2. 
" + "But received y's dimension = [%d].", + ndim_y)); + + std::vector output_shape; + output_shape.push_back(x_shape.shape()[0]); + output_shape.push_back(y_shape.shape()[1]); + + infer_context->SetShapeOrDataForValue( + op->result(0), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(output_shape)}); + + infer_context->AddEqualCstr(x_shape.shape()[1], y_shape.shape()[0]); + + if (ndim_input == 2) { + infer_context->AddBroadcastableCstr(input_shape.shape()[0], + x_shape.shape()[0]); + infer_context->AddBroadcastableCstr(input_shape.shape()[1], + y_shape.shape()[1]); + } else if (ndim_input == 1) { + infer_context->AddBroadcastableCstr(input_shape.shape()[0], + y_shape.shape()[1]); + } + + return true; +} + +bool Addmm_OpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + return AddmmOpInferSymbolicShape(op, infer_context); +} + +bool AucOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + const auto &predict_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + const auto &label_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(1)); + + PADDLE_ENFORCE_GE( + predict_shape.shape().size(), + 2, + common::errors::InvalidArgument( + "The Input(Predict) has not been initialized properly. The " + "shape of Input(Predict) = [%s], the shape size must be " + "greater_equal 2.", + predict_shape.shape())); + + const auto &predict_height = predict_shape.shape()[0]; + const auto &label_height = label_shape.shape()[0]; + + infer_context->AddEqualCstr(predict_height, label_height); + + int num_thresholds = + op->attribute("num_thresholds").data(); + int slide_steps = op->attribute("slide_steps").data(); + + int num_pred_buckets = num_thresholds + 1; + + PADDLE_ENFORCE_GE( + num_pred_buckets, + 1, + common::errors::InvalidArgument("num_thresholds must larger than 1")); + PADDLE_ENFORCE_GE( + slide_steps, + 0, + common::errors::InvalidArgument("slide_steps must be natural number")); + + infer_context->SetShapeOrDataForValue( + op->result(0), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(std::vector{})}); + + if (slide_steps) { + infer_context->SetShapeOrDataForValue( + op->result(1), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(std::vector{ + (1 + slide_steps) * num_pred_buckets + 1})}); + infer_context->SetShapeOrDataForValue( + op->result(2), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(std::vector{ + (1 + slide_steps) * num_pred_buckets + 1})}); + } else { + infer_context->SetShapeOrDataForValue( + op->result(1), + symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs( + std::vector{1, num_pred_buckets})}); + infer_context->SetShapeOrDataForValue( + op->result(2), + symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs( + std::vector{1, num_pred_buckets})}); + } + + return true; +} + +bool BatchNormOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const auto &x_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + const auto &scale_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(3)); + const auto &bias_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(4)); + + std::vector x_dims = x_shape_or_data.shape(); + + std::string data_layout_str = + op->attribute("data_format").AsString(); + const DataLayout data_layout = common::StringToDataLayout(data_layout_str); + + 
PADDLE_ENFORCE_GE( + x_dims.size(), + 2, + phi::errors::InvalidArgument( + "ShapeError: the dimension of input " + "X must greater than or equal to 2. But received: the shape of input " + "X = [%s], the dimension of input X =[%d]", + x_dims, + x_dims.size())); + PADDLE_ENFORCE_LE( + x_dims.size(), + 5, + phi::errors::InvalidArgument( + "ShapeError: the dimension of input X " + "must smaller than or equal to 5. But received: the shape of input X " + "= [%s], the dimension of input X = [%d]", + x_dims, + x_dims.size())); + + symbol::DimExpr C = (data_layout == DataLayout::kNCHW) + ? x_dims[1] + : x_dims[x_dims.size() - 1]; + + if (!scale_shape_or_data.isa()) { + std::vector scale_dims = scale_shape_or_data.shape(); + PADDLE_ENFORCE_EQ(scale_dims.size(), + 1UL, + phi::errors::InvalidArgument( + "ShapeError: the dimension of scale must equal to 1." + "But received: the dimension of scale is [%d]", + scale_dims.size())); + infer_context->AddEqualCstr(scale_dims[0], C); + } + + if (!bias_shape_or_data.isa()) { + std::vector bias_dims = bias_shape_or_data.shape(); + PADDLE_ENFORCE_EQ(bias_dims.size(), + 1UL, + phi::errors::InvalidArgument( + "ShapeError: the dimension of bias must equal to 1." + "But received: the dimension of bias is [%d]", + bias_dims.size())); + infer_context->AddEqualCstr(bias_dims[0], C); + } + + // Set output shapes + infer_context->SetShapeOrDataForValue( + op->result(0), + symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs(x_dims)}); + + std::vector param_dims = {C}; + infer_context->SetShapeOrDataForValue( + op->result(1), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(param_dims)}); + infer_context->SetShapeOrDataForValue( + op->result(2), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(param_dims)}); + + if (op->result(3) && op->result(3).type()) { + infer_context->SetShapeOrDataForValue( + op->result(3), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(param_dims)}); + } + if (op->result(4) && op->result(4).type()) { + infer_context->SetShapeOrDataForValue( + op->result(4), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(param_dims)}); + } + if (op->result(5) && op->result(5).type()) { + std::vector reserve_space_dims{ + symbol::DimExpr{infer_context->GetNextSymName()}}; + infer_context->SetShapeOrDataForValue( + op->result(5), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(reserve_space_dims)}); + } + + return true; +} + +bool BatchNorm_OpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + return BatchNormOpInferSymbolicShape(op, infer_context); +} + +bool BicubicInterpOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const symbol::ShapeOrDataDimExprs &x = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + + const auto &attributes = op->attributes(); + + const std::string data_format = + attributes.at("data_format").dyn_cast().AsString(); + int out_d = attributes.at("out_d").dyn_cast().data(); + int out_h = attributes.at("out_h").dyn_cast().data(); + int out_w = attributes.at("out_w").dyn_cast().data(); + const std::vector &scale = details::GetVectorAttr(op, "scale"); + + const bool has_size_tensor = [&] { + pir::Value size_tensor = op->operand_source(2); + if (!size_tensor || !size_tensor.type()) { + return false; + } + const auto &list_size_tensor = + size_tensor.type().dyn_cast(); + return list_size_tensor && !list_size_tensor.empty(); + }(); + auto 
GetSizeTensorDataExpr = + [&](pir::Value value) -> std::vector { + const symbol::ShapeOrDataDimExprs &size_tensor_shape = + infer_context->GetShapeOrDataForValue(value); + PADDLE_ENFORCE_EQ( + size_tensor_shape.isa(), + true, + common::errors::InvalidArgument( + "The size_tensor of Interpolation should be type of " + "TensorListShapeOrDataDimExprs")); + return details::GetOrCreateExprVecFromData(size_tensor_shape, + infer_context); + }; + auto GetOutSizeDataExpr = + [&](pir::Value value) -> std::vector { + const symbol::ShapeOrDataDimExprs &out_size_tensor_shape = + infer_context->GetShapeOrDataForValue(value); + return details::GetOrCreateExprVecFromData(out_size_tensor_shape, + infer_context); + }; + auto GetOutDimByScale = [&](const symbol::DimExpr &in_dim, + float scale) -> symbol::DimExpr { + PADDLE_ENFORCE_GT(scale, + 0, + common::errors::InvalidArgument( + "The scale in Attr(scale) of Operator(interpolate) " + "should be greater than 0, but received value is %d.", + scale)); + if (in_dim.isa()) { + return symbol::DimExpr{ + static_cast(in_dim.dyn_cast() * scale)}; + } + return symbol::DimExpr{infer_context->GetNextSymName()}; + }; + + std::vector size_tensor; + if (out_d != -1) size_tensor.push_back(out_d); + if (out_h != -1) size_tensor.push_back(out_h); + if (out_w != -1) size_tensor.push_back(out_w); + + const DataLayout data_layout = common::StringToDataLayout(data_format); + + if (x.shape().size() == 3) { + // shape check for 1D interpolate for input tensor shape NCHW + if (!size_tensor.empty()) { + // top priority size + std::vector dim_out; + if (data_layout == DataLayout::kNCHW) { + dim_out = {x.shape()[0], x.shape()[1], symbol::DimExpr{out_w}}; + } else { + dim_out = {x.shape()[0], symbol::DimExpr{out_w}, x.shape()[2]}; + } + + symbol::ShapeOrDataDimExprs shape_data{ + symbol::TensorShapeOrDataDimExprs(dim_out)}; + + pir::Value res = op->result(0); + infer_context->SetShapeOrDataForValue(res, shape_data); + return true; + } + + symbol::DimExpr out_w_tmp{0}; + const auto &next_sym = infer_context->GetNextSymName(); + out_w_tmp = symbol::DimExpr(next_sym); + + std::vector dim_out; + if (data_layout == DataLayout::kNCHW) { + dim_out = {x.shape()[0], x.shape()[1], out_w_tmp}; + } else { + dim_out = {x.shape()[0], out_w_tmp, x.shape()[2]}; + } + + symbol::ShapeOrDataDimExprs shape_data{ + symbol::TensorShapeOrDataDimExprs(dim_out)}; + + pir::Value res = op->result(0); + infer_context->SetShapeOrDataForValue(res, shape_data); + return true; + } else if (x.shape().size() == 4) { + // shape check for 2D interpolate for input tensor shape NCHW + auto GetOutHW = [&]() -> std::tuple { + // top priority size + if (has_size_tensor) { + const auto &size_tensor_list_shape = + GetSizeTensorDataExpr(op->operand_source(2)); + PADDLE_ENFORCE_EQ(size_tensor_list_shape.size(), + 2, + common::errors::InvalidArgument( + "The size of size_tensor list should be 2.")); + return std::make_tuple(size_tensor_list_shape.at(0), + size_tensor_list_shape.at(1)); + } + // has out_size tensor + if (op->operand_source(1)) { + const auto &out_size_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(1)); + PADDLE_ENFORCE_EQ( + out_size_shape_or_data.shape().size(), + 1, + common::errors::InvalidArgument( + "The rank of input out_size tensor should be 1.")); + infer_context->AddEqualCstr(out_size_shape_or_data.shape()[0], + symbol::DimExpr{2}); + const auto &out_size_data = GetOutSizeDataExpr(op->operand_source(1)); + return std::make_tuple(symbol::DimExpr{out_size_data[0]}, + 
symbol::DimExpr{out_size_data[1]}); + } + // has scale + if (scale.size() == 2) { + float scale_h = scale[0]; + float scale_w = scale[1]; + const auto &in_h = + data_layout == DataLayout::kNCHW ? x.shape()[2] : x.shape()[1]; + const auto &in_w = + data_layout == DataLayout::kNCHW ? x.shape()[3] : x.shape()[2]; + return std::make_tuple(GetOutDimByScale(in_h, scale_h), + GetOutDimByScale(in_w, scale_w)); + } + + return std::make_tuple(symbol::DimExpr{out_h}, symbol::DimExpr{out_w}); + }; + + const std::vector dim_out = [&] { + const auto &[out_h_sym, out_w_sym] = GetOutHW(); + if (data_layout == DataLayout::kNCHW) { + return std::vector{ + x.shape()[0], x.shape()[1], out_h_sym, out_w_sym}; + } else { + return std::vector{ + x.shape()[0], out_h_sym, out_w_sym, x.shape()[3]}; + } + }(); + + symbol::ShapeOrDataDimExprs shape_data{ + symbol::TensorShapeOrDataDimExprs(dim_out)}; + infer_context->SetShapeOrDataForValue(op->result(0), shape_data); + + return true; + } else if (x.shape().size() == 5) { + auto GetOutDHW = + [&]() -> std::tuple { + // top priority size + if (has_size_tensor) { + const auto &size_tensor_list_shape = + GetSizeTensorDataExpr(op->operand_source(2)); + PADDLE_ENFORCE_EQ(size_tensor_list_shape.size(), + 3, + common::errors::InvalidArgument( + "The size of size_tensor list should be 3.")); + return std::make_tuple(size_tensor_list_shape.at(0), + size_tensor_list_shape.at(1), + size_tensor_list_shape.at(2)); + } + // has out_size tensor + if (op->operand_source(1)) { + const auto &out_size_data = GetOutSizeDataExpr(op->operand_source(1)); + return std::make_tuple(symbol::DimExpr{out_size_data[0]}, + symbol::DimExpr{out_size_data[1]}, + symbol::DimExpr{out_size_data[2]}); + } + // has scale + if (scale.size() == 3) { + float scale_d = scale[0]; + float scale_h = scale[1]; + float scale_w = scale[2]; + const auto &in_d = + data_layout == DataLayout::kNCHW ? x.shape()[2] : x.shape()[1]; + const auto &in_h = + data_layout == DataLayout::kNCHW ? x.shape()[3] : x.shape()[2]; + const auto &in_w = + data_layout == DataLayout::kNCHW ? 
x.shape()[4] : x.shape()[3]; + return std::make_tuple(GetOutDimByScale(in_d, scale_d), + GetOutDimByScale(in_h, scale_h), + GetOutDimByScale(in_w, scale_w)); + } + + return std::make_tuple(symbol::DimExpr{out_d}, + symbol::DimExpr{out_h}, + symbol::DimExpr{out_w}); + }; + + const std::vector dim_out = [&] { + const auto &[out_d_sym, out_h_sym, out_w_sym] = GetOutDHW(); + if (data_layout == DataLayout::kNCHW) { + return std::vector{ + x.shape()[0], x.shape()[1], out_d_sym, out_h_sym, out_w_sym}; + } else { + return std::vector{ + x.shape()[0], out_d_sym, out_h_sym, out_w_sym, x.shape()[4]}; + } + }(); + + symbol::ShapeOrDataDimExprs shape_data{ + symbol::TensorShapeOrDataDimExprs(dim_out)}; + infer_context->SetShapeOrDataForValue(op->result(0), shape_data); + return true; + } else { + PADDLE_THROW( + common::errors::Fatal("Input(X) dimension must be 3, 4 or 5!")); + } + + return true; +} + +bool BilinearOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const auto &x_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + const auto &y_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(1)); + const auto &weight_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(2)); + + PADDLE_ENFORCE_EQ( + x_shape.shape().size(), + 2UL, + common::errors::InvalidArgument("The input(X) must be a 2D Tensor.")); + PADDLE_ENFORCE_EQ( + y_shape.shape().size(), + 2UL, + common::errors::InvalidArgument("The input(Y) must be a 2D Tensor.")); + PADDLE_ENFORCE_EQ( + weight_shape.shape().size(), + 3UL, + common::errors::InvalidArgument( + "Expected the input(Weight) is a 3D tensor. But received %dD tensor.", + weight_shape.shape().size())); + + infer_context->AddEqualCstr(x_shape.shape()[0], y_shape.shape()[0]); + + infer_context->AddEqualCstr(x_shape.shape()[1], weight_shape.shape()[1]); + infer_context->AddEqualCstr(y_shape.shape()[1], weight_shape.shape()[2]); + + if (op->operand_source(3)) { // has bias + const auto &bias_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(3)); + PADDLE_ENFORCE_EQ(bias_shape.shape().size(), + 2UL, + common::errors::InvalidArgument( + "The Input(Bias) must be a 2-D tensor with " + "the 2nd dimension fixed to 1 (a row vector).")); + infer_context->AddEqualCstr(bias_shape.shape()[0], symbol::DimExpr{1}); + infer_context->AddEqualCstr(bias_shape.shape()[1], weight_shape.shape()[0]); + } + + infer_context->SetShapeOrDataForValue( + op->result(0), + symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs( + {x_shape.shape()[0], weight_shape.shape()[0]})}); + + return true; +} + +bool BilinearInterpOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + return BicubicInterpOpInferSymbolicShape(op, infer_context); +} + +bool CrossEntropyWithSoftmaxOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const symbol::ShapeOrDataDimExprs &input_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + const symbol::ShapeOrDataDimExprs &index_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(1)); + + const auto &input_dim = input_shape.shape(); + const auto &index_dim = index_shape.shape(); + const auto &attributes = op->attributes(); + int axis = attributes.at("axis").dyn_cast().data(); + if (axis < 0) axis += input_shape.shape().size(); + bool soft_label = + attributes.at("soft_label").dyn_cast().data(); + PADDLE_ENFORCE(!soft_label || input_dim.size() == 
index_dim.size(), + common::errors::InvalidArgument( + "The input and index should have the same rank when " + "soft_label is true. But received input rank(%d) and " + "index rank(%d)", + input_dim.size(), + index_dim.size())); + + auto softmax_dim = index_dim; + auto out_dim = index_dim; + + if (index_dim.size() == input_dim.size()) { + if (soft_label) { + out_dim[axis] = 1; + } + softmax_dim[axis] = input_dim[axis]; + } else { + softmax_dim.insert(softmax_dim.begin() + axis, input_dim[axis]); + if (soft_label) { + out_dim.insert(out_dim.begin() + axis, 1); + } + } + + infer_context->SetShapeOrDataForValue( + op->result(0), symbol::TensorShapeOrDataDimExprs(softmax_dim)); + infer_context->SetShapeOrDataForValue( + op->result(1), symbol::TensorShapeOrDataDimExprs(out_dim)); + + return true; +} + +bool CrossEntropyWithSoftmax_OpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + return CrossEntropyWithSoftmaxOpInferSymbolicShape(op, infer_context); +} + +bool ConcatOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + const auto &axis_expr = + infer_context->GetShapeOrDataForValue(op->operand_source(1)); + if (!axis_expr.data() || !axis_expr.data()->at(0).isa()) { + pir::Value res = op->result(0); + infer_context->SetSymbolForValueByStaticShape(res); + return true; + } + + pir::Value operand_source = op->operand_source(0); + const auto &shape_data_list = + infer_context->GetShapeOrDataForValue(operand_source) + .dyn_cast(); + + size_t rank = shape_data_list.at(0).shape().size(); + const int64_t axis = [&] { + int64_t axis = axis_expr.data()->at(0).dyn_cast(); + return axis >= 0 ? axis : std::max(int64_t(0), int64_t(axis + rank)); + }(); + + // Both branches below either return or throw, so no code is reachable + // after this if-block when the inputs carry data. + if (shape_data_list.at(0).data().has_value()) { + if (rank == 1) { + const auto &s_or_d = + infer_context->GetShapeOrDataForValue(operand_source); + ExprVec data = details::GetExprVecFromData(s_or_d); + + const std::vector shape{std::int64_t(data.size())}; + symbol::ShapeOrDataDimExprs shape_data{ + symbol::TensorShapeOrDataDimExprs(shape, data)}; + pir::Value res = op->result(0); + infer_context->SetShapeOrDataForValue(res, shape_data); + + return true; + } else { + PADDLE_THROW(common::errors::Unimplemented( + op->name() + + " 's InferSymbolicShape can NOT deal with rank > 1 now.")); + } + } + + const std::vector &out_dims = [&] { + std::vector out_dims = shape_data_list.at(0).shape(); + for (size_t i = 0; i < rank; ++i) { + if (i != static_cast(axis)) { + details::BuildCstrEqForTensorListAlongAxis( + infer_context, shape_data_list, i); + continue; + } + for (size_t j = 1; j < shape_data_list.size(); ++j) { + out_dims.at(axis) = + out_dims.at(axis) + shape_data_list.at(j).shape().at(axis); + } + } + return out_dims; + }(); + + symbol::ShapeOrDataDimExprs shape_data{ + symbol::TensorShapeOrDataDimExprs(out_dims)}; + + pir::Value res = op->result(0); + infer_context->SetShapeOrDataForValue(res, shape_data); + + return true; +} + +bool FullWithTensorOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + pir::Value operand_source =
op->operand_source(1); + const symbol::ShapeOrDataDimExprs &operand_shape_or_data = + infer_context->GetShapeOrDataForValue(operand_source); + + const auto &out_shape = operand_shape_or_data.data().has_value() + ? operand_shape_or_data.data().value() + : operand_shape_or_data.shape(); + + infer_context->SetShapeOrDataForValue( + op->result(0), symbol::TensorShapeOrDataDimExprs(out_shape)); + return true; +} + +bool FlashAttnOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + pir::Value operand_source = op->operand_source(0); + const symbol::ShapeOrDataDimExprs &q = + infer_context->GetShapeOrDataForValue(operand_source); + + const symbol::ShapeOrDataDimExprs &k = + infer_context->GetShapeOrDataForValue(op->operand_source(1)); + + const symbol::ShapeOrDataDimExprs &v = + infer_context->GetShapeOrDataForValue(op->operand_source(2)); + + PADDLE_ENFORCE_EQ(q.shape().size(), + 4, + common::errors::InvalidArgument( + "flash_attn expects input with dims " + "[batch_size, seq_len, num_heads, head_dim]")); + + infer_context->AddEqualCstr(q.shape()[0], k.shape()[0]); + infer_context->AddEqualCstr(q.shape()[0], v.shape()[0]); + infer_context->AddEqualCstr(k.shape()[1], v.shape()[1]); + + if (op->operand_source(4)) { + const symbol::ShapeOrDataDimExprs &attn_mask = + infer_context->GetShapeOrDataForValue(op->operand_source(4)); + infer_context->AddEqualCstr(attn_mask.shape()[0], q.shape()[0]); + infer_context->AddEqualCstr(attn_mask.shape()[2], q.shape()[1]); + infer_context->AddEqualCstr(attn_mask.shape()[3], k.shape()[1]); + } + + std::vector out_shape = q.shape(); + + out_shape.back() = v.shape().back(); + + infer_context->SetShapeOrDataForValue( + op->result(0), symbol::TensorShapeOrDataDimExprs(out_shape)); + + // GPU rounds the seqlen up, but XPU does not. Here we align with the GPU + // version.
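+ // A rough illustration (added, not from the original kernels): rounding + // to a multiple of 128 maps a concrete seqlen of 1000 to + // (1000 + 127) / 128 * 128 = 1024 under integer division; symbolic dims + // keep the same expression as a DimExpr.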
+ auto round_multiple = [](symbol::DimExpr x) { + auto m = symbol::DimExpr{128}; + auto m_minus_one = symbol::DimExpr{127}; + return (x + m_minus_one) / m * m; + }; + auto batch_size_expr = q.shape()[0]; + auto num_heads_expr = q.shape()[2]; + auto seqlen_q_rounded_expr = round_multiple(q.shape()[1]); + auto seqlen_k_rounded_expr = round_multiple(k.shape()[1]); + if (op->result(1)) { + std::vector softmax_shape{batch_size_expr, + num_heads_expr, + seqlen_q_rounded_expr, + seqlen_k_rounded_expr}; + infer_context->SetShapeOrDataForValue( + op->result(1), symbol::TensorShapeOrDataDimExprs(softmax_shape)); + } + if (op->result(2)) { + std::vector softmax_lse_shape{ + batch_size_expr, num_heads_expr, seqlen_q_rounded_expr}; + infer_context->SetShapeOrDataForValue( + op->result(2), symbol::TensorShapeOrDataDimExprs(softmax_lse_shape)); + } + if (op->result(3)) { + std::vector seed_offset_shape{symbol::DimExpr{2}}; + infer_context->SetShapeOrDataForValue( + op->result(3), + symbol::TensorShapeOrDataDimExprs(seed_offset_shape)); + } + return true; +} + +bool GroupNormOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const symbol::ShapeOrDataDimExprs &x_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + + infer_context->SetShapeOrDataForValue(op->result(0), x_shape); + + const symbol::DimExpr &batch_size = x_shape.shape()[0]; + int groups = op->attribute("groups").data(); + symbol::TensorShapeOrDataDimExprs mean_shape( + std::vector{batch_size, groups}); + if (op->result(1)) { + infer_context->SetShapeOrDataForValue(op->result(1), mean_shape); + } + if (op->result(2)) { + infer_context->SetShapeOrDataForValue(op->result(2), mean_shape); + } + return true; +} + +bool LerpOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + const auto &x_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + const auto &y_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(1)); + const auto &w_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(2)); + const auto &x_shape = x_shape_or_data.shape(); + const auto &y_shape = y_shape_or_data.shape(); + const auto &w_shape = w_shape_or_data.shape(); + size_t x_ndims = x_shape.size(); + size_t y_ndims = y_shape.size(); + size_t w_ndims = w_shape.size(); + std::vector out1_shape; + std::vector out2_shape; + if (x_ndims > y_ndims) { + out1_shape.assign(x_shape.begin(), x_shape.end()); + } else if (x_ndims < y_ndims) { + out1_shape.assign(y_shape.begin(), y_shape.end()); + } else { + symbol::DimExprBuilder builder; + for (size_t i = 0; i < x_ndims; ++i) { + out1_shape.emplace_back(builder.Broadcast(x_shape[i], y_shape[i])); + infer_context->AddBroadcastableCstr(x_shape[i], y_shape[i]); + } + } + size_t out1_ndims = out1_shape.size(); + if (w_ndims > out1_ndims) { + out2_shape.assign(w_shape.begin(), w_shape.end()); + } else if (w_ndims < out1_ndims) { + out2_shape.assign(out1_shape.begin(), out1_shape.end()); + } else { + symbol::DimExprBuilder builder; + for (size_t i = 0; i < w_ndims; ++i) { + out2_shape.emplace_back(builder.Broadcast(w_shape[i], out1_shape[i])); + infer_context->AddBroadcastableCstr(w_shape[i], out1_shape[i]); + } + } + infer_context->SetShapeOrDataForValue( + op->result(0), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(out2_shape)}); + return true; +} + +bool Lerp_OpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + return
LerpOpInferSymbolicShape(op, infer_context); +} + +bool LayerNormOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + // Get the shapes of input tensors + const auto &x_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + const auto &scale_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(1)); + const auto &bias_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(2)); + + std::vector x_dims = x_shape_or_data.shape(); + int begin_norm_axis = + op->attribute("begin_norm_axis").data(); + + // Flatten x_dims to 2D and get dim[1] + symbol::DimExpr matrix_dim_1 = x_dims[begin_norm_axis]; + for (std::size_t i = begin_norm_axis + 1; i < x_dims.size(); ++i) { + matrix_dim_1 = matrix_dim_1 * x_dims[i]; + } + + if (!scale_shape_or_data.isa()) { + std::vector scale_dims = scale_shape_or_data.shape(); + infer_context->AddEqualCstr(scale_dims[0], matrix_dim_1); + } + if (!bias_shape_or_data.isa()) { + std::vector bias_dims = bias_shape_or_data.shape(); + infer_context->AddEqualCstr(bias_dims[0], matrix_dim_1); + } + + // Set output shapes + infer_context->SetShapeOrDataForValue( + op->result(0), + symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs(x_dims)}); + + // Set mean and variance shapes + std::vector before_norm_dims( + x_dims.begin(), x_dims.begin() + begin_norm_axis); + infer_context->SetShapeOrDataForValue( + op->result(1), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(before_norm_dims)}); + infer_context->SetShapeOrDataForValue( + op->result(2), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(before_norm_dims)}); + + return true; +} + +bool LinspaceOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const auto &num_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(2)); + const auto step = [&] { + symbol::DimExpr expr; + if (num_shape_or_data.data().has_value()) { + expr = num_shape_or_data.data().value()[0]; + } else { + expr = num_shape_or_data.shape()[0]; + } + return expr; + }(); + const symbol::ShapeOrDataDimExprs &shape_data = [&] { + std::vector out_dims{step}; + return symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(out_dims)}; + }(); + infer_context->SetShapeOrDataForValue(op->result(0), shape_data); + return true; +} + +bool LinearInterpOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + return BicubicInterpOpInferSymbolicShape(op, infer_context); +} + +bool LogspaceOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + return LinspaceOpInferSymbolicShape(op, infer_context); +} + +bool NearestInterpOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + return BicubicInterpOpInferSymbolicShape(op, infer_context); +} + +bool MemoryEfficientAttentionOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const auto &q_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(0)).shape(); + const auto &k_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(1)).shape(); + const auto &v_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(2)).shape(); + PADDLE_ENFORCE_EQ( + q_shape.size(), + 4, + common::errors::InvalidArgument("Query should be a 4-D tensor" + "But received Query dimension(%d)", + q_shape.size())); + PADDLE_ENFORCE_EQ( + 
k_shape.size(), + 4, + common::errors::InvalidArgument("Key should be a 4-D tensor" + "But received Key dimension(%d)", + k_shape.size())); + PADDLE_ENFORCE_EQ( + v_shape.size(), + 4, + common::errors::InvalidArgument("Value should be a 4-D tensor" + "But received Value dimension(%d)", + v_shape.size())); + + const auto &query_batch_size = q_shape[0]; + const auto &query_seq_length = q_shape[1]; + const auto &query_num_head = q_shape[2]; + const auto &query_head_size = q_shape[3]; + + const auto &key_batch_size = k_shape[0]; + const auto &key_seq_length = k_shape[1]; + const auto &key_num_head = k_shape[2]; + const auto &key_head_size = k_shape[3]; + + const auto &value_batch_size = v_shape[0]; + const auto &value_seq_length = v_shape[1]; + const auto &value_num_head = v_shape[2]; + const auto &value_head_size = v_shape[3]; + + infer_context->AddEqualCstr(query_batch_size, key_batch_size); + infer_context->AddEqualCstr(key_batch_size, value_batch_size); + + infer_context->AddEqualCstr(query_num_head, key_num_head); + infer_context->AddEqualCstr(key_num_head, value_num_head); + + infer_context->AddEqualCstr(query_head_size, key_head_size); + + infer_context->AddEqualCstr(key_seq_length, value_seq_length); + + const std::vector out_dims{ + query_batch_size, query_seq_length, query_num_head, value_head_size}; + const std::vector logsumexp_dims{query_num_head, + query_batch_size}; + const std::vector seed_and_offset_dims{2}; + + infer_context->SetShapeOrDataForValue( + op->result(0), symbol::TensorShapeOrDataDimExprs(out_dims)); + infer_context->SetShapeOrDataForValue( + op->result(1), symbol::TensorShapeOrDataDimExprs(logsumexp_dims)); + infer_context->SetShapeOrDataForValue( + op->result(2), symbol::TensorShapeOrDataDimExprs(seed_and_offset_dims)); + + return true; +} + +bool RoiAlignOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const auto &x = op->operand_source(0); + const auto &boxes = op->operand_source(1); + + const auto &num_boxes = + infer_context->GetShapeOrDataForValue(boxes).shape()[0]; + symbol::DimExpr channel_num = + infer_context->GetShapeOrDataForValue(x).shape()[1]; + + int32_t out_h = op->attribute("pooled_height").data(); + int32_t out_w = op->attribute("pooled_width").data(); + + std::vector out_dim = {num_boxes, channel_num, out_h, out_w}; + infer_context->SetShapeOrDataForValue( + op->result(0), symbol::TensorShapeOrDataDimExprs(out_dim)); + return true; +} + +bool MeshgridOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const symbol::TensorListShapeOrDataDimExprs &shape_data_list = + infer_context->GetShapeOrDataForValue(op->operand_source(0)) + .dyn_cast(); + + const symbol::ShapeOrDataDimExprs sym_shape_dim_exprs = [&] { + symbol::TensorListShapeOrDataDimExprs shape_dim_exprs_list; + std::vector vec; + + for (auto &shape_data : shape_data_list) { + if (shape_data.shape().size() == 0) { + vec.emplace_back(1); + } else { + vec.emplace_back(shape_data.shape()[0]); + } + } + + auto shape_dim_exprs = symbol::TensorShapeOrDataDimExprs(vec); + + for (size_t i = 0; i < shape_data_list.size(); i++) { + shape_dim_exprs_list.emplace_back(shape_dim_exprs); + } + + return symbol::ShapeOrDataDimExprs(shape_dim_exprs_list); + }(); + + pir::Value res = op->result(0); + infer_context->SetShapeOrDataForValue(res, sym_shape_dim_exprs); + return true; +} + +bool StackOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + pir::Value operand_source = 
op->operand_source(0); + + const auto &attributes = op->attributes(); + int axis = attributes.at("axis").dyn_cast().data(); + const symbol::TensorListShapeOrDataDimExprs &shape_data_list = + infer_context->GetShapeOrDataForValue(operand_source) + .dyn_cast(); + + size_t rank = shape_data_list.at(0).shape().size(); + if (axis < 0) axis += rank + 1; + const symbol::ShapeOrDataDimExprs shape_data = [&] { + std::vector result_shape = {}; + std::vector result_data = {}; + const symbol::TensorShapeOrDataDimExprs &x_shape_data = + shape_data_list.at(0); + + const bool data_flag = [&] { + for (const auto &shape_data : shape_data_list) { + if (!shape_data.data().has_value()) { + return false; + } + } + return true; + }(); + + if (data_flag) { + // case 1: data is not empty, eg: shape_data_list = + // [[shape:{3},data:{S0,6,7}],...] + if (axis == 0 && x_shape_data.data().value().size() <= 1) { + for (const auto &shape_data : shape_data_list) { + result_data.emplace_back(shape_data.data().value().at(0)); + } + } else { + PADDLE_THROW(common::errors::Unimplemented( + op->name() + + " 's InferSymbolicShape can NOT deal with data size > 1 now.")); + } + result_shape.emplace_back( + static_cast(shape_data_list.size())); + } else { + // case 2: data is empty, eg: shape_data_list = + // [[shape:{5,6,7},data:{}],...] + for (size_t i = 0; i < rank; ++i) { + details::BuildCstrEqForTensorListAlongAxis( + infer_context, shape_data_list, i); + } + for (const symbol::DimExpr &dim : x_shape_data.shape()) { + result_shape.emplace_back(dim); + } + result_shape.insert(result_shape.begin() + axis, + static_cast(shape_data_list.size())); + } + + if (result_data.empty()) { + return symbol::ShapeOrDataDimExprs( + symbol::TensorShapeOrDataDimExprs(result_shape)); + } + return symbol::ShapeOrDataDimExprs( + symbol::TensorShapeOrDataDimExprs(result_shape, result_data)); + }(); + + pir::Value res = op->result(0); + infer_context->SetShapeOrDataForValue(res, shape_data); + return true; +} + +bool TrilinearInterpOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + return BicubicInterpOpInferSymbolicShape(op, infer_context); +} + +bool WhereOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + infer_context->SetShapeOrDataForValue( + op->result(0), + infer_context->GetShapeOrDataForValue(op->operand_source(0))); + + const std::vector &operands = {op->operand_source(0), + op->operand_source(1)}; + + size_t rank = infer_context->GetShapeOrDataForValue(op->operand_source(0)) + .shape() + .size(); + + for (size_t i = 0; i < rank; ++i) { + paddle::dialect::details::BuildCstrEqForTensorListAlongAxis( + infer_context, operands, i); + } + + return true; +} + +bool Where_OpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + return WhereOpInferSymbolicShape(op, infer_context); +} + +bool YoloLossOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const auto &x_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(0)).shape(); + const auto &box_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(1)).shape(); + const auto &label_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(2)).shape(); + const std::vector &anchors_mask = + paddle::dialect::details::GetVectorAttr(op, "anchor_mask"); + int mask_num = anchors_mask.size(); + int class_num = op->attribute("class_num").data(); + + PADDLE_ENFORCE_EQ(x_shape.size(), + 4, + 
phi::errors::InvalidArgument( + "Input(X) should be a 4-D tensor. But received " + "X dimension size(%s)", + x_shape.size())); + PADDLE_ENFORCE_EQ( + box_shape.size(), + 3, + phi::errors::InvalidArgument("Input(GTBox) should be a 3-D tensor, but " + "received gtbox dimension size(%s)", + box_shape.size())); + PADDLE_ENFORCE_EQ(label_shape.size(), + 2, + phi::errors::InvalidArgument( + "Input(GTLabel) should be a 2-D tensor. " + "But received Input(GTLabel) dimension size(%s) != 2.", + label_shape.size())); + infer_context->AddEqualCstr(box_shape[2], symbol::DimExpr(4)); + infer_context->AddEqualCstr(x_shape[2], x_shape[3]); + infer_context->AddEqualCstr(x_shape[1], + symbol::DimExpr(mask_num * (5 + class_num))); + infer_context->AddEqualCstr(label_shape[0], box_shape[0]); + infer_context->AddEqualCstr(label_shape[1], box_shape[1]); + + if (op->operand_source(3) != nullptr) { + const auto &score_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(3)).shape(); + PADDLE_ENFORCE_EQ( + score_shape.size(), + 2, + phi::errors::InvalidArgument("Input(GTScore) should be a 2-D tensor. " + "But received GTScore dimension size(%s)", + score_shape.size())); + infer_context->AddEqualCstr(score_shape[0], box_shape[0]); + infer_context->AddEqualCstr(score_shape[1], box_shape[1]); + } + + std::vector out_shape = {x_shape[0]}; + infer_context->SetShapeOrDataForValue( + op->result(0), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(out_shape)}); + + std::vector obj_mask_shape = { + x_shape[0], symbol::DimExpr(mask_num), x_shape[2], x_shape[3]}; + infer_context->SetShapeOrDataForValue( + op->result(1), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(obj_mask_shape)}); + + std::vector match_mask_shape = {box_shape[0], box_shape[1]}; + infer_context->SetShapeOrDataForValue( + op->result(2), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(match_mask_shape)}); + + return true; +} + +bool FakeChannelWiseDequantizeMaxAbsOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const auto &x_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + + int quant_axis = op->attribute("quant_axis").data(); + int x_num_col_dims = + op->attribute("x_num_col_dims").data(); + + PADDLE_ENFORCE_EQ( + quant_axis == 0 || quant_axis == 1, + true, + common::errors::InvalidArgument("'quant_axis' should be 0 or 1, but " + "received %d", + quant_axis)); + PADDLE_ENFORCE_EQ(x_num_col_dims == 0, + false, + common::errors::InvalidArgument( + "'x_num_col_dims' should be larger than 0, but " + "received %d", + x_num_col_dims)); + + infer_context->SetShapeOrDataForValue( + op->result(0), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(x_shape_or_data.shape())}); + + return true; +} + +} // namespace paddle::dialect diff --git a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/same_operands_result-checkpoint.cc b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/same_operands_result-checkpoint.cc new file mode 100644 index 0000000000000..3eb6b62bdc1fd --- /dev/null +++ b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/same_operands_result-checkpoint.cc @@ -0,0 +1,214 @@ +// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/same_operands_result.h" + +#define OP_SAME_OPERANDS_AND_RESULT(name) \ + bool name##OpInferSymbolicShape( \ + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { \ + const auto &operand_shape = \ + infer_context->GetShapeOrDataForValue(op->operand_source(0)).shape(); \ + infer_context->SetShapeOrDataForValue( \ + op->result(0), \ + symbol::ShapeOrDataDimExprs{ \ + symbol::TensorShapeOrDataDimExprs(operand_shape)}); \ + return true; \ + } + +namespace paddle::dialect { + +OP_SAME_OPERANDS_AND_RESULT(Abs) +OP_SAME_OPERANDS_AND_RESULT(Abs_) +OP_SAME_OPERANDS_AND_RESULT(Acos) +OP_SAME_OPERANDS_AND_RESULT(Acos_) +OP_SAME_OPERANDS_AND_RESULT(Acosh) +OP_SAME_OPERANDS_AND_RESULT(Acosh_) +OP_SAME_OPERANDS_AND_RESULT(Angle) +OP_SAME_OPERANDS_AND_RESULT(Asin) +OP_SAME_OPERANDS_AND_RESULT(Asin_) +OP_SAME_OPERANDS_AND_RESULT(Asinh) +OP_SAME_OPERANDS_AND_RESULT(Asinh_) +OP_SAME_OPERANDS_AND_RESULT(Atan) +OP_SAME_OPERANDS_AND_RESULT(Atan_) +OP_SAME_OPERANDS_AND_RESULT(Atanh) +OP_SAME_OPERANDS_AND_RESULT(Atanh_) +OP_SAME_OPERANDS_AND_RESULT(Bernoulli) +OP_SAME_OPERANDS_AND_RESULT(BitwiseNot) +OP_SAME_OPERANDS_AND_RESULT(BitwiseNot_) +OP_SAME_OPERANDS_AND_RESULT(Ceil) +OP_SAME_OPERANDS_AND_RESULT(Ceil_) +OP_SAME_OPERANDS_AND_RESULT(Celu) +OP_SAME_OPERANDS_AND_RESULT(Clip) +OP_SAME_OPERANDS_AND_RESULT(Clip_) +OP_SAME_OPERANDS_AND_RESULT(Conj) +OP_SAME_OPERANDS_AND_RESULT(CopyTo) +OP_SAME_OPERANDS_AND_RESULT(Cos) +OP_SAME_OPERANDS_AND_RESULT(Cos_) +OP_SAME_OPERANDS_AND_RESULT(Cosh) +OP_SAME_OPERANDS_AND_RESULT(Cosh_) +OP_SAME_OPERANDS_AND_RESULT(DequantizeLog) +OP_SAME_OPERANDS_AND_RESULT(Digamma) +OP_SAME_OPERANDS_AND_RESULT(Digamma_) +OP_SAME_OPERANDS_AND_RESULT(Dirichlet) +OP_SAME_OPERANDS_AND_RESULT(EmptyLike) +OP_SAME_OPERANDS_AND_RESULT(Erf) +OP_SAME_OPERANDS_AND_RESULT(Erf_) +OP_SAME_OPERANDS_AND_RESULT(Erfinv) +OP_SAME_OPERANDS_AND_RESULT(Erfinv_) +OP_SAME_OPERANDS_AND_RESULT(Exp) +OP_SAME_OPERANDS_AND_RESULT(Exp_) +OP_SAME_OPERANDS_AND_RESULT(Expm1) +OP_SAME_OPERANDS_AND_RESULT(Expm1_) +OP_SAME_OPERANDS_AND_RESULT(Exponential_) +OP_SAME_OPERANDS_AND_RESULT(Fill) +OP_SAME_OPERANDS_AND_RESULT(Fill_) +OP_SAME_OPERANDS_AND_RESULT(Fetch) +OP_SAME_OPERANDS_AND_RESULT(Flip) +OP_SAME_OPERANDS_AND_RESULT(Floor) +OP_SAME_OPERANDS_AND_RESULT(Floor_) +OP_SAME_OPERANDS_AND_RESULT(FullLike) +OP_SAME_OPERANDS_AND_RESULT(Imag) +OP_SAME_OPERANDS_AND_RESULT(Increment) +OP_SAME_OPERANDS_AND_RESULT(Increment_) +OP_SAME_OPERANDS_AND_RESULT(Isfinite) +OP_SAME_OPERANDS_AND_RESULT(IsfiniteSr) +OP_SAME_OPERANDS_AND_RESULT(Isinf) +OP_SAME_OPERANDS_AND_RESULT(IsinfSr) +OP_SAME_OPERANDS_AND_RESULT(Isnan) +OP_SAME_OPERANDS_AND_RESULT(IsnanSr) +OP_SAME_OPERANDS_AND_RESULT(I0) +OP_SAME_OPERANDS_AND_RESULT(I0_) +OP_SAME_OPERANDS_AND_RESULT(I0e) +OP_SAME_OPERANDS_AND_RESULT(I1) +OP_SAME_OPERANDS_AND_RESULT(I1e) +OP_SAME_OPERANDS_AND_RESULT(Lgamma) +OP_SAME_OPERANDS_AND_RESULT(Lgamma_) +OP_SAME_OPERANDS_AND_RESULT(Log1p) +OP_SAME_OPERANDS_AND_RESULT(Log1p_) +OP_SAME_OPERANDS_AND_RESULT(Log) 
+OP_SAME_OPERANDS_AND_RESULT(Log_) +OP_SAME_OPERANDS_AND_RESULT(LogicalNot) +OP_SAME_OPERANDS_AND_RESULT(LogicalNot_) +OP_SAME_OPERANDS_AND_RESULT(Logit) +OP_SAME_OPERANDS_AND_RESULT(Logit_) +OP_SAME_OPERANDS_AND_RESULT(Logsigmoid) +OP_SAME_OPERANDS_AND_RESULT(Logsigmoid_) +OP_SAME_OPERANDS_AND_RESULT(Pow) +OP_SAME_OPERANDS_AND_RESULT(Poisson) +OP_SAME_OPERANDS_AND_RESULT(Pow_) +OP_SAME_OPERANDS_AND_RESULT(Prelu) +OP_SAME_OPERANDS_AND_RESULT(Print) +OP_SAME_OPERANDS_AND_RESULT(PutAlongAxis) +OP_SAME_OPERANDS_AND_RESULT(PutAlongAxis_) +OP_SAME_OPERANDS_AND_RESULT(Real) +OP_SAME_OPERANDS_AND_RESULT(Reciprocal) +OP_SAME_OPERANDS_AND_RESULT(Reciprocal_) +OP_SAME_OPERANDS_AND_RESULT(Relu) +OP_SAME_OPERANDS_AND_RESULT(Relu6) +OP_SAME_OPERANDS_AND_RESULT(Relu_) +OP_SAME_OPERANDS_AND_RESULT(Reverse) +OP_SAME_OPERANDS_AND_RESULT(Roll) +OP_SAME_OPERANDS_AND_RESULT(Round) +OP_SAME_OPERANDS_AND_RESULT(Round_) +OP_SAME_OPERANDS_AND_RESULT(RowConv) +OP_SAME_OPERANDS_AND_RESULT(Rsqrt) +OP_SAME_OPERANDS_AND_RESULT(Rsqrt_) +OP_SAME_OPERANDS_AND_RESULT(ScaleSr) +OP_SAME_OPERANDS_AND_RESULT(ScaleSr_) +OP_SAME_OPERANDS_AND_RESULT(Scale_) +OP_SAME_OPERANDS_AND_RESULT(ScatterNdAdd) +OP_SAME_OPERANDS_AND_RESULT(Scatter) +OP_SAME_OPERANDS_AND_RESULT(Scatter_) +OP_SAME_OPERANDS_AND_RESULT(Select) +OP_SAME_OPERANDS_AND_RESULT(Sign) +OP_SAME_OPERANDS_AND_RESULT(Sin) +OP_SAME_OPERANDS_AND_RESULT(Sin_) +OP_SAME_OPERANDS_AND_RESULT(Sinh) +OP_SAME_OPERANDS_AND_RESULT(Sinh_) +OP_SAME_OPERANDS_AND_RESULT(Softmax) +OP_SAME_OPERANDS_AND_RESULT(Softmax_) +OP_SAME_OPERANDS_AND_RESULT(Swish) +OP_SAME_OPERANDS_AND_RESULT(Tan) +OP_SAME_OPERANDS_AND_RESULT(Tan_) +OP_SAME_OPERANDS_AND_RESULT(Tanh) +OP_SAME_OPERANDS_AND_RESULT(Tanh_) +OP_SAME_OPERANDS_AND_RESULT(Tril) +OP_SAME_OPERANDS_AND_RESULT(Tril_) +OP_SAME_OPERANDS_AND_RESULT(Triu) +OP_SAME_OPERANDS_AND_RESULT(Triu_) +OP_SAME_OPERANDS_AND_RESULT(Trunc) +OP_SAME_OPERANDS_AND_RESULT(Trunc_) +OP_SAME_OPERANDS_AND_RESULT(Sigmoid) +OP_SAME_OPERANDS_AND_RESULT(Sigmoid_) +OP_SAME_OPERANDS_AND_RESULT(LeakyRelu) +OP_SAME_OPERANDS_AND_RESULT(LeakyRelu_) +OP_SAME_OPERANDS_AND_RESULT(ThresholdedRelu) +OP_SAME_OPERANDS_AND_RESULT(ThresholdedRelu_) +OP_SAME_OPERANDS_AND_RESULT(SquareSr) +OP_SAME_OPERANDS_AND_RESULT(Square) +OP_SAME_OPERANDS_AND_RESULT(Polygamma) +OP_SAME_OPERANDS_AND_RESULT(Polygamma_) +OP_SAME_OPERANDS_AND_RESULT(EnableCheckModelNanInf) +OP_SAME_OPERANDS_AND_RESULT(ViewShape) + +bool ScaleOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + pir::Value operand_source = op->operand_source(0); + const symbol::ShapeOrDataDimExprs &operand_shape_or_data = + infer_context->GetShapeOrDataForValue(operand_source); + std::vector shape(operand_shape_or_data.shape()); + + if (operand_shape_or_data.data()) { + const std::vector data = [&] { + const symbol::DimExpr scale = [&]() -> symbol::DimExpr { + if (op->num_operands() == 2) { + return infer_context->GetShapeOrDataForValue(op->operand_source(1)) + .data() + ->at(0); + } + return static_cast( + op->attribute("scale").dyn_cast().data()); + }(); + int bias = op->attribute("bias").dyn_cast().data(); + + std::vector data; + for (auto &val : *(operand_shape_or_data.data())) { + data.push_back(val * scale + bias); + } + return data; + }(); + + infer_context->SetShapeOrDataForValue( + op->result(0), symbol::TensorShapeOrDataDimExprs(shape, data)); + } else { + infer_context->SetShapeOrDataForValue(op->result(0), operand_shape_or_data); + } + + return true; +} + +bool ArgsortOpInferSymbolicShape( + 
pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const symbol::ShapeOrDataDimExprs &operand_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + infer_context->SetShapeOrDataForValue(op->result(0), operand_shape_or_data); + infer_context->SetShapeOrDataForValue(op->result(1), operand_shape_or_data); + return true; +} + +} // namespace paddle::dialect + +namespace cinn::dialect {} // namespace cinn::dialect + +#undef OP_SAME_OPERANDS_AND_RESULT diff --git a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/same_operands_result-checkpoint.h b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/same_operands_result-checkpoint.h new file mode 100644 index 0000000000000..2e84c7297643f --- /dev/null +++ b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/same_operands_result-checkpoint.h @@ -0,0 +1,164 @@ +// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "paddle/pir/include/dialect/shape/utils/shape_analysis.h" + +namespace paddle::dialect { +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Abs) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Abs_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Acos) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Acos_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Acosh) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Acosh_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Angle) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Argsort) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Asin) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Asin_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Asinh) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Asinh_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Assign) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Assign_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Atan) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Atan_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Atanh) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Atanh_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Bernoulli) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(BitwiseNot) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(BitwiseNot_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Ceil) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Ceil_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Celu) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Clip) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Clip_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Conj) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(CopyTo) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cos) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cos_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cosh) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cosh_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(DequantizeLog) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Digamma) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Digamma_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Dirichlet) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(EmptyLike) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Erf) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Erf_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Erfinv) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Erfinv_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Exp) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Exp_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Expm1) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Expm1_) 
+OP_DECLARE_INFER_SYMBOLIC_SHAPE(Exponential_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Fetch) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Fill) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Fill_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Flip) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Floor) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Floor_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(FullLike) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Imag) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Increment) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Increment_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Isfinite) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(IsfiniteSr) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Isinf) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(IsinfSr) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Isnan) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(IsnanSr) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(I0) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(I0_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(I0e) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(I1) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(I1e) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Lgamma) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Lgamma_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Log1p) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Log1p_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Log) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Log_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(LogicalNot) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(LogicalNot_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Logit) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Logit_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Logsigmoid) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Logsigmoid_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Poisson) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Pow) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Pow_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Prelu) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Print) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(PutAlongAxis) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(PutAlongAxis_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Real) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Reciprocal) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Reciprocal_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Relu) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Relu6) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Relu_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Reverse) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Roll) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Round) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Round_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(RowConv) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Rsqrt) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Rsqrt_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Scale) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(ScaleSr) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(ScaleSr_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Scale_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(ScatterNdAdd) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Scatter) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Scatter_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Select) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Sign) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Sin) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Sin_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Sinh) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Sinh_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Softmax) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Softmax_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Swish) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Tan) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Tan_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Tanh) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Tanh_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Tril) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Tril_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Triu) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Triu_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Trunc) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Trunc_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Sigmoid) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Sigmoid_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(LeakyRelu) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(LeakyRelu_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(ThresholdedRelu) 
+OP_DECLARE_INFER_SYMBOLIC_SHAPE(ThresholdedRelu_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(SquareSr) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Square) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Polygamma) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Polygamma_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(EnableCheckModelNanInf) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(ViewShape) + +} // namespace paddle::dialect + +namespace cinn::dialect { +using paddle::dialect::ReverseOpInferSymbolicShape; +using paddle::dialect::ScaleOpInferSymbolicShape; +using paddle::dialect::SelectOpInferSymbolicShape; +} // namespace cinn::dialect diff --git a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/unary_infer_sym-checkpoint.cc b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/unary_infer_sym-checkpoint.cc new file mode 100644 index 0000000000000..dace8ed3286d2 --- /dev/null +++ b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/unary_infer_sym-checkpoint.cc @@ -0,0 +1,2157 @@ +// Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/infer_sym_slice_utils.h" +#include "paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/infer_sym_utils.h" +#include "paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/unary_infer_sym.h" + +namespace { +std::vector GetRealPadding( + const std::vector &origin_paddings, + const bool global_pooling, + const bool adaptive, + const std::string padding_algorithm, + const std::vector data_dims, + const std::vector &strides, + const std::vector &kernel_size) { + const auto &GetInitPadding = [&]() -> std::vector { + std::vector res; + // set padding size == data_dims.size() * 2 + if (origin_paddings.size() == data_dims.size()) { + for (std::size_t i = 0; i < origin_paddings.size(); ++i) { + res.emplace_back(symbol::DimExpr{origin_paddings.at(i)}); + res.emplace_back(symbol::DimExpr{origin_paddings.at(i)}); + } + } else { + PADDLE_ENFORCE_EQ( + data_dims.size() * 2, + origin_paddings.size(), + phi::errors::InvalidArgument( + "Paddings size %d should be the same or twice as the " + "pooling size %d.", + origin_paddings.size(), + data_dims.size() * 2)); + for (std::size_t i = 0; i < origin_paddings.size(); ++i) { + res.emplace_back(symbol::DimExpr{origin_paddings.at(i)}); + } + } + return res; + }; + + std::vector real_padding = GetInitPadding(); + + const auto &UpdataPadding = [&]() { + symbol::DimExpr one_dimexpr{1}; + symbol::DimExpr zero_dimexpr{0}; + // when padding_algorithm is "VALID" or "SAME" + if (padding_algorithm == "SAME") { + for (std::size_t i = 0; i < data_dims.size(); ++i) { + symbol::DimExpr stride_dimexpr = symbol::DimExpr{strides[i]}; + + symbol::DimExpr out_size = + (data_dims[i] + stride_dimexpr - one_dimexpr) / stride_dimexpr; + symbol::DimExprBuilder builder; + symbol::DimExpr pad_sum = + builder.Max((out_size - one_dimexpr) * stride_dimexpr + + 
kernel_size[i] - data_dims[i], + zero_dimexpr); + symbol::DimExpr pad_0 = pad_sum / symbol::DimExpr{2}; + symbol::DimExpr pad_1 = pad_sum - pad_0; + real_padding[i * 2] = pad_0; + real_padding[i * 2 + 1] = pad_1; + } + } else if (padding_algorithm == "VALID") { + real_padding.assign(real_padding.size(), zero_dimexpr); + } + + // if global_pooling == true or adaptive == true, padding will be ignore + if (global_pooling || adaptive) { + real_padding.assign(real_padding.size(), zero_dimexpr); + } + }; + + UpdataPadding(); + return real_padding; +} + +symbol::ShapeOrDataDimExprs Pool2dRawInferSymbolicShape( + pir::Operation *op, + const std::vector &kernel_size, + pir::InferSymbolicShapeContext *infer_context) { + const auto &x_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + + const auto &x_dims = x_shape_or_data.shape(); + PADDLE_ENFORCE_EQ( + x_dims.size() == 4 || x_dims.size() == 5, + true, + phi::errors::InvalidArgument( + "the input of Op(pool) should be 4-D or 5-D Tensor. But " + "received: %u-D Tensor.", + x_dims.size())); + + PADDLE_ENFORCE_EQ(x_dims.size() - kernel_size.size(), + 2U, + phi::errors::InvalidArgument( + "the rank of input minus the size of kernel_size " + "must be equal to 2 in Op(pool). " + "But received: the rank of input is %d and the " + "rank of kernel_size is %d.", + x_dims.size(), + kernel_size.size())); + + std::vector strides = [&]() { + std::vector res; + const auto &stride_attr = + op->attributes().at("strides").dyn_cast(); + for (size_t i = 0; i < stride_attr.size(); i++) { + res.emplace_back( + stride_attr.at(i).dyn_cast().data()); + } + return res; + }(); + + PADDLE_ENFORCE_EQ( + kernel_size.size(), + strides.size(), + phi::errors::InvalidArgument( + "the rank of kernel_size and strides in Op(pool) must be equal. 
" + "But received: the rank of kernel_size is %d and the rank of stride " + "is %d.", + kernel_size.size(), + strides.size())); + + const std::string &data_format = + op->attribute("data_format").AsString(); + const bool channel_last = data_format == "NHWC" || data_format == "NDHWC"; + + const auto &data_dims = [&]() -> std::vector { + if (channel_last) { + return std::vector(x_dims.begin() + 1, x_dims.end() - 1); + } else { + return std::vector(x_dims.begin() + 2, x_dims.end()); + } + }(); + + bool global_pooling = + op->attribute("global_pooling").data(); + bool adaptive = op->attribute("adaptive").data(); + std::string padding_algorithm = + op->attribute("padding_algorithm").AsString(); + + const auto &real_paddings = [&]() -> std::vector { + std::vector paddings; + const auto &padding_attr = + op->attributes().at("paddings").dyn_cast(); + for (size_t i = 0; i < padding_attr.size(); i++) { + paddings.emplace_back( + padding_attr.at(i).dyn_cast().data()); + } + return GetRealPadding(paddings, + global_pooling, + adaptive, + padding_algorithm, + data_dims, + strides, + kernel_size + + ); + }(); + + const auto &real_kernel_size = [&]() -> std::vector { + if (global_pooling) { + return data_dims; + } + return kernel_size; + }(); + + const auto &output_shape_or_data = [&]() -> symbol::ShapeOrDataDimExprs { + std::vector output_shape; + bool ceil_mode = op->attribute("ceil_mode").data(); + if (adaptive) { + output_shape.insert( + output_shape.end(), real_kernel_size.begin(), real_kernel_size.end()); + } else { + for (size_t i = 0; i < data_dims.size(); ++i) { + symbol::DimExpr stride_dimexpr{strides[i]}; + symbol::DimExpr one_dimexpr{1}; + if (!ceil_mode) { + output_shape.emplace_back((data_dims[i] - real_kernel_size[i] + + real_paddings[2 * i] + + real_paddings[2 * i + 1]) / + stride_dimexpr + + one_dimexpr); + } else { + output_shape.emplace_back( + (data_dims[i] - real_kernel_size[i] + real_paddings[2 * i] + + real_paddings[2 * i + 1] + stride_dimexpr - one_dimexpr) / + stride_dimexpr + + one_dimexpr); + } + } + } + + // output_N = input_N + output_shape.insert(output_shape.begin(), x_dims[0]); + // output_C = input_C + if (channel_last) { + output_shape.push_back(x_dims[x_dims.size() - 1]); + } else { + output_shape.insert(output_shape.begin() + 1, x_dims[1]); + } + return symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(output_shape)}; + }(); + + return output_shape_or_data; +} +} // namespace + +namespace paddle::dialect { +using paddle::dialect::details::CreateShapeOrDataForXShape; + +bool AllOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + const auto &axis = details::GetVectorAttr(op, "axis"); + return details::ReduceInferDim(op, + infer_context, + axis, + GetBoolAttr(op, "keepdim"), /*keepdim*/ + axis.size() == 0 /*reduce_all*/); +} + +bool AmaxOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + const auto &axis = details::GetVectorAttr(op, "axis"); + return details::ReduceInferDim(op, + infer_context, + axis, + GetBoolAttr(op, "keepdim"), /*keepdim*/ + axis.size() == 0 /*reduce_all*/); +} + +bool AminOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + const auto &axis = details::GetVectorAttr(op, "axis"); + return details::ReduceInferDim(op, + infer_context, + axis, + GetBoolAttr(op, "keepdim"), /*keepdim*/ + axis.size() == 0 /*reduce_all*/); +} + +bool AnyOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext 
*infer_context) { + const auto &axis = details::GetVectorAttr(op, "axis"); + return details::ReduceInferDim(op, + infer_context, + axis, + GetBoolAttr(op, "keepdim"), /*keepdim*/ + axis.size() == 0 /*reduce_all*/); +} + +bool ArgmaxOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + bool flatten = GetBoolAttr(op, "flatten"); + bool keepdims = GetBoolAttr(op, "keepdims"); + + const auto &input_sym_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(0)).shape(); + int rank = input_sym_shape.size(); + + const auto &axis_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(1)); + int axis = + static_cast(axis_shape_or_data.data().value().at(0).Get()); + if (axis < 0) axis += rank; + + const auto &out_sym_shape = [&] { + std::vector out_sym_shape; + if (flatten) { + if (keepdims) { + out_sym_shape.emplace_back(std::int64_t(rank)); + } else { + out_sym_shape.emplace_back(std::int64_t(0)); + } + } else { + for (int i = 0; i < axis; i++) { + out_sym_shape.emplace_back(input_sym_shape.at(i)); + } + if (keepdims) { + out_sym_shape.emplace_back(std::int64_t(1)); + } + + for (int i = axis + 1; i < rank; i++) { + out_sym_shape.emplace_back(input_sym_shape.at(i)); + } + } + return out_sym_shape; + }(); + + symbol::ShapeOrDataDimExprs shape_data{ + symbol::TensorShapeOrDataDimExprs(out_sym_shape)}; + + infer_context->SetShapeOrDataForValue(op->result(0), shape_data); + return true; +} + +bool ArgminOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + return ArgmaxOpInferSymbolicShape(op, infer_context); +} + +bool AsComplexOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + pir::Value operand_source = op->operand_source(0); + const symbol::ShapeOrDataDimExprs &operand_shape_or_data = + infer_context->GetShapeOrDataForValue(operand_source); + + const std::vector out_dims = [&] { + std::vector out_dims = operand_shape_or_data.shape(); + out_dims.pop_back(); + return out_dims; + }(); + + symbol::ShapeOrDataDimExprs shape_data{ + symbol::TensorShapeOrDataDimExprs(out_dims)}; + + infer_context->SetShapeOrDataForValue(op->result(0), shape_data); + return true; +} +bool AsRealOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + pir::Value operand_source = op->operand_source(0); + const symbol::ShapeOrDataDimExprs &operand_shape_or_data = + infer_context->GetShapeOrDataForValue(operand_source); + + const std::vector out_dims = [&] { + std::vector out_dims = operand_shape_or_data.shape(); + out_dims.push_back(symbol::DimExpr(2)); + return out_dims; + }(); + + symbol::ShapeOrDataDimExprs shape_data{ + symbol::TensorShapeOrDataDimExprs(out_dims)}; + + infer_context->SetShapeOrDataForValue(op->result(0), shape_data); + return true; +} + +bool AssignOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + infer_context->SetShapeOrDataForValue( + op->result(0), + infer_context->GetShapeOrDataForValue(op->operand_source(0))); + return true; +} + +bool Assign_OpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + return AssignOpInferSymbolicShape(op, infer_context); +} + +bool AsStridedOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const std::vector &shape = + paddle::dialect::details::GetVectorAttr(op, "dims"); + + int rank = shape.size(); + std::vector out_shape; + for (int i = 0; i < rank; ++i) 
{ + out_shape.push_back(symbol::DimExpr(shape[i])); + } + + infer_context->SetShapeOrDataForValue( + op->result(0), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(out_shape)}); + + return true; +} + +bool BipartiteMatchOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const auto &dist_mat_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + const auto &dims = dist_mat_shape_or_data.shape(); + + PADDLE_ENFORCE_EQ( + dims.size(), + 2, + phi::errors::InvalidArgument("The rank of Input(DistMat) must be 2.")); + + infer_context->SetShapeOrDataForValue(op->result(0), dist_mat_shape_or_data); + + infer_context->SetShapeOrDataForValue(op->result(1), dist_mat_shape_or_data); + + return true; +} + +bool CastOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + infer_context->SetShapeOrDataForValue( + op->result(0), + infer_context->GetShapeOrDataForValue(op->operand_source(0))); + return true; +} + +bool Cast_OpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + return CastOpInferSymbolicShape(op, infer_context); +} + +bool CholeskyOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const auto &x_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + + auto rank = x_shape.shape().size(); + PADDLE_ENFORCE_GE(rank, + 2, + common::errors::InvalidArgument( + "The Input(X) should have at least 2 dimensions. But " + "received a %d dimension tensor.", + rank)); + + infer_context->AddEqualCstr(x_shape.shape()[rank - 2], + x_shape.shape()[rank - 1]); + + infer_context->SetShapeOrDataForValue(op->result(0), x_shape); + + return true; +} + +bool ClipByNormOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const auto &input_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + float max_norm = op->attribute("max_norm").data(); + PADDLE_ENFORCE_GT( + max_norm, + 0, + phi::errors::InvalidArgument("max_norm should be greater than 0. 
" + "Received max_norm is %f.", + max_norm)); + + infer_context->SetShapeOrDataForValue(op->result(0), input_shape); + return true; +} + +bool ClipByNormSrOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + return ClipByNormOpInferSymbolicShape(op, infer_context); +} + +bool CummaxOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + pir::Value operand_source = op->operand_source(0); + const symbol::ShapeOrDataDimExprs &operand_shape_or_data = + infer_context->GetShapeOrDataForValue(operand_source); + + infer_context->SetShapeOrDataForValue(op->result(0), operand_shape_or_data); + infer_context->SetShapeOrDataForValue(op->result(1), operand_shape_or_data); + return true; +} +bool CumminOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + return CummaxOpInferSymbolicShape(op, infer_context); +} +bool CumprodOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + pir::Value operand_source = op->operand_source(0); + const symbol::ShapeOrDataDimExprs &operand_shape_or_data = + infer_context->GetShapeOrDataForValue(operand_source); + infer_context->SetShapeOrDataForValue(op->result(0), operand_shape_or_data); + return true; +} +bool Cumprod_OpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + return CumprodOpInferSymbolicShape(op, infer_context); +} +bool CumsumOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + pir::Value operand_source = op->operand_source(0); + + const symbol::ShapeOrDataDimExprs &operand_shape_or_data = + infer_context->GetShapeOrDataForValue(operand_source); + + bool flatten = GetBoolAttr(op, "flatten"); + if (flatten) { + symbol::DimExpr product{1}; + const auto &dim_exprs = operand_shape_or_data.shape(); + for (const auto &dim_expr : dim_exprs) { + product = product * dim_expr; + } + const std::vector out_dims = {product}; + symbol::ShapeOrDataDimExprs shape_data{ + symbol::TensorShapeOrDataDimExprs(out_dims)}; + infer_context->SetShapeOrDataForValue(op->result(0), shape_data); + + } else { + infer_context->SetShapeOrDataForValue(op->result(0), operand_shape_or_data); + } + return true; +} +bool Cumsum_OpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + return CumsumOpInferSymbolicShape(op, infer_context); +} +bool ChannelShuffleOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const auto &x_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + const std::vector &input_dims = x_shape_or_data.shape(); + + int groups = op->attribute("groups").data(); + std::string data_format = + op->attribute("data_format").AsString(); + + PADDLE_ENFORCE_EQ( + input_dims.size(), + 4, + phi::errors::InvalidArgument("Input should be a 4-D tensor of format [N, " + "C, H, W] or [N, H, W, C], but got %u.", + input_dims.size())); + PADDLE_ENFORCE_GE( + groups, + 1, + phi::errors::InvalidArgument("groups should be larger than 0.")); + PADDLE_ENFORCE_EQ( + data_format == "NCHW" || data_format == "NHWC", + true, + phi::errors::InvalidArgument("data_format must be one of NCHW and NHWC. 
" + "But received data_format: %s", + data_format)); + + const bool channel_last = (data_format == "NHWC"); + + symbol::DimExpr channels; + if (!channel_last) { + channels = input_dims[1]; + } else { + channels = input_dims[3]; + } + + symbol::DimExpr groups_expr = symbol::DimExpr(groups); + symbol::DimExpr expected_channels = groups_expr * (channels / groups_expr); + + infer_context->AddEqualCstr(channels, expected_channels); + + infer_context->SetShapeOrDataForValue(op->result(0), x_shape_or_data); + + return true; +} + +bool DiagEmbedOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + pir::Value operand_source = op->operand_source(0); + const symbol::ShapeOrDataDimExprs &operand_shape_or_data = + infer_context->GetShapeOrDataForValue(operand_source); + const auto &attributes = op->attributes(); + int dim1 = attributes.at("dim1").dyn_cast().data(); + int dim2 = attributes.at("dim2").dyn_cast().data(); + int offset = attributes.at("offset").dyn_cast().data(); + + const auto &x_dims = operand_shape_or_data.shape(); + int dim1_ = dim1 < 0 ? x_dims.size() + dim1 + 1 : dim1; + int dim2_ = dim2 < 0 ? x_dims.size() + dim2 + 1 : dim2; + int64_t offset_ = static_cast(std::abs(offset)); + symbol::DimExpr new_dim_len = + symbol::DimExpr(offset_) + x_dims.at(x_dims.size() - 1); + + const auto &out_dims = [&] { + std::vector out_dims = x_dims; + out_dims.pop_back(); + out_dims.insert(out_dims.begin() + std::min(dim1_, dim2_), new_dim_len); + out_dims.insert(out_dims.begin() + std::max(dim1_, dim2_), new_dim_len); + return out_dims; + }(); + symbol::ShapeOrDataDimExprs shape_data{ + symbol::TensorShapeOrDataDimExprs(out_dims)}; + infer_context->SetShapeOrDataForValue(op->result(0), shape_data); + return true; +} +bool DiagonalOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + pir::Value operand_source = op->operand_source(0); + const symbol::ShapeOrDataDimExprs &operand_shape_or_data = + infer_context->GetShapeOrDataForValue(operand_source); + const auto &attributes = op->attributes(); + int axis1 = attributes.at("axis1").dyn_cast().data(); + int axis2 = attributes.at("axis2").dyn_cast().data(); + int offset = attributes.at("offset").dyn_cast().data(); + + const auto &x_dims = operand_shape_or_data.shape(); + int axis1_ = axis1 < 0 ? x_dims.size() + axis1 : axis1; + int axis2_ = axis2 < 0 ? x_dims.size() + axis2 : axis2; + + auto out_dims = x_dims; + auto axis1_size = out_dims.at(axis1_); + auto axis2_size = out_dims.at(axis2_); + out_dims.erase(out_dims.begin() + std::max(axis1_, axis2_)); + out_dims.erase(out_dims.begin() + std::min(axis1_, axis2_)); + + symbol::DimExprBuilder builder; + symbol::DimExpr zero{0}; + symbol::DimExpr res_shape; + symbol::DimExpr offset_sym{offset}; + if (offset == 0) { + res_shape = builder.Min(axis1_size, axis2_size); + } else if (offset > 0) { + if (axis2_size.isa()) { + res_shape = (axis2_size.dyn_cast() - offset) > 0 + ? builder.Min(axis1_size, axis2_size - offset_sym) + : zero; + } else { + res_shape = infer_context->GetNextSymName(); + } + } else { + if (axis1_size.isa()) { + res_shape = (axis1_size.dyn_cast() + offset) > 0 + ? 
builder.Min(axis1_size + offset_sym, axis2_size) + : zero; + } else { + res_shape = infer_context->GetNextSymName(); + } + } + out_dims.push_back(symbol::SimplifyDimExpr(res_shape)); + + symbol::ShapeOrDataDimExprs shape_data{ + symbol::TensorShapeOrDataDimExprs(out_dims)}; + infer_context->SetShapeOrDataForValue(op->result(0), shape_data); + return true; +} + +bool DistributeFpnProposalsOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const auto &attributes = op->attributes(); + int32_t min_level = + attributes.at("min_level").dyn_cast().data(); + int32_t max_level = + attributes.at("max_level").dyn_cast().data(); + int32_t num_levels = max_level - min_level + 1; + int64_t batch_size = 1; + + symbol::DimExpr num_rois = + infer_context->GetShapeOrDataForValue(op->operand_source(0)) + .shape() + .at(0); + + const auto &multi_rois_out_shape = [&]() { + symbol::TensorListShapeOrDataDimExprs multi_rois_out_shape; + if (num_levels == 1) { + multi_rois_out_shape.emplace_back( + symbol::TensorShapeOrDataDimExprs({num_rois, 4})); + } else { + symbol::DimExpr last_dim = num_rois; + for (int i = 0; i < num_levels - 1; i++) { + const auto &next_sym_name = infer_context->GetNextSymName(); + std::vector level_dim = {next_sym_name, 4}; + multi_rois_out_shape.emplace_back( + symbol::TensorShapeOrDataDimExprs(level_dim)); + last_dim = last_dim - level_dim.at(0); + } + multi_rois_out_shape.emplace_back(symbol::TensorShapeOrDataDimExprs( + {infer_context->GetNextSymName(), 4})); + } + + return multi_rois_out_shape; + }(); + + const auto &rois_num_per_level_out_shape = [&]() { + symbol::TensorListShapeOrDataDimExprs rois_num_per_level_out_shape; + rois_num_per_level_out_shape.resize( + num_levels, symbol::TensorShapeOrDataDimExprs({batch_size})); + return rois_num_per_level_out_shape; + }(); + + const auto &restore_ind = [&]() { + if (op->operand_source(1)) { + return symbol::TensorShapeOrDataDimExprs( + {infer_context->GetNextSymName(), 1}); + } + return symbol::TensorShapeOrDataDimExprs({num_rois, 1}); + }(); + + infer_context->SetShapeOrDataForValue(op->result(0), multi_rois_out_shape); + infer_context->SetShapeOrDataForValue(op->result(1), + rois_num_per_level_out_shape); + infer_context->SetShapeOrDataForValue(op->result(2), restore_ind); + return true; +} + +bool EighOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + const auto &x_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(0)).shape(); + std::vector out_shape; + for (size_t i = 0; i < x_shape.size() - 1; ++i) { + out_shape.push_back(x_shape.at(i)); + } + infer_context->SetShapeOrDataForValue( + op->result(0), symbol::TensorShapeOrDataDimExprs(out_shape)); + infer_context->SetShapeOrDataForValue( + op->result(1), symbol::TensorShapeOrDataDimExprs(x_shape)); + return true; +} + +bool EigvalshOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + return EighOpInferSymbolicShape(op, infer_context); +} + +bool FakeChannelWiseQuantizeAbsMaxOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const auto &x_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + + int bit_length = op->attribute("bit_length").data(); + int quant_axis = op->attribute("quant_axis").data(); + + PADDLE_ENFORCE_EQ(bit_length >= 1 && bit_length <= 16, + true, + common::errors::InvalidArgument( + "'bit_length' should be between 1 and 16, but " + "the received is %d", 
+ bit_length)); + PADDLE_ENFORCE_EQ( + quant_axis == 0 || quant_axis == 1, + true, + common::errors::InvalidArgument("'quant_axis' should be 0 or 1, but " + "the received is %d", + quant_axis)); + + infer_context->SetShapeOrDataForValue( + op->result(0), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(x_shape_or_data.shape())}); + + std::vector out_scale_shape = { + x_shape_or_data.shape()[quant_axis]}; + infer_context->SetShapeOrDataForValue( + op->result(1), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(out_scale_shape)}); + + return true; +} + +bool FftC2cOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + const auto &x_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + std::vector x_dims = x_shape_or_data.shape(); + + // Set the output shape to be the same as the input shape + infer_context->SetShapeOrDataForValue( + op->result(0), + symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs(x_dims)}); + + return true; +} + +bool FftC2rOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + const auto &x_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + std::vector x_dims = x_shape_or_data.shape(); + + auto axes = paddle::dialect::details::GetVectorAttr(op, "axes"); + int64_t last_dim_size = + op->attribute("last_dim_size").data(); + int last_fft_axis = static_cast(axes.back()); + + std::vector out_dims = x_dims; + + if (last_dim_size > 0) { + out_dims[last_fft_axis] = symbol::DimExpr(last_dim_size); + } else { + symbol::DimExprBuilder builder; + out_dims[last_fft_axis] = + builder.Mul(x_dims[last_fft_axis], 2) - symbol::DimExpr{1}; + } + + infer_context->SetShapeOrDataForValue( + op->result(0), + symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs(out_dims)}); + + return true; +} + +bool FftR2cOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + const auto &x_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + std::vector x_dims = x_shape_or_data.shape(); + + auto axes = paddle::dialect::details::GetVectorAttr(op, "axes"); + bool onesided = op->attribute("onesided").data(); + + std::vector out_dims = x_dims; + + if (onesided) { + int last_fft_axis = static_cast(axes.back()); + symbol::DimExprBuilder builder; + out_dims[last_fft_axis] = + builder.Add(builder.Div(x_dims[last_fft_axis], 2), 1); + } + + infer_context->SetShapeOrDataForValue( + op->result(0), + symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs(out_dims)}); + + return true; +} + +bool FillDiagonalOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const auto &x_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + std::vector x_dims = x_shape_or_data.shape(); + + infer_context->SetShapeOrDataForValue( + op->result(0), + symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs(x_dims)}); + + return true; +} + +bool FillDiagonal_OpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + return FillDiagonalOpInferSymbolicShape(op, infer_context); +} + +bool FlattenOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const auto &attributes = op->attributes(); + int start_axis = + attributes.at("start_axis").dyn_cast().data(); + int stop_axis = + attributes.at("stop_axis").dyn_cast().data(); + + const 
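+ // e.g. x = [2, 3, 4, 5] with start_axis = 1, stop_axis = 2 flattens to [2, 12, 5]. +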
auto &x_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(0)).shape(); + int in_dims_size = x_shape.size(); + + if (in_dims_size == 0) { + PADDLE_ENFORCE_EQ( + start_axis == 0 || start_axis == -1, + true, + common::errors::InvalidArgument("The start_axis should be 0 or -1 when " + "the input tensor is a 0D-Tensor")); + PADDLE_ENFORCE_EQ(stop_axis == 0 || stop_axis == -1, + true, + common::errors::InvalidArgument( + "The stop_axis should be 0 or -1 when the " + "input tensor is a 0D-Tensor")); + // this ensures the out shape is {1} + start_axis = 0; + stop_axis = -1; + } + + if (start_axis < 0) { + start_axis = start_axis + in_dims_size; + } + if (stop_axis < 0) { + stop_axis = stop_axis + in_dims_size; + } + if (in_dims_size > 0) { + PADDLE_ENFORCE_GE( + stop_axis, + start_axis, + common::errors::InvalidArgument("The stop_axis should be greater " + "than or equal to start_axis.")); + } + + symbol::DimExpr outer{1}; + std::vector<symbol::DimExpr> out_shape; + out_shape.reserve(in_dims_size - stop_axis + start_axis + 1); + for (int i = 0; i < start_axis; ++i) { + out_shape.push_back(x_shape.at(i)); + } + for (int i = start_axis; i <= stop_axis; i++) { + outer = outer * x_shape.at(i); + } + out_shape.push_back(outer); + for (int i = stop_axis + 1; i < in_dims_size; i++) { + out_shape.push_back(x_shape.at(i)); + } + + symbol::ShapeOrDataDimExprs out_shape_data{ + symbol::TensorShapeOrDataDimExprs(out_shape)}; + infer_context->SetShapeOrDataForValue(op->result(0), out_shape_data); + + std::vector<symbol::DimExpr> xshape_shape = x_shape; + xshape_shape.insert(xshape_shape.begin(), symbol::DimExpr{0}); + symbol::ShapeOrDataDimExprs xshape_shape_data{ + symbol::TensorShapeOrDataDimExprs(xshape_shape)}; + infer_context->SetShapeOrDataForValue(op->result(1), xshape_shape_data); + return true; +} + +bool Flatten_OpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + return FlattenOpInferSymbolicShape(op, infer_context); +} + +bool FoldOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + const auto &in_dims = + infer_context->GetShapeOrDataForValue(op->operand_source(0)).shape(); + + std::vector<symbol::DimExpr> out_dims; + out_dims.push_back(in_dims[0]); + std::vector<int64_t> kernel_sizes = + paddle::dialect::details::GetVectorAttr(op, "kernel_sizes"); + out_dims.push_back(in_dims[1] / (kernel_sizes[0] * kernel_sizes[1])); + + infer_context->SetShapeOrDataForValue( + op->result(0), + symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs(out_dims)}); + + return true; +} + +bool IdentityLossOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const auto &input_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + int reduction = op->attribute<pir::Int32Attribute>("reduction").data(); + if (reduction == 2) { + infer_context->SetShapeOrDataForValue(op->result(0), input_shape); + } else { + std::vector<symbol::DimExpr> out_shape = {}; + infer_context->SetShapeOrDataForValue( + op->result(0), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(out_shape)}); + } + + return true; +} + +bool IdentityLoss_OpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + return IdentityLossOpInferSymbolicShape(op, infer_context); +} + +bool KthvalueOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + pir::Value operand_source = op->operand_source(0); + const symbol::ShapeOrDataDimExprs &operand_shape_or_data =
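+ // kthvalue drops the axis dim (or keeps it as 1 when keepdim is set); the + // values and indices outputs share the resulting shape. +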
infer_context->GetShapeOrDataForValue(operand_source); + const auto &attributes = op->attributes(); + int axis = attributes.at("axis").dyn_cast().data(); + bool keepdim = GetBoolAttr(op, "keepdim"); + + const auto &input_dims = operand_shape_or_data.shape(); + const int &dim_size = input_dims.size(); + if (axis < 0) axis += dim_size; + std::vector out_dims; + for (int i = 0; i < axis; i++) { + out_dims.emplace_back(input_dims.at(i)); + } + if (keepdim && dim_size > 0) { + out_dims.emplace_back(symbol::DimExpr(1)); + } + for (int i = axis + 1; i < dim_size; i++) { + out_dims.emplace_back(input_dims.at(i)); + } + symbol::ShapeOrDataDimExprs shape_data{ + symbol::TensorShapeOrDataDimExprs(out_dims)}; + infer_context->SetShapeOrDataForValue(op->result(0), shape_data); + infer_context->SetShapeOrDataForValue(op->result(1), shape_data); + return true; +} + +bool LpPool2dOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const auto &kernel_size = [&]() -> std::vector { + std::vector kernel_size_int_vec = + op->attribute("kernel_size") + .data() + .GetData(); + return details::VecInt642Expr(kernel_size_int_vec); + }(); + infer_context->SetShapeOrDataForValue( + op->result(0), + Pool2dRawInferSymbolicShape(op, kernel_size, infer_context)); + return true; +} + +bool LogcumsumexpOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + // same as CumsumOpInferSymbolicShape + return CumsumOpInferSymbolicShape(op, infer_context); +} + +bool LogsumexpOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + bool keepdim = GetBoolAttr(op, "keepdim"); + std::vector axis_in = details::GetVectorAttr(op, "axis"); + std::vector axis; + axis.reserve(axis_in.size()); + std::for_each(axis_in.begin(), axis_in.end(), [&axis](const int &t) { + axis.push_back(static_cast(t)); + }); + bool reduce_all = axis.size() == 0 ? true : false; + return details::ReduceInferDim(op, infer_context, axis, keepdim, reduce_all); +} + +bool MaxOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + bool keepdim = GetBoolAttr(op, "keepdim"); + + const std::vector axis = [&] { + pir::Operation *axis_gen_op = op->operand_source(1).defining_op(); + std::vector axis_vec; + if (axis_gen_op->isa()) { + axis_vec = details::GetVectorAttr( + axis_gen_op->dyn_cast(), "value"); + } else { + // TODO(lanxianghit): there's other source: pir::VectorType, + // paddle::dialect::DenseTensorType, but after PRIM, maybe always + // FullIntArrayOp, to be confirmed + PADDLE_THROW(common::errors::Unimplemented( + "MaxOpInferSymbolicShape: 'axis' only " + "support FullIntArrayOp's result now.")); + } + return axis_vec; + }(); + + bool reduce_all = axis.size() == 0 ? 
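+ // an empty axis list means reduce over every dimension +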
true : false; + + return details::ReduceInferDim(op, infer_context, axis, keepdim, reduce_all); +} + +bool MaxoutOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + const auto &x_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + const std::vector &in_x_dims = x_shape_or_data.shape(); + + int groups = op->attribute("groups").data(); + int axis = op->attribute("axis").data(); + + if (axis < 0) { + axis += in_x_dims.size(); + } + + std::vector output_shape = in_x_dims; + output_shape[axis] = in_x_dims[axis] / groups; + infer_context->SetShapeOrDataForValue( + op->result(0), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(output_shape)}); + + return true; +} + +bool MinOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + return MaxOpInferSymbolicShape(op, infer_context); +} + +bool MeanAllOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const auto &x_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + const std::vector &x_dims = x_shape_or_data.shape(); + + PADDLE_ENFORCE_GT( + x_dims.size(), + 0, + phi::errors::InvalidArgument("Input(x) of MeanAllOp must have rank " + "greater than 0, but received rank 0.")); + + std::vector output_shape = {}; + + infer_context->SetShapeOrDataForValue( + op->result(0), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(output_shape)}); + + return true; +} + +bool NonzeroOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const auto &x_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + const auto &x_shape = x_shape_or_data.shape(); + int rank = x_shape.size(); + + PADDLE_ENFORCE_GE( + rank, + 1UL, + common::errors::InvalidArgument( + "Input(x) should have number of dimension at least 1.")); + + std::string sym_name = infer_context->GetNextSymName(); + std::vector out_shape{symbol::DimExpr{sym_name}, + symbol::DimExpr{rank}}; + + symbol::ShapeOrDataDimExprs shape_data{ + symbol::TensorShapeOrDataDimExprs(out_shape)}; + infer_context->SetShapeOrDataForValue(op->result(0), shape_data); + return true; +} + +bool NumelOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + std::vector out_shape = {}; + infer_context->SetShapeOrDataForValue( + op->result(0), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(out_shape)}); + + return true; +} + +bool PadOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + // input(0): Tensor x + const auto &x_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + PADDLE_ENFORCE_EQ(x_shape_or_data.data().has_value(), + false, + common::errors::InvalidArgument( + "InferSymbolicShape of PadOp only support input with " + "value now.")); + const auto &x_dims_sym = x_shape_or_data.shape(); + const size_t rank = x_dims_sym.size(); + + // input(1): int[] paddings + std::vector paddings = + paddle::dialect::details::GetVectorAttr(op, "paddings"); + PADDLE_ENFORCE_EQ(rank * 2, + paddings.size(), + common::errors::InvalidArgument( + "The size of paddings should be 2 * input's rank. 
But " + "got paddings.size() = %d, input's rank = %d.", + paddings.size(), + rank)); + + // output + const auto &out_dims = [&] { + std::vector out_dims; + out_dims.reserve(rank); + for (size_t i = 0; i < rank; ++i) { + out_dims.push_back(x_dims_sym.at(i) + paddings.at(2 * i) + + paddings.at(2 * i + 1)); + } + return out_dims; + }(); + + infer_context->SetShapeOrDataForValue( + op->result(0), symbol::TensorShapeOrDataDimExprs(out_dims)); + + return true; +} + +bool Pad3dOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + const auto &x_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(0)).shape(); + PADDLE_ENFORCE_EQ(x_shape.size(), + 5, + common::errors::InvalidArgument( + "The size of Input(X)'s dimension should be equal to " + "5, but received %d. ", + x_shape.size())); + const auto &paddings_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(1)); + if (!paddings_shape.data().has_value()) { + std::stringstream ss; + ss << paddings_shape; + PADDLE_THROW( + common::errors::InvalidArgument("The data of paddings's symbol shape " + "should have value, but now got [%s].", + ss.str())); + } + const std::string &data_format = + op->attribute("data_format").AsString(); + + const std::vector &out_dims = [&] { + std::vector out_dims = x_shape; + const auto &paddings = paddings_shape.data().value(); + PADDLE_ENFORCE_EQ(paddings.size(), + 6, + common::errors::InvalidArgument( + "Shape of Input(Paddings) should be equal to " + "[6], but received [%d].", + paddings.size())); + if (data_format == "NCDHW") { + out_dims.at(1) = x_shape.at(1); + out_dims.at(2) = x_shape.at(2) + paddings.at(4) + paddings.at(5); + out_dims.at(3) = x_shape.at(3) + paddings.at(2) + paddings.at(3); + out_dims.at(4) = x_shape.at(4) + paddings.at(0) + paddings.at(1); + } else { + out_dims.at(1) = x_shape.at(1) + paddings.at(4) + paddings.at(5); + out_dims.at(2) = x_shape.at(2) + paddings.at(2) + paddings.at(3); + out_dims.at(3) = x_shape.at(3) + paddings.at(0) + paddings.at(1); + out_dims.at(4) = x_shape.at(4); + } + return out_dims; + }(); + + infer_context->SetShapeOrDataForValue( + op->result(0), symbol::TensorShapeOrDataDimExprs(out_dims)); + + return true; +} + +bool Pool2dOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + const auto &kernel_size_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(1)); + const auto &kernel_size = + details::GetExprVecFromData(kernel_size_shape_or_data); + infer_context->SetShapeOrDataForValue( + op->result(0), + Pool2dRawInferSymbolicShape(op, kernel_size, infer_context)); + return true; +} + +bool ProdOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + bool keepdim = GetBoolAttr(op, "keepdim"); + bool reduce_all = GetBoolAttr(op, "reduce_all"); + + auto axis_gen_op = op->operand_source(1).defining_op(); + if (axis_gen_op->isa()) { + std::vector axis = details::GetVectorAttr( + axis_gen_op->dyn_cast(), "value"); + return details::ReduceInferDim( + op, infer_context, axis, keepdim, reduce_all); + } else { + // TODO(lanxianghit): deal with other source: pir::VectorType, + // paddle::dialect::DenseTensorType + PADDLE_THROW( + common::errors::Unimplemented("ProdOpInferSymbolicShape: 'axis' only " + "support FullIntArrayOp's result now.")); + } + + return true; +} + +bool RepeatInterleaveOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + pir::Value operand_source = 
op->operand_source(0); + const symbol::ShapeOrDataDimExprs &operand_shape_or_data = + infer_context->GetShapeOrDataForValue(operand_source); + + const auto &attributes = op->attributes(); + int repeats = attributes.at("repeats").dyn_cast().data(); + // what should I do if axis is null + int axis = attributes.at("axis").dyn_cast().data(); + + const std::vector &in_dims_sym = [&] { + std::vector dims; + if (operand_shape_or_data.data().has_value()) { + dims = operand_shape_or_data.data().value(); + } else { + dims = operand_shape_or_data.shape(); + } + return dims; + }(); + + int x_rank = in_dims_sym.size(); + if (axis < 0) axis += x_rank; + + const auto &out_sym_shape = [&] { + std::vector out_sym_shape; + for (int i = 0; i < x_rank; i++) { + if (i == axis) { + out_sym_shape.push_back(in_dims_sym.at(i) * repeats); + } else { + out_sym_shape.push_back(in_dims_sym.at(i)); + } + } + return out_sym_shape; + }(); + + infer_context->SetShapeOrDataForValue( + op->result(0), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(out_sym_shape)}); + + return true; +} + +bool ReshapeOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const symbol::ShapeOrDataDimExprs &x_dim_expr = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + const symbol::ShapeOrDataDimExprs &shape_dim_expr = + infer_context->GetShapeOrDataForValue(op->operand_source(1)); + + const auto &GetProduct = [&](const auto &dim_exprs, const auto &Filter) { + symbol::DimExpr product{1}; + for (const auto &dim_expr : dim_exprs) { + if (Filter(dim_expr)) { + product = product * dim_expr; + } + } + return product; + }; + + const auto &IsNotMinusOne = [&](const symbol::DimExpr &dim_expr) { + if (dim_expr.isa()) { + return dim_expr.dyn_cast() != static_cast(-1); + } + return true; + }; + + const auto &IsPositiveInteger = [&](const symbol::DimExpr &dim_expr) { + if (dim_expr.isa()) { + return dim_expr.dyn_cast() > static_cast(0); + } + return true; + }; + + const auto &IsZero = [&](const symbol::DimExpr &dim_expr) { + if (dim_expr.isa()) { + return dim_expr.dyn_cast() == static_cast(0); + } + return false; + }; + + const std::vector out_dims = [&] { + const auto &original_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(0)).shape(); + ExprVec target_shape = details::GetExprVecFromData(shape_dim_expr); + + // replace '0' with original shape + for (size_t i = 0; i < target_shape.size(); i++) { + if (i < original_shape.size() && IsZero(target_shape.at(i))) { + target_shape.at(i) = original_shape.at(i); + } + } + + // replace '-1' with infered shape + const auto &numel = + GetProduct(original_shape, [](const auto &) { return true; }); + const auto &product_exclude_minus_one = + GetProduct(target_shape, IsPositiveInteger); + const auto &input_dims = target_shape; + + std::vector out_dims; + out_dims.reserve(input_dims.size()); + for (size_t i = 0; i < input_dims.size(); ++i) { + auto out_dim_expr = IsNotMinusOne(input_dims.at(i)) + ? 
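+ // the single -1 entry is inferred as numel / product of the known target + // dims, e.g. numel = 24 with target shape [-1, 8] gives 24 / 8 = 3. +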
input_dims.at(i) + : (numel / product_exclude_minus_one); + out_dims.emplace_back(out_dim_expr); + } + return out_dims; + }(); + + symbol::ShapeOrDataDimExprs shape_data = [&] { + if (x_dim_expr.data().has_value()) { + return symbol::TensorShapeOrDataDimExprs(out_dims, + x_dim_expr.data().value()); + } + return symbol::TensorShapeOrDataDimExprs(out_dims); + }(); + + infer_context->SetShapeOrDataForValue(op->result(0), shape_data); + return true; +} + +bool Reshape_OpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + return ReshapeOpInferSymbolicShape(op, infer_context); +} + +bool ShapeOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + const symbol::ShapeOrDataDimExprs &operand_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + const auto &out_data = operand_shape_or_data.shape(); + const std::vector shape{std::int64_t(out_data.size())}; + symbol::ShapeOrDataDimExprs shape_or_data{ + symbol::TensorShapeOrDataDimExprs(shape, out_data)}; + + infer_context->SetShapeOrDataForValue(op->result(0), shape_or_data); + return true; +} + +bool ShapeSrOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + return ShapeOpInferSymbolicShape(op, infer_context); +} + +bool SliceOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + pir::Value operand_source = op->operand_source(0); + pir::Value operand_starts = op->operand_source(1); + pir::Value operand_ends = op->operand_source(2); + pir::Value res = op->result(0); + + const symbol::ShapeOrDataDimExprs &starts_shape_data = + infer_context->GetShapeOrDataForValue(operand_starts); + const symbol::ShapeOrDataDimExprs &ends_shape_data = + infer_context->GetShapeOrDataForValue(operand_ends); + + std::vector axes_vec = details::GetVectorAttr(op, "axes"); + + ExprVec starts = slice_utils::GetExprVecFromData(starts_shape_data); + ExprVec ends = slice_utils::GetExprVecFromData(ends_shape_data); + + std::vector infer_flags = details::GetVectorAttr(op, "infer_flags"); + const std::vector decrease_axis = + details::GetVectorAttr(op, "decrease_axis"); + + infer_context->SetShapeOrDataForValue( + res, + slice_utils::SliceRawInferSymbolicShape(operand_source, + res, + starts, + ends, + axes_vec, + infer_flags, + decrease_axis, + infer_context)); + + return true; +} + +bool SplitOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + // input + const auto &x_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + PADDLE_ENFORCE_EQ(x_shape_or_data.data().has_value(), + false, + common::errors::InvalidArgument( + "InferSymbolicShape of SplitOp only support input with " + "value now.")); + const auto &x_dims_sym = x_shape_or_data.shape(); + + // axis + CHECK(op->operand_source(2).defining_op()->isa()); + + int64_t axis = op->operand_source(2) + .defining_op() + .attributes() + .at("value") + .dyn_cast() + .data() + .to(); + size_t rank = x_dims_sym.size(); + axis = axis >= 0 ? 
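+ // a negative split axis counts from the end: add the rank (clamped at 0) +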
axis : std::max(int64_t(0), int64_t(axis + rank)); + + // sections + const std::vector §ions_sym = + details::GetExprVecFromData( + infer_context->GetShapeOrDataForValue(op->operand_source(1))); + + // output + const symbol::TensorListShapeOrDataDimExprs &output_shape_data_list = [&] { + const auto &GetSum = [&](const auto &dim_exprs, const auto &Filter) { + symbol::DimExpr sum{0}; + for (const auto &dim_expr : dim_exprs) { + if (Filter(dim_expr)) { + sum = sum + dim_expr; + } + } + return sum; + }; + const auto &All = [&](const auto &dim_exprs, const auto &Cond) { + for (const auto &dim_expr : dim_exprs) { + if (!Cond(dim_expr)) { + return false; + } + } + return true; + }; + const auto &IsNotMinusOne = [&](const symbol::DimExpr &dim_expr) { + if (dim_expr.isa()) { + return dim_expr.dyn_cast() != static_cast(-1); + } + return true; + }; + const auto &sum_exclude_minus_one = GetSum(sections_sym, IsNotMinusOne); + + const bool &all_sections_sym_not_minus_one = + All(sections_sym, IsNotMinusOne); + if (all_sections_sym_not_minus_one) { + infer_context->AddEqualCstr(x_dims_sym.at(axis), sum_exclude_minus_one); + } + + symbol::TensorListShapeOrDataDimExprs shape_data_list; + std::vector output_dims_sym = x_dims_sym; + if (!all_sections_sym_not_minus_one && sections_sym.size() == 1) { + VLOG(3) << "[SplitOp]-1 is the only split section. The output shape is " + "identical to the input shape."; + shape_data_list.push_back( + symbol::TensorShapeOrDataDimExprs(output_dims_sym)); + return shape_data_list; + } + for (uint32_t idx = 0; idx < sections_sym.size(); idx++) { + const auto §ion_sym = sections_sym.at(idx); + output_dims_sym.at(axis) = + IsNotMinusOne(section_sym) + ? section_sym + : x_dims_sym.at(axis) - sum_exclude_minus_one; + + shape_data_list.push_back( + symbol::TensorShapeOrDataDimExprs(output_dims_sym)); + } + return shape_data_list; + }(); + + infer_context->SetShapeOrDataForValue( + op->result(0), symbol::ShapeOrDataDimExprs{output_shape_data_list}); + + return true; +} + +bool SplitWithNumOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const symbol::ShapeOrDataDimExprs &axis_shape_data = + infer_context->GetShapeOrDataForValue(op->operand_source(1)); + PADDLE_ENFORCE_EQ( + axis_shape_data.data().has_value(), + true, + common::errors::InvalidArgument( + "In InferSymbolicShape, axis of SplitWithNumOp is null")); + const std::vector &axis_data = + axis_shape_data.data().value(); + PADDLE_ENFORCE_EQ( + axis_data.size() == 1, + true, + common::errors::InvalidArgument( + "In SplitWithNumOp, data of axis should be one dimension")); + + const auto &attributes = op->attributes(); + int num = attributes.at("num").dyn_cast().data(); + + const auto &x_s_or_d = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + int rank = x_s_or_d.shape().size(); + + const auto &out_s_d = [&](int64_t split_axis, int64_t res_num) { + symbol::DimExpr input_axis_dim = x_s_or_d.shape().at(split_axis); + symbol::DimExpr axis_shape = input_axis_dim / symbol::DimExpr{res_num}; + + std::vector res_s_d; + for (size_t i = 0; i < x_s_or_d.shape().size(); ++i) { + const auto &sym_dim = split_axis == static_cast(i) + ? axis_shape + : x_s_or_d.shape().at(i); + res_s_d.push_back(sym_dim); + } + return symbol::TensorShapeOrDataDimExprs(res_s_d); + }; + + if (axis_data.at(0).isa()) { + // case 1: DimExpr of axis is int. 
axis_shape_or_data: {shape:{1}, + // data:{3}} eg: axis generator op is full_op and assign_op + int64_t axis = axis_data[0].dyn_cast<int64_t>(); + axis = axis < 0 ? axis + rank : axis; + symbol::TensorListShapeOrDataDimExprs res_list_s_d(num, out_s_d(axis, num)); + infer_context->SetShapeOrDataForValue( + op->result(0), symbol::ShapeOrDataDimExprs{res_list_s_d}); + } else if (axis_data.at(0).isa<std::string>()) { + // case 2: DimExpr of axis is a symbol(string). axis_shape_or_data: + // {shape:{1}, data:{s0}} eg: axis generator op is data_op + int candidate_axis = -1; + int count = 0; + for (size_t i = 0; i < x_s_or_d.shape().size(); ++i) { + if (x_s_or_d.shape().at(i).isa<int64_t>()) { + if (x_s_or_d.shape().at(i).dyn_cast<int64_t>() % num == 0) { + count++; + candidate_axis = i; + } + } else { + PADDLE_THROW(common::errors::InvalidArgument( + "Each dimension of X must be a constant int64_t when the " + "split axis is symbolic.")); + } + } + if (count == 1) { + // calculate the axis of split_with_num_op + symbol::TensorListShapeOrDataDimExprs res_list_s_d( + num, out_s_d(candidate_axis, num)); + infer_context->SetShapeOrDataForValue( + op->result(0), symbol::ShapeOrDataDimExprs{res_list_s_d}); + } else { + // create new Symbol + std::vector<symbol::DimExpr> res_s; + for (size_t i = 0; i < x_s_or_d.shape().size(); ++i) { + const auto &s_dim = + x_s_or_d.shape().at(i).dyn_cast<int64_t>() % num == 0 + ? symbol::DimExpr(infer_context->GetNextSymName()) + : x_s_or_d.shape().at(i); + res_s.emplace_back(s_dim); + } + const symbol::TensorShapeOrDataDimExprs &res_s_d = + symbol::TensorShapeOrDataDimExprs(res_s); + symbol::TensorListShapeOrDataDimExprs res_list_s_d(num, res_s_d); + infer_context->SetShapeOrDataForValue( + op->result(0), symbol::ShapeOrDataDimExprs{res_list_s_d}); + } + } else { + PADDLE_THROW(common::errors::InvalidArgument( + "The type of axis must be int64_t or string.")); + } + return true; +} + +bool SumOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + bool keepdim = GetBoolAttr(op, "keepdim"); + bool reduce_all = false; + + auto axis_gen_op = op->operand_source(1).defining_op(); + if (axis_gen_op->isa<paddle::dialect::FullIntArrayOp>()) { + std::vector<int64_t> axis = details::GetVectorAttr( + axis_gen_op->dyn_cast<paddle::dialect::FullIntArrayOp>(), "value"); + if (axis.size() == 0) { + reduce_all = true; + } + return details::ReduceInferDim( + op, infer_context, axis, keepdim, reduce_all); + } else { + // TODO(lanxianghit): deal with other source: pir::VectorType, + // paddle::dialect::DenseTensorType + PADDLE_THROW( + common::errors::Unimplemented("SumOpInferSymbolicShape: 'axis' only " + "support FullIntArrayOp's result now.")); + } + + return true; +} + +bool TileOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + pir::Value operand_x = op->operand_source(0); + symbol::ShapeOrDataDimExprs x_shape_or_data = + infer_context->GetShapeOrDataForValue(operand_x); + pir::Value operand_repeat_times = op->operand_source(1); + symbol::ShapeOrDataDimExprs repeat_times_shape_or_data = + infer_context->GetShapeOrDataForValue(operand_repeat_times); + + std::vector<symbol::DimExpr> x_dimexpr = x_shape_or_data.shape(); + std::vector<symbol::DimExpr> repeat_times_dimexpr = + details::GetExprVecFromData(repeat_times_shape_or_data); + if (repeat_times_dimexpr.empty()) { + repeat_times_dimexpr = std::vector<symbol::DimExpr>(x_dimexpr.size(), 1); + } + + auto out_rank = std::max(static_cast<size_t>(x_dimexpr.size()), + repeat_times_dimexpr.size()); + std::vector<symbol::DimExpr> out_shape(out_rank); + if (x_dimexpr.size() > repeat_times_dimexpr.size()) { + auto diff = x_dimexpr.size() - repeat_times_dimexpr.size(); + repeat_times_dimexpr.insert(repeat_times_dimexpr.begin(),
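+ // left-pad repeat_times with 1s so both vectors reach out_rank before the + // element-wise product below +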
diff, 1); + } else { + auto diff = repeat_times_dimexpr.size() - x_dimexpr.size(); + x_dimexpr.insert(x_dimexpr.begin(), diff, 1); + } + + for (size_t i = 0; i < repeat_times_dimexpr.size(); ++i) { + out_shape.at(i) = x_dimexpr.at(i) * repeat_times_dimexpr.at(i); + } + + symbol::ShapeOrDataDimExprs shape_data{ + symbol::TensorShapeOrDataDimExprs(out_shape)}; + + pir::Value res = op->result(0); + infer_context->SetShapeOrDataForValue(res, shape_data); + + return true; +} + +bool TopkOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + symbol::ShapeOrDataDimExprs x_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + symbol::ShapeOrDataDimExprs k_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(1)); + const auto &attributes = op->attributes(); + int axis = attributes.at("axis").dyn_cast().data(); + const std::vector &in_dims_sym = [&] { + std::vector dims; + if (x_shape_or_data.data().has_value()) { + dims = x_shape_or_data.data().value(); + } else { + dims = x_shape_or_data.shape(); + } + return dims; + }(); + + int x_rank = in_dims_sym.size(); + + int k = k_shape_or_data.data().value().at(0).Get(); + + if (axis < 0) axis += x_rank; + const auto &out_sym_shape = [&] { + std::vector out_sym_shape; + for (int i = 0; i < x_rank; ++i) { + if (i == axis) { + out_sym_shape.push_back(symbol::DimExpr(k)); + } else { + out_sym_shape.push_back(in_dims_sym.at(i)); + } + } + return out_sym_shape; + }(); + + symbol::ShapeOrDataDimExprs shape_data{ + symbol::TensorShapeOrDataDimExprs(out_sym_shape)}; + + infer_context->SetShapeOrDataForValue(op->result(0), shape_data); + infer_context->SetShapeOrDataForValue(op->result(1), shape_data); + + return true; +} + +bool TopkV1OpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + return TopkOpInferSymbolicShape(op, infer_context); +} + +bool TransposeOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + std::vector perm = + op->attributes().at("perm").dyn_cast().AsVector(); + if (perm.size() == 1) { + // perm must be [0], which means nothing to do with input, just copy the + // info from input + infer_context->SetShapeOrDataForValue( + op->result(0), + infer_context->GetShapeOrDataForValue(op->operand_source(0))); + return true; + } + const std::vector &x_dims = [&] { + std::vector dims; + const auto &x_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + if (x_shape_or_data.data().has_value()) { + dims = x_shape_or_data.data().value(); + } else { + dims = x_shape_or_data.shape(); + } + return dims; + }(); + + int x_rank = x_dims.size(); + + const std::vector formatted_axis = [x_rank, &perm] { + std::vector out(perm.size(), 0); + std::transform(perm.begin(), + perm.end(), + out.begin(), + [](pir::Attribute &p) -> int32_t { + return p.dyn_cast().data(); + }); + + // format the negative axis + std::for_each(out.begin(), out.end(), [x_rank](int32_t &v) { + if (v < 0) { + v += x_rank; + } + }); + return out; + }(); + + int axis_size = static_cast(formatted_axis.size()); + + std::vector out_dims(x_dims); + for (int i = 0; i < axis_size; ++i) { + out_dims.at(i) = x_dims.at(formatted_axis.at(i)); + } + + infer_context->SetShapeOrDataForValue(op->result(0), + ShapeOrData{TensorExprs(out_dims)}); + + return true; +} + +bool Transpose_OpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + return TransposeOpInferSymbolicShape(op, 
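+ // transpose maps out_dims.at(i) = x_dims.at(perm.at(i)), e.g. + // x = [2, 3, 4] with perm = [0, 2, 1] gives out = [2, 4, 3]. +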
infer_context); +} + +bool SqueezeOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + PADDLE_ENFORCE_EQ( + op->num_operands(), + 2, + common::errors::InvalidArgument( + "SqueezeOpInferSymbolicShape ONLY support num_operands() == 2 " + "now, but got %d operands", + op->num_operands())); + + auto x_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + auto axes_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(1)); + + std::vector in_dims_sym; + if (x_shape_or_data.data().has_value()) { + in_dims_sym = x_shape_or_data.data().value(); + } else { + in_dims_sym = x_shape_or_data.shape(); + } + + std::vector squeeze_dims_sym; + if (axes_shape_or_data.data().has_value()) { + squeeze_dims_sym = axes_shape_or_data.data().value(); + } else { + squeeze_dims_sym = axes_shape_or_data.shape(); + } + + std::vector squeeze_dims; + for (auto squeeze_dim : squeeze_dims_sym) { + PADDLE_ENFORCE_EQ( + squeeze_dim.Has(), + true, + common::errors::InvalidArgument( + "in SqueezeOpInferSymbolicShape, axes must be known int type, " + "but got: %s", + symbol::ToString(squeeze_dim))); + squeeze_dims.emplace_back( + static_cast(squeeze_dim.Get())); + } + + // GetOutputSqueezeShape + size_t num_squeeze_dims = squeeze_dims.size(); + std::vector should_squeeze(in_dims_sym.size(), false); + // Mark dimensions need to be squeezed. + if (num_squeeze_dims == 0) { + for (size_t i = 0; i < in_dims_sym.size(); ++i) { + // TODO(lanxianghit): if symbol here, maybe we need the result of dim expr + // simplification + if (in_dims_sym.at(i) == 1) { + should_squeeze.at(i) = true; + } + } + } else { + for (size_t i = 0; i < num_squeeze_dims; ++i) { + if (in_dims_sym.size() == 0) { + continue; + } + int current = squeeze_dims.at(i) < 0 + ? squeeze_dims.at(i) + in_dims_sym.size() + : squeeze_dims.at(i); + + if (!should_squeeze.at(current)) { + // At compile time, dim of SYMBOL is allowed to squeeze? + if (in_dims_sym.at(current) == 1) { + should_squeeze.at(current) = true; + } else if (!in_dims_sym.at(current).Has()) { + should_squeeze.at(current) = true; + } else { + should_squeeze.at(current) = true; + } + } + } + } + + // Make output dimensions + std::vector output_shape_sym; + for (size_t i = 0; i < in_dims_sym.size(); ++i) { + if (!should_squeeze.at(i)) { + output_shape_sym.emplace_back(in_dims_sym.at(i)); + } + } + + symbol::ShapeOrDataDimExprs shape_data{ + symbol::TensorShapeOrDataDimExprs(output_shape_sym)}; + + pir::Value res = op->result(0); + infer_context->SetShapeOrDataForValue(res, shape_data); + infer_context->SetShapeOrDataForValue( + op->result(1), CreateShapeOrDataForXShape(x_shape_or_data)); + + return true; +} +bool Squeeze_OpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + return SqueezeOpInferSymbolicShape(op, infer_context); +} + +bool UnbindOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + // input + const auto &x_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + PADDLE_ENFORCE_EQ( + x_shape_or_data.data().has_value(), + false, + common::errors::InvalidArgument( + "InferSymbolicShape of UnbindOp only support input with " + "value now.")); + const auto &x_dims_sym = x_shape_or_data.shape(); + + // axis + int axis = op->attributes().at("axis").dyn_cast().data(); + int rank = x_dims_sym.size(); + axis = axis >= 0 ? 
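+ // normalize a negative axis; the unbound dim itself must be a constant so + // the number of outputs is known at compile time. +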
axis : axis + rank; + + // output + const symbol::TensorListShapeOrDataDimExprs &output_shape_data_list = [&] { + symbol::TensorListShapeOrDataDimExprs shape_data_list; + std::vector output_dims_sym = x_dims_sym; + + const symbol::DimExpr &unbound_dim = x_dims_sym.at(axis); + PADDLE_ENFORCE_EQ(unbound_dim.isa(), + true, + common::errors::InvalidArgument( + "InferSymbolicShape of UnbindOp only support unbound " + "dim with constant length!")); + output_dims_sym.erase(output_dims_sym.begin() + axis); + const int64_t unbound_dim_length = unbound_dim.dyn_cast(); + + for (uint32_t idx = 0; idx < unbound_dim_length; idx++) { + shape_data_list.push_back( + symbol::TensorShapeOrDataDimExprs(output_dims_sym)); + } + return shape_data_list; + }(); + + infer_context->SetShapeOrDataForValue( + op->result(0), symbol::ShapeOrDataDimExprs{output_shape_data_list}); + + return true; +} + +bool UniqueOpInferSymbolicShape(pir::Operation *op, + pir::InferSymbolicShapeContext *infer_context) { + const auto &x_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + PADDLE_ENFORCE_EQ( + x_shape_or_data.data().has_value(), + false, + common::errors::InvalidArgument( + "InferSymbolicShape of UniqueOp only support input with " + "value now.")); + const auto &x_dims_sym = x_shape_or_data.shape(); + const size_t rank = x_dims_sym.size(); + std::vector axes = + paddle::dialect::details::GetVectorAttr(op, "axis"); + + symbol::DimExpr unique_dim_sym = + infer_context->GetNextSymName(); // unknown until runtime + + const std::vector &counts_dims = [&] { + std::vector out_dims; + out_dims.push_back(unique_dim_sym); + return out_dims; + }(); + + const std::vector &index_dims = counts_dims; + + const std::vector &out_dims = [&] { + if (axes.empty()) { + return counts_dims; + } + std::vector out_dims = x_dims_sym; + int axis = axes.at(0); + axis = axis >= 0 ? axis : axis + rank; + out_dims.at(axis) = unique_dim_sym; + return out_dims; + }(); + + const std::vector &inverse_dims = [&] { + std::vector inverse_dims; + if (axes.empty()) { + // flatten before unique + symbol::DimExpr product{1}; + for (const auto &x_dim : x_dims_sym) { + product = product * x_dim; + } + inverse_dims.push_back(product); + } else { + int axis = axes.at(0); + axis = axis >= 0 ? axis : axis + rank; + inverse_dims.push_back(x_dims_sym.at(axis)); + } + return inverse_dims; + }(); + + bool return_index = GetBoolAttr(op, "return_index"); + bool return_inverse = GetBoolAttr(op, "return_inverse"); + bool return_counts = GetBoolAttr(op, "return_counts"); + + symbol::ShapeOrDataDimExprs empty{symbol::TensorShapeOrDataDimExprs{}}; + infer_context->SetShapeOrDataForValue( + op->result(0), symbol::TensorShapeOrDataDimExprs{out_dims}); + infer_context->SetShapeOrDataForValue( + op->result(1), + return_index ? symbol::TensorShapeOrDataDimExprs{index_dims} : empty); + infer_context->SetShapeOrDataForValue( + op->result(2), + return_inverse ? symbol::TensorShapeOrDataDimExprs{inverse_dims} : empty); + infer_context->SetShapeOrDataForValue( + op->result(3), + return_counts ? 
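+ // optional outputs get an empty placeholder shape when their return_* flag + // is off +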
symbol::TensorShapeOrDataDimExprs{counts_dims} : empty); + + return true; +} + +bool UniqueConsecutiveOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const auto &x_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + PADDLE_ENFORCE_EQ( + x_shape_or_data.data().has_value(), + false, + common::errors::InvalidArgument( + "InferSymbolicShape of UniqueConsecutiveOp only support input with " + "value now.")); + const auto &x_dims_sym = x_shape_or_data.shape(); + const size_t rank = x_dims_sym.size(); + std::vector axes = + paddle::dialect::details::GetVectorAttr(op, "axis"); + + symbol::DimExpr unique_dim_sym = + infer_context->GetNextSymName(); // unknown until runtime + + const std::vector &counts_dims = [&] { + std::vector out_dims; + out_dims.push_back(unique_dim_sym); + return out_dims; + }(); + + const std::vector &out_dims = [&] { + if (axes.empty()) { + return counts_dims; + } + std::vector out_dims = x_dims_sym; + int axis = axes.at(0); + axis = axis >= 0 ? axis : axis + rank; + out_dims.at(axis) = unique_dim_sym; + return out_dims; + }(); + + const std::vector &inverse_dims = [&] { + std::vector inverse_dims; + if (axes.empty()) { + // flatten before unique + symbol::DimExpr product{1}; + for (const auto &x_dim : x_dims_sym) { + product = product * x_dim; + } + inverse_dims.push_back(product); + } else { + int axis = axes.at(0); + axis = axis >= 0 ? axis : axis + rank; + inverse_dims.push_back(x_dims_sym.at(axis)); + } + return inverse_dims; + }(); + + bool return_inverse = GetBoolAttr(op, "return_inverse"); + bool return_counts = GetBoolAttr(op, "return_counts"); + + symbol::ShapeOrDataDimExprs empty{symbol::TensorShapeOrDataDimExprs{}}; + infer_context->SetShapeOrDataForValue( + op->result(0), symbol::TensorShapeOrDataDimExprs{out_dims}); + infer_context->SetShapeOrDataForValue( + op->result(1), + return_inverse ? symbol::TensorShapeOrDataDimExprs{inverse_dims} : empty); + infer_context->SetShapeOrDataForValue( + op->result(2), + return_counts ? symbol::TensorShapeOrDataDimExprs{counts_dims} : empty); + + return true; +} + +bool UnsqueezeOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + PADDLE_ENFORCE_EQ( + op->num_operands(), + 2, + common::errors::InvalidArgument( + "UnsqueezeOp InferSymbolicShape ONLY support num_operands() == 2 " + "now, but got %d operands", + op->num_operands())); + + auto x_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(0)); + auto axes_shape_or_data = + infer_context->GetShapeOrDataForValue(op->operand_source(1)); + + std::vector x_sym_shape; + if (x_shape_or_data.data().has_value()) { + x_sym_shape = x_shape_or_data.data().value(); + } else { + x_sym_shape = x_shape_or_data.shape(); + } + int x_dims_size = x_sym_shape.size(); + + std::vector axes_sym; + if (axes_shape_or_data.data().has_value()) { + axes_sym = axes_shape_or_data.data().value(); + } else { + axes_sym = axes_shape_or_data.shape(); + } + int axes_sym_size = axes_sym.size(); + + // GetUnsqueezeShape + int output_rank = x_dims_size + axes_sym_size; + std::vector result_sym_dims(output_rank, 0); + + int cur_output_rank = x_dims_size; + for (auto axis_expr : axes_sym) { + PADDLE_ENFORCE_EQ( + axis_expr.Has(), + true, + common::errors::InvalidArgument( + "in UnsqueezeOpInferSymbolicShape, axes must be known int type, " + "but got: %s", + symbol::ToString(axis_expr))); + int axis = static_cast(axis_expr.Get()); + int cur = axis < 0 ? 
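+ // a negative axis is interpreted relative to the rank after this insertion, + // hence the extra +1 +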
axis + cur_output_rank + 1 : axis; + + // Move old axis, and insert new axis + for (int i = cur_output_rank; i >= cur; --i) { + if (result_sym_dims.at(i) == 1) { + // Move axis + result_sym_dims.at(i + 1) = 1; + result_sym_dims.at(i) = 0; + } + } + result_sym_dims.at(cur) = 1; + // Add the output size. + cur_output_rank++; + } + + // Make output shape + for (int in_idx = 0, out_idx = 0; out_idx < output_rank; ++out_idx) { + if (result_sym_dims.at(out_idx) == 0) { + result_sym_dims.at(out_idx) = x_sym_shape.at(in_idx++); + } + } + + symbol::ShapeOrDataDimExprs shape_data{ + symbol::TensorShapeOrDataDimExprs(result_sym_dims)}; + + pir::Value res = op->result(0); + infer_context->SetShapeOrDataForValue(res, shape_data); + infer_context->SetShapeOrDataForValue( + op->result(1), CreateShapeOrDataForXShape(x_shape_or_data)); + + return true; +} +bool Unsqueeze_OpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + return UnsqueezeOpInferSymbolicShape(op, infer_context); +} + +} // namespace paddle::dialect diff --git a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/unary_infer_sym-checkpoint.h b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/unary_infer_sym-checkpoint.h new file mode 100644 index 0000000000000..c8509042d3764 --- /dev/null +++ b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/unary_infer_sym-checkpoint.h @@ -0,0 +1,96 @@ +// Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once + +#include "paddle/pir/include/dialect/shape/utils/shape_analysis.h" + +namespace paddle::dialect { +OP_DECLARE_INFER_SYMBOLIC_SHAPE(All) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Amax) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Amin) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Any) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Argmax) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Argmin) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(AsComplex) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(AsReal) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Assign) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Assign_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(AsStrided) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(BipartiteMatch) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cast) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cast_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cholesky) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(ClipByNorm) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(ClipByNormSr) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cummax) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cummin) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cumprod) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cumprod_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cumsum) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cumsum_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(ChannelShuffle) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(DiagEmbed) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Diagonal) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(DistributeFpnProposals) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Eigh) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Eigvalsh) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(FakeChannelWiseQuantizeAbsMax) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(FftC2c) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(FftC2r) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(FftR2c) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(FillDiagonal) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(FillDiagonal_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Flatten) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Flatten_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Fold) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(IdentityLoss) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(IdentityLoss_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Kthvalue) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(LpPool2d) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Logcumsumexp) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Logsumexp) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Max) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Maxout) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Min) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(MeanAll) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Nonzero) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Numel) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Pad) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Pad3d) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Pool2d) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Prod) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(RepeatInterleave) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Reshape) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Reshape_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Shape) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(ShapeSr) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Slice) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Split) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(SplitWithNum) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Squeeze) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Squeeze_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Sum) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Tile) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Topk) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(TopkV1) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Transpose) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Transpose_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Unbind) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Unique) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(UniqueConsecutive) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Unsqueeze) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(Unsqueeze_) + +} // namespace paddle::dialect diff --git a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/multiary_infer_sym.cc b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/multiary_infer_sym.cc 
index 1c5d3ae07be03..480ec530a9ea2 100644 --- a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/multiary_infer_sym.cc +++ b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/multiary_infer_sym.cc @@ -1203,74 +1203,73 @@ bool Where_OpInferSymbolicShape(pir::Operation *op, bool YoloLossOpInferSymbolicShape( pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const auto &dim_x = + const auto &x_shape = infer_context->GetShapeOrDataForValue(op->operand_source(0)).shape(); - const auto &dim_gtbox = + const auto &box_shape = infer_context->GetShapeOrDataForValue(op->operand_source(1)).shape(); - const auto &dim_gtlabel = + const auto &label_shape = infer_context->GetShapeOrDataForValue(op->operand_source(2)).shape(); - std::vector<int> anchors_mask = - paddle::dialect::details::GetVectorAttr<int>(op, "anchors_mask"); - int mask_num = static_cast<int>(anchors_mask.size()); + const std::vector<int> &anchors_mask = + paddle::dialect::details::GetVectorAttr<int>(op, "anchor_mask"); + int mask_num = static_cast<int>(anchors_mask.size()); int class_num = op->attribute<pir::Int32Attribute>("class_num").data(); - PADDLE_ENFORCE_EQ(dim_x.size(), + PADDLE_ENFORCE_EQ(x_shape.size(), 4, phi::errors::InvalidArgument( "Input(X) should be a 4-D tensor. But received " "X dimension size(%s)", - dim_x.size())); + x_shape.size())); PADDLE_ENFORCE_EQ( - dim_gtbox.size(), + box_shape.size(), 3, phi::errors::InvalidArgument("Input(GTBox) should be a 3-D tensor, but " "received gtbox dimension size(%s)", - dim_gtbox.size())); - PADDLE_ENFORCE_EQ( - dim_gtbox[2], - 4, - phi::errors::InvalidArgument("Input(GTBox) dim[2] should be 4", - "But receive dim[2](%s) != 5. ", - dim_gtbox[2])); - PADDLE_ENFORCE_EQ(dim_gtlabel.size(), + box_shape.size())); + PADDLE_ENFORCE_EQ(label_shape.size(), 2, phi::errors::InvalidArgument( "Input(GTLabel) should be a 2-D tensor," "But received Input(GTLabel) dimension size(%s) != 2.", - dim_gtlabel.size())); - infer_context->AddEqualCstr(dim_x[2], dim_x[3]); - infer_context->AddEqualCstr(dim_x[1], mask_num * (5 + class_num)); - infer_context->AddEqualCstr(dim_gtlabel[0], dim_gtbox[0]); - infer_context->AddEqualCstr(dim_gtlabel[1], dim_gtbox[1]); - - const auto &dim_gtscore = - infer_context->GetShapeOrDataForValue(op->operand_source(3)).shape(); - PADDLE_ENFORCE_EQ( - dim_gtscore.size(), - 2, - phi::errors::InvalidArgument("Input(GTScore) should be a 2-D tensor" - "But received GTScore dimension(%s)", - dim_gtbox.size())); - infer_context->AddEqualCstr(dim_gtscore[0], dim_gtbox[0]); - infer_context->AddEqualCstr(dim_gtscore[1], dim_gtbox[1]); + label_shape.size())); + infer_context->AddEqualCstr(box_shape[2], symbol::DimExpr(4)); + infer_context->AddEqualCstr(x_shape[2], x_shape[3]); + infer_context->AddEqualCstr(x_shape[1], + symbol::DimExpr(mask_num * (5 + class_num))); + infer_context->AddEqualCstr(label_shape[0], box_shape[0]); + infer_context->AddEqualCstr(label_shape[1], box_shape[1]); + + if (op->operand_source(3) != nullptr) { + const auto &score_shape = + infer_context->GetShapeOrDataForValue(op->operand_source(3)).shape(); + PADDLE_ENFORCE_EQ( + score_shape.size(), + 2, + phi::errors::InvalidArgument("Input(GTScore) should be a 2-D tensor, " + "but received GTScore dimension(%s)", + score_shape.size())); + infer_context->AddEqualCstr(score_shape[0], box_shape[0]); + infer_context->AddEqualCstr(score_shape[1], box_shape[1]); + } - std::vector<symbol::DimExpr> dim_out = {dim_x[0]}; + std::vector<symbol::DimExpr> out_shape = {x_shape[0]}; infer_context->SetShapeOrDataForValue( op->result(0), -
symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs(dim_out)}); + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(out_shape)}); - std::vector<symbol::DimExpr> dim_obj_mask = { - dim_x[0], mask_num, dim_x[2], dim_x[3]}; + std::vector<symbol::DimExpr> obj_mask_shape = { + x_shape[0], symbol::DimExpr(mask_num), x_shape[2], x_shape[3]}; infer_context->SetShapeOrDataForValue( op->result(1), symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(dim_obj_mask)}); + symbol::TensorShapeOrDataDimExprs(obj_mask_shape)}); - std::vector<symbol::DimExpr> dim_gt_match_mask = {dim_gtbox[0], dim_gtbox[1]}; + std::vector<symbol::DimExpr> match_mask_shape = {box_shape[0], box_shape[1]}; infer_context->SetShapeOrDataForValue( op->result(2), symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(dim_gt_match_mask)}); + symbol::TensorShapeOrDataDimExprs(match_mask_shape)}); return true; } diff --git a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/same_operands_result.cc b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/same_operands_result.cc index 22d202775eb17..3eb6b62bdc1fd 100644 --- a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/same_operands_result.cc +++ b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/same_operands_result.cc @@ -43,7 +43,6 @@ OP_SAME_OPERANDS_AND_RESULT(Atan) OP_SAME_OPERANDS_AND_RESULT(Atan_) OP_SAME_OPERANDS_AND_RESULT(Atanh) OP_SAME_OPERANDS_AND_RESULT(Atanh_) -OP_SAME_OPERANDS_AND_RESULT(AsStrided) OP_SAME_OPERANDS_AND_RESULT(Bernoulli) OP_SAME_OPERANDS_AND_RESULT(BitwiseNot) OP_SAME_OPERANDS_AND_RESULT(BitwiseNot_) diff --git a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/same_operands_result.h b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/same_operands_result.h index ed3565456c841..2e84c7297643f 100644 --- a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/same_operands_result.h +++ b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/same_operands_result.h @@ -31,7 +31,6 @@ OP_DECLARE_INFER_SYMBOLIC_SHAPE(Asinh) OP_DECLARE_INFER_SYMBOLIC_SHAPE(Asinh_) OP_DECLARE_INFER_SYMBOLIC_SHAPE(Assign) OP_DECLARE_INFER_SYMBOLIC_SHAPE(Assign_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(AsStrided) OP_DECLARE_INFER_SYMBOLIC_SHAPE(Atan) OP_DECLARE_INFER_SYMBOLIC_SHAPE(Atan_) OP_DECLARE_INFER_SYMBOLIC_SHAPE(Atanh) diff --git a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/unary_infer_sym.cc b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/unary_infer_sym.cc index 56df979e6cd44..6f3cbef0e5346 100644 --- a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/unary_infer_sym.cc +++ b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/unary_infer_sym.cc @@ -361,6 +361,25 @@ bool Assign_OpInferSymbolicShape( return AssignOpInferSymbolicShape(op, infer_context); } +bool AsStridedOpInferSymbolicShape( + pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { + const std::vector<int64_t> &shape = + paddle::dialect::details::GetVectorAttr<int64_t>(op, "dims"); + + int rank = shape.size(); + std::vector<symbol::DimExpr> out_shape; + for (int i = 0; i < rank; ++i) { + out_shape.push_back(symbol::DimExpr(shape[i])); + } + + infer_context->SetShapeOrDataForValue( + op->result(0), + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(out_shape)}); + + return true; +} + bool BipartiteMatchOpInferSymbolicShape( pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { const auto &dist_mat_shape_or_data = @@ -888,18 +907,77 @@ bool
Flatten_OpInferSymbolicShape( bool FoldOpInferSymbolicShape(pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const auto &in_dims = + const auto &x_shape = infer_context->GetShapeOrDataForValue(op->operand_source(0)).shape(); - std::vector out_dims; - out_dims.push_back(in_dims[0]); - std::vector kernel_sizes = + std::vector out_shape; + out_shape.push_back(x_shape[0]); + + const std::vector &output_sizes = + paddle::dialect::details::GetVectorAttr(op, "output_sizes"); + PADDLE_ENFORCE_EQ( + output_sizes.size(), + 2, + common::errors::InvalidArgument( + "It is expected output_size equals to 2, but got size %d", + output_sizes.size())); + infer_context->AddGreatThanOneCstr(output_sizes[0]); + infer_context->AddGreatThanOneCstr(output_sizes[1]); + + const std::vector &kernel_sizes = paddle::dialect::details::GetVectorAttr(op, "kernel_sizes"); - out_dims.push_back(in_dims[1] / (kernel_sizes[0] * kernel_sizes[1])); + const std::vector &dilations = + paddle::dialect::details::GetVectorAttr(op, "dilations"); + const std::vector &strides = + paddle::dialect::details::GetVectorAttr(op, "strides"); + const std::vector &paddings = + paddle::dialect::details::GetVectorAttr(op, "paddings"); + + PADDLE_ENFORCE_EQ( + kernel_sizes.size(), + 2, + common::errors::InvalidArgument( + "It is expected kernel_size equals to 2, but got size %d", + kernel_sizes.size())); + PADDLE_ENFORCE_EQ( + strides.size(), + 2, + common::errors::InvalidArgument( + "It is expected strides_size equals to 2, but got size %d", + strides.size())); + PADDLE_ENFORCE_EQ( + paddings.size(), + 4, + common::errors::InvalidArgument( + "It is expected paddings_size equals to 4, but got size %d", + paddings.size())); + PADDLE_ENFORCE_EQ( + dilations.size(), + 2, + common::errors::InvalidArgument( + "It is expected dilations_size equals to 2, but got size %d", + dilations.size())); + + int blocks_height = (output_sizes[0] + 2 * paddings[0] - + (dilations[0] * (kernel_sizes[0] - 1) + 1)) / + strides[0] + + 1; + int blocks_width = (output_sizes[1] + 2 * paddings[1] - + (dilations[1] * (kernel_sizes[1] - 1) + 1)) / + strides[1] + + 1; + + infer_context->AddEqualCstr((blocks_height * blocks_width), x_shape[2]); + + out_shape.push_back(x_shape[1] / (kernel_sizes[0] * kernel_sizes[1])); + + out_shape.push_back(symbol::DimExpr(output_sizes[0])); + out_shape.push_back(symbol::DimExpr(output_sizes[1])); infer_context->SetShapeOrDataForValue( op->result(0), - symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs(out_dims)}); + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(out_shape)}); return true; } diff --git a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/unary_infer_sym.h b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/unary_infer_sym.h index 4390c63f99ec4..c8509042d3764 100644 --- a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/unary_infer_sym.h +++ b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/unary_infer_sym.h @@ -27,6 +27,7 @@ OP_DECLARE_INFER_SYMBOLIC_SHAPE(AsComplex) OP_DECLARE_INFER_SYMBOLIC_SHAPE(AsReal) OP_DECLARE_INFER_SYMBOLIC_SHAPE(Assign) OP_DECLARE_INFER_SYMBOLIC_SHAPE(Assign_) +OP_DECLARE_INFER_SYMBOLIC_SHAPE(AsStrided) OP_DECLARE_INFER_SYMBOLIC_SHAPE(BipartiteMatch) OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cast) OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cast_) From ca6bd5d0fb91b555be68217f319d0a09d2cc8185 Mon Sep 17 00:00:00 2001 From: Fripping <15010770306@163.com> Date: Wed, 14 Aug 2024 13:10:07 +0800 Subject: [PATCH 6/8] 
update -1 sign --- .../unary_infer_sym-checkpoint.cc | 78 +++++++++++++++++-- .../infer_symbolic_shape/unary_infer_sym.cc | 7 +- 2 files changed, 77 insertions(+), 8 deletions(-) diff --git a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/unary_infer_sym-checkpoint.cc b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/unary_infer_sym-checkpoint.cc index dace8ed3286d2..8af7fbc36289c 100644 --- a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/unary_infer_sym-checkpoint.cc +++ b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/unary_infer_sym-checkpoint.cc @@ -366,10 +366,15 @@ bool AsStridedOpInferSymbolicShape( const std::vector &shape = paddle::dialect::details::GetVectorAttr(op, "dims"); + symbol::DimExpr out_unknown = infer_context->GetNextSymName(); int rank = shape.size(); std::vector out_shape; for (int i = 0; i < rank; ++i) { - out_shape.push_back(symbol::DimExpr(shape[i])); + if (shape[i] == -1) { + out_shape.push_back(out_unknown); + } else { + out_shape.push_back(symbol::DimExpr(shape[i])); + } } infer_context->SetShapeOrDataForValue( @@ -907,18 +912,77 @@ bool Flatten_OpInferSymbolicShape( bool FoldOpInferSymbolicShape(pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const auto &in_dims = + const auto &x_shape = infer_context->GetShapeOrDataForValue(op->operand_source(0)).shape(); - std::vector out_dims; - out_dims.push_back(in_dims[0]); - std::vector kernel_sizes = + std::vector out_shape; + out_shape.push_back(x_shape[0]); + + const std::vector &output_sizes = + paddle::dialect::details::GetVectorAttr(op, "output_sizes"); + PADDLE_ENFORCE_EQ( + output_sizes.size(), + 2, + common::errors::InvalidArgument( + "It is expected output_size equals to 2, but got size %d", + output_sizes.size())); + infer_context->AddGreatThanOneCstr(output_sizes[0]); + infer_context->AddGreatThanOneCstr(output_sizes[1]); + + const std::vector &kernel_sizes = paddle::dialect::details::GetVectorAttr(op, "kernel_sizes"); - out_dims.push_back(in_dims[1] / (kernel_sizes[0] * kernel_sizes[1])); + const std::vector &dilations = + paddle::dialect::details::GetVectorAttr(op, "dilations"); + const std::vector &strides = + paddle::dialect::details::GetVectorAttr(op, "strides"); + const std::vector &paddings = + paddle::dialect::details::GetVectorAttr(op, "paddings"); + + PADDLE_ENFORCE_EQ( + kernel_sizes.size(), + 2, + common::errors::InvalidArgument( + "It is expected kernel_size equals to 2, but got size %d", + kernel_sizes.size())); + PADDLE_ENFORCE_EQ( + strides.size(), + 2, + common::errors::InvalidArgument( + "It is expected strides_size equals to 2, but got size %d", + strides.size())); + PADDLE_ENFORCE_EQ( + paddings.size(), + 4, + common::errors::InvalidArgument( + "It is expected paddings_size equals to 4, but got size %d", + paddings.size())); + PADDLE_ENFORCE_EQ( + dilations.size(), + 2, + common::errors::InvalidArgument( + "It is expected dilations_size equals to 2, but got size %d", + dilations.size())); + + int blocks_height = (output_sizes[0] + 2 * paddings[0] - + (dilations[0] * (kernel_sizes[0] - 1) + 1)) / + strides[0] + + 1; + int blocks_width = (output_sizes[1] + 2 * paddings[1] - + (dilations[1] * (kernel_sizes[1] - 1) + 1)) / + strides[1] + + 1; + + infer_context->AddEqualCstr((blocks_height * blocks_width), x_shape[2]); + + out_shape.push_back(x_shape[1] / (kernel_sizes[0] * kernel_sizes[1])); + + 
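+  // Worked example with illustrative values (not taken from the op
+  // definition): for x_shape = [N, 36, L], kernel_sizes = {3, 3},
+  // output_sizes = {4, 5}, strides = {1, 1}, paddings = {0, 0, 0, 0},
+  // dilations = {1, 1}:
+  //   blocks_height = (4 + 0 - (1 * (3 - 1) + 1)) / 1 + 1 = 2
+  //   blocks_width  = (5 + 0 - (1 * (3 - 1) + 1)) / 1 + 1 = 3
+  // so the AddEqualCstr above pins L to 2 * 3 = 6, and the final out_shape
+  // is [N, 36 / (3 * 3), 4, 5] = [N, 4, 4, 5].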
out_shape.push_back(symbol::DimExpr(output_sizes[0])); + out_shape.push_back(symbol::DimExpr(output_sizes[1])); infer_context->SetShapeOrDataForValue( op->result(0), - symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs(out_dims)}); + symbol::ShapeOrDataDimExprs{ + symbol::TensorShapeOrDataDimExprs(out_shape)}); return true; } diff --git a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/unary_infer_sym.cc b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/unary_infer_sym.cc index 6f3cbef0e5346..5497048872fe4 100644 --- a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/unary_infer_sym.cc +++ b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/unary_infer_sym.cc @@ -366,10 +366,15 @@ bool AsStridedOpInferSymbolicShape( const std::vector &shape = paddle::dialect::details::GetVectorAttr(op, "dims"); + symbol::DimExpr out_unknown = infer_context->GetNextSymName(); int rank = shape.size(); std::vector out_shape; for (int i = 0; i < rank; ++i) { - out_shape.push_back(symbol::DimExpr(shape[i])); + if (shape[i] == -1) { + out_shape.push_back(out_unknown); + } else { + out_shape.push_back(symbol::DimExpr(shape[i])); + } } infer_context->SetShapeOrDataForValue( From 5a28eeac55743ac9c849a09e558e4e61bed45f09 Mon Sep 17 00:00:00 2001 From: Fripping <124574028+Fripping@users.noreply.github.com> Date: Wed, 14 Aug 2024 13:10:57 +0800 Subject: [PATCH 7/8] Delete paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints directory --- .../multiary_infer_sym-checkpoint.cc | 1307 ---------- .../same_operands_result-checkpoint.cc | 214 -- .../same_operands_result-checkpoint.h | 164 -- .../unary_infer_sym-checkpoint.cc | 2221 ----------------- .../unary_infer_sym-checkpoint.h | 96 - 5 files changed, 4002 deletions(-) delete mode 100644 paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/multiary_infer_sym-checkpoint.cc delete mode 100644 paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/same_operands_result-checkpoint.cc delete mode 100644 paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/same_operands_result-checkpoint.h delete mode 100644 paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/unary_infer_sym-checkpoint.cc delete mode 100644 paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/unary_infer_sym-checkpoint.h diff --git a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/multiary_infer_sym-checkpoint.cc b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/multiary_infer_sym-checkpoint.cc deleted file mode 100644 index 8045ab2210f71..0000000000000 --- a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/multiary_infer_sym-checkpoint.cc +++ /dev/null @@ -1,1307 +0,0 @@ -// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -#include "paddle/common/ddim.h" -#include "paddle/common/layout.h" -#include "paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/infer_sym_slice_utils.h" -#include "paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/infer_sym_utils.h" -#include "paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/multiary_infer_sym.h" -#include "paddle/fluid/pir/dialect/operator/ir/op_attribute.h" - -namespace paddle::dialect { - -bool AccuracyOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const symbol::ShapeOrDataDimExprs &out_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - const symbol::ShapeOrDataDimExprs &label_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(2)); - - // Assume indices has same shape as inference, because - // it's the output of topk. - PADDLE_ENFORCE_EQ( - label_shape.shape().size(), - 2UL, - common::errors::InvalidArgument( - "ShapeError: label's dimensions of AccuracyOp must be 2. " - "But received label's dimensions = %d", - label_shape.shape().size())); - - infer_context->AddEqualCstr(label_shape.shape()[1], symbol::DimExpr{1}); - infer_context->AddEqualCstr(out_shape.shape()[0], label_shape.shape()[0]); - - std::vector accuracy_shape = {}; - infer_context->SetShapeOrDataForValue( - op->result(0), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(accuracy_shape)}); - - std::vector correct_shape = {}; - infer_context->SetShapeOrDataForValue( - op->result(1), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(correct_shape)}); - - std::vector total_shape = {}; - infer_context->SetShapeOrDataForValue( - op->result(2), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(total_shape)}); - - return true; -} - -bool AddNOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - const auto &input_list_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - PADDLE_ENFORCE_EQ( - input_list_shape.isa(), - true, - common::errors::InvalidArgument( - "The type of inputs shape should be TensorListShapeOrDataDimExprs")); - const auto &inputs_shape = - input_list_shape.dyn_cast(); - PADDLE_ENFORCE_GT( - inputs_shape.size(), - 0, - common::errors::InvalidArgument( - "The input tensor X's dimensions of AddNOp " - "should be larger than 0. 
But received X's dimensions %d.", - inputs_shape.size())); - symbol::TensorShapeOrDataDimExprs candidate_shape = inputs_shape.front(); - for (size_t i = 1; i < inputs_shape.size(); ++i) { - // 0D tensor - if (inputs_shape[i].shape().size() == 0) { - continue; - } - if (candidate_shape.shape().size() == 0) { - candidate_shape = inputs_shape[i]; - continue; - } - for (size_t j = 0; j < candidate_shape.shape().size(); ++j) { - infer_context->AddEqualCstr(candidate_shape.shape()[j], - inputs_shape[i].shape()[j]); - } - } - infer_context->SetShapeOrDataForValue( - op->result(0), symbol::ShapeOrDataDimExprs{candidate_shape}); - - return true; -} - -bool AddmmOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - const auto &input_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - const auto &x_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(1)); - const auto &y_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(2)); - - auto ndim_input = input_shape.shape().size(); - auto ndim_x = x_shape.shape().size(); - auto ndim_y = y_shape.shape().size(); - - PADDLE_ENFORCE_EQ(ndim_input == 2 || ndim_input == 1, - true, - common::errors::InvalidArgument( - "The input tensor input's dimension must be 2 or 1. " - "But received input's dimension = [%d].", - ndim_input)); - PADDLE_ENFORCE_EQ(ndim_x, - 2, - common::errors::InvalidArgument( - "The input tensor x's dimension must be 2. " - "But received x's dimension = [%d].", - ndim_x)); - PADDLE_ENFORCE_EQ(ndim_y, - 2, - common::errors::InvalidArgument( - "The input tensor y's dimension must be 2. " - "But received y's dimension = [%d].", - ndim_y)); - - std::vector output_shape; - output_shape.push_back(x_shape.shape()[0]); - output_shape.push_back(y_shape.shape()[1]); - - infer_context->SetShapeOrDataForValue( - op->result(0), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(output_shape)}); - - infer_context->AddEqualCstr(x_shape.shape()[1], y_shape.shape()[0]); - - if (ndim_input == 2) { - infer_context->AddBroadcastableCstr(input_shape.shape()[0], - x_shape.shape()[0]); - infer_context->AddBroadcastableCstr(input_shape.shape()[1], - y_shape.shape()[1]); - } else if (ndim_input == 1) { - infer_context->AddBroadcastableCstr(input_shape.shape()[0], - y_shape.shape()[1]); - } - - return true; -} - -bool Addmm_OpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - return AddmmOpInferSymbolicShape(op, infer_context); -} - -bool AucOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - const auto &predict_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - const auto &label_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(1)); - - PADDLE_ENFORCE_GE( - predict_shape.shape().size(), - 2, - common::errors::InvalidArgument( - "The Input(Predict) has not been initialized properly. 
The " - "shape of Input(Predict) = [%s], the shape size must be " - "greater_equal 2.", - predict_shape.shape())); - - const auto &predict_height = predict_shape.shape()[0]; - const auto &label_height = label_shape.shape()[0]; - - infer_context->AddEqualCstr(predict_height, label_height); - - int num_thresholds = - op->attribute("num_thresholds").data(); - int slide_steps = op->attribute("slide_steps").data(); - - int num_pred_buckets = num_thresholds + 1; - - PADDLE_ENFORCE_GE( - num_pred_buckets, - 1, - common::errors::InvalidArgument("num_thresholds must larger than 1")); - PADDLE_ENFORCE_GE( - slide_steps, - 0, - common::errors::InvalidArgument("slide_steps must be natural number")); - - infer_context->SetShapeOrDataForValue( - op->result(0), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(std::vector{})}); - - if (slide_steps) { - infer_context->SetShapeOrDataForValue( - op->result(1), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(std::vector{ - (1 + slide_steps) * num_pred_buckets + 1})}); - infer_context->SetShapeOrDataForValue( - op->result(2), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(std::vector{ - (1 + slide_steps) * num_pred_buckets + 1})}); - } else { - infer_context->SetShapeOrDataForValue( - op->result(1), - symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs( - std::vector{1, num_pred_buckets})}); - infer_context->SetShapeOrDataForValue( - op->result(2), - symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs( - std::vector{1, num_pred_buckets})}); - } - - return true; -} - -bool BatchNormOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const auto &x_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - const auto &scale_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(3)); - const auto &bias_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(4)); - - std::vector x_dims = x_shape_or_data.shape(); - - std::string data_layout_str = - op->attribute("data_format").AsString(); - const DataLayout data_layout = common::StringToDataLayout(data_layout_str); - - PADDLE_ENFORCE_GE( - x_dims.size(), - 2, - phi::errors::InvalidArgument( - "ShapeError: the dimension of input " - "X must greater than or equal to 2. But received: the shape of input " - "X = [%s], the dimension of input X =[%d]", - x_dims, - x_dims.size())); - PADDLE_ENFORCE_LE( - x_dims.size(), - 5, - phi::errors::InvalidArgument( - "ShapeError: the dimension of input X " - "must smaller than or equal to 5. But received: the shape of input X " - "= [%s], the dimension of input X = [%d]", - x_dims, - x_dims.size())); - - symbol::DimExpr C = (data_layout == DataLayout::kNCHW) - ? x_dims[1] - : x_dims[x_dims.size() - 1]; - - if (!scale_shape_or_data.isa()) { - std::vector scale_dims = scale_shape_or_data.shape(); - PADDLE_ENFORCE_EQ(scale_dims.size(), - 1UL, - phi::errors::InvalidArgument( - "ShapeError: the dimension of scale must equal to 1." - "But received: the dimension of scale is [%d]", - scale_dims.size())); - infer_context->AddEqualCstr(scale_dims[0], C); - } - - if (!bias_shape_or_data.isa()) { - std::vector bias_dims = bias_shape_or_data.shape(); - PADDLE_ENFORCE_EQ(bias_dims.size(), - 1UL, - phi::errors::InvalidArgument( - "ShapeError: the dimension of bias must equal to 1." 
- "But received: the dimension of bias is [%d]", - bias_dims.size())); - infer_context->AddEqualCstr(bias_dims[0], C); - } - - // Set output shapes - infer_context->SetShapeOrDataForValue( - op->result(0), - symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs(x_dims)}); - - std::vector param_dims = {C}; - infer_context->SetShapeOrDataForValue( - op->result(1), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(param_dims)}); - infer_context->SetShapeOrDataForValue( - op->result(2), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(param_dims)}); - - if (op->result(3) && op->result(3).type()) { - infer_context->SetShapeOrDataForValue( - op->result(3), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(param_dims)}); - } - if (op->result(4) && op->result(4).type()) { - infer_context->SetShapeOrDataForValue( - op->result(4), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(param_dims)}); - } - if (op->result(5) && op->result(5).type()) { - std::vector reserve_space_dims{ - symbol::DimExpr{infer_context->GetNextSymName()}}; - infer_context->SetShapeOrDataForValue( - op->result(5), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(reserve_space_dims)}); - } - - return true; -} - -bool BatchNorm_OpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - return BatchNormOpInferSymbolicShape(op, infer_context); -} - -bool BicubicInterpOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const symbol::ShapeOrDataDimExprs &x = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - - const auto &attributes = op->attributes(); - - const std::string data_format = - attributes.at("data_format").dyn_cast().AsString(); - int out_d = attributes.at("out_d").dyn_cast().data(); - int out_h = attributes.at("out_h").dyn_cast().data(); - int out_w = attributes.at("out_w").dyn_cast().data(); - const std::vector &scale = details::GetVectorAttr(op, "scale"); - - const bool has_size_tensor = [&] { - pir::Value size_tensor = op->operand_source(2); - if (!size_tensor || !size_tensor.type()) { - return false; - } - const auto &list_size_tensor = - size_tensor.type().dyn_cast(); - return list_size_tensor && !list_size_tensor.empty(); - }(); - auto GetSizeTensorDataExpr = - [&](pir::Value value) -> std::vector { - const symbol::ShapeOrDataDimExprs &size_tensor_shape = - infer_context->GetShapeOrDataForValue(value); - PADDLE_ENFORCE_EQ( - size_tensor_shape.isa(), - true, - common::errors::InvalidArgument( - "The size_tensor of Interpolation should be type of " - "TensorListShapeOrDataDimExprs")); - return details::GetOrCreateExprVecFromData(size_tensor_shape, - infer_context); - }; - auto GetOutSizeDataExpr = - [&](pir::Value value) -> std::vector { - const symbol::ShapeOrDataDimExprs &out_size_tensor_shape = - infer_context->GetShapeOrDataForValue(value); - return details::GetOrCreateExprVecFromData(out_size_tensor_shape, - infer_context); - }; - auto GetOutDimByScale = [&](const symbol::DimExpr &in_dim, - float scale) -> symbol::DimExpr { - PADDLE_ENFORCE_GT(scale, - 0, - common::errors::InvalidArgument( - "The scale in Attr(scale) of Operator(interpolate) " - "should be greater than 0, but received value is %d.", - scale)); - if (in_dim.isa()) { - return symbol::DimExpr{ - static_cast(in_dim.dyn_cast() * scale)}; - } - return symbol::DimExpr{infer_context->GetNextSymName()}; - }; - - std::vector size_tensor; - if (out_d != -1) 
size_tensor.push_back(out_d); - if (out_h != -1) size_tensor.push_back(out_h); - if (out_w != -1) size_tensor.push_back(out_w); - - const DataLayout data_layout = common::StringToDataLayout(data_format); - - if (x.shape().size() == 3) { - // shape check for 1D interpolate for input tensor shape NCHW - if (!size_tensor.empty()) { - // top priority size - std::vector dim_out; - if (data_layout == DataLayout::kNCHW) { - dim_out = {x.shape()[0], x.shape()[1], symbol::DimExpr{out_w}}; - } else { - dim_out = {x.shape()[0], symbol::DimExpr{out_w}, x.shape()[2]}; - } - - symbol::ShapeOrDataDimExprs shape_data{ - symbol::TensorShapeOrDataDimExprs(dim_out)}; - - pir::Value res = op->result(0); - infer_context->SetShapeOrDataForValue(res, shape_data); - return true; - } - - symbol::DimExpr out_w_tmp{0}; - const auto &next_sym = infer_context->GetNextSymName(); - out_w_tmp = symbol::DimExpr(next_sym); - - std::vector dim_out; - if (data_layout == DataLayout::kNCHW) { - dim_out = {x.shape()[0], x.shape()[1], out_w_tmp}; - } else { - dim_out = {x.shape()[0], out_w_tmp, x.shape()[2]}; - } - - symbol::ShapeOrDataDimExprs shape_data{ - symbol::TensorShapeOrDataDimExprs(dim_out)}; - - pir::Value res = op->result(0); - infer_context->SetShapeOrDataForValue(res, shape_data); - return true; - } else if (x.shape().size() == 4) { - // shape check for 2D interpolate for input tensor shape NCHW - auto GetOutHW = [&]() -> std::tuple { - // top priority size - if (has_size_tensor) { - const auto &size_tensor_list_shape = - GetSizeTensorDataExpr(op->operand_source(2)); - PADDLE_ENFORCE_EQ(size_tensor_list_shape.size(), - 2, - common::errors::InvalidArgument( - "The size of size_tensor list should be 2.")); - return std::make_tuple(size_tensor_list_shape.at(0), - size_tensor_list_shape.at(1)); - } - // has out_size tensor - if (op->operand_source(1)) { - const auto &out_size_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(1)); - PADDLE_ENFORCE_EQ( - out_size_shape_or_data.shape().size(), - 1, - common::errors::InvalidArgument( - "The rank of input out_size tensor should be 1.")); - infer_context->AddEqualCstr(out_size_shape_or_data.shape()[0], - symbol::DimExpr{2}); - const auto &out_size_data = GetOutSizeDataExpr(op->operand_source(1)); - return std::make_tuple(symbol::DimExpr{out_size_data[0]}, - symbol::DimExpr{out_size_data[1]}); - } - // has scale - if (scale.size() == 2) { - float scale_h = scale[0]; - float scale_w = scale[1]; - const auto &in_h = - data_layout == DataLayout::kNCHW ? x.shape()[2] : x.shape()[1]; - const auto &in_w = - data_layout == DataLayout::kNCHW ? 
x.shape()[3] : x.shape()[2]; - return std::make_tuple(GetOutDimByScale(in_h, scale_h), - GetOutDimByScale(in_w, scale_w)); - } - - return std::make_tuple(symbol::DimExpr{out_h}, symbol::DimExpr{out_w}); - }; - - const std::vector dim_out = [&] { - const auto &[out_h_sym, out_w_sym] = GetOutHW(); - if (data_layout == DataLayout::kNCHW) { - return std::vector{ - x.shape()[0], x.shape()[1], out_h_sym, out_w_sym}; - } else { - return std::vector{ - x.shape()[0], out_h_sym, out_w_sym, x.shape()[3]}; - } - }(); - - symbol::ShapeOrDataDimExprs shape_data{ - symbol::TensorShapeOrDataDimExprs(dim_out)}; - infer_context->SetShapeOrDataForValue(op->result(0), shape_data); - - return true; - } else if (x.shape().size() == 5) { - auto GetOutDHW = - [&]() -> std::tuple { - // top priority size - if (has_size_tensor) { - const auto &size_tensor_list_shape = - GetSizeTensorDataExpr(op->operand_source(2)); - PADDLE_ENFORCE_EQ(size_tensor_list_shape.size(), - 3, - common::errors::InvalidArgument( - "The size of size_tensor list should be 3.")); - return std::make_tuple(size_tensor_list_shape.at(0), - size_tensor_list_shape.at(1), - size_tensor_list_shape.at(2)); - } - // has out_size tensor - if (op->operand_source(1)) { - const auto &out_size_data = GetOutSizeDataExpr(op->operand_source(1)); - return std::make_tuple(symbol::DimExpr{out_size_data[0]}, - symbol::DimExpr{out_size_data[1]}, - symbol::DimExpr{out_size_data[2]}); - } - // has scale - if (scale.size() == 3) { - float scale_d = scale[0]; - float scale_h = scale[1]; - float scale_w = scale[2]; - const auto &in_d = - data_layout == DataLayout::kNCHW ? x.shape()[2] : x.shape()[1]; - const auto &in_h = - data_layout == DataLayout::kNCHW ? x.shape()[3] : x.shape()[2]; - const auto &in_w = - data_layout == DataLayout::kNCHW ? x.shape()[4] : x.shape()[3]; - return std::make_tuple(GetOutDimByScale(in_d, scale_d), - GetOutDimByScale(in_h, scale_h), - GetOutDimByScale(in_w, scale_w)); - } - - return std::make_tuple(symbol::DimExpr{out_d}, - symbol::DimExpr{out_h}, - symbol::DimExpr{out_w}); - }; - - const std::vector dim_out = [&] { - const auto &[out_d_sym, out_h_sym, out_w_sym] = GetOutDHW(); - if (data_layout == DataLayout::kNCHW) { - return std::vector{ - x.shape()[0], x.shape()[1], out_d_sym, out_h_sym, out_w_sym}; - } else { - return std::vector{ - x.shape()[0], out_d_sym, out_h_sym, out_w_sym, x.shape()[4]}; - } - }(); - - symbol::ShapeOrDataDimExprs shape_data{ - symbol::TensorShapeOrDataDimExprs(dim_out)}; - infer_context->SetShapeOrDataForValue(op->result(0), shape_data); - return true; - } else { - PADDLE_THROW( - common::errors::Fatal("Input(X) dimension must be 3, 4 or 5!")); - } - - return true; -} - -bool BilinearOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const auto &x_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - const auto &y_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(1)); - const auto &weight_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(2)); - - PADDLE_ENFORCE_EQ( - x_shape.shape().size(), - 2UL, - common::errors::InvalidArgument("The input(X) must be a 2D Tensor.")); - PADDLE_ENFORCE_EQ( - y_shape.shape().size(), - 2UL, - common::errors::InvalidArgument("The input(Y) must be a 2D Tensor.")); - PADDLE_ENFORCE_EQ( - weight_shape.shape().size(), - 3UL, - common::errors::InvalidArgument( - "Expected the input(Weight) is a 3D tensor. 
But received %dD tensor.", - weight_shape.shape().size())); - - infer_context->AddEqualCstr(x_shape.shape()[0], y_shape.shape()[0]); - - infer_context->AddEqualCstr(x_shape.shape()[1], weight_shape.shape()[1]); - infer_context->AddEqualCstr(y_shape.shape()[1], weight_shape.shape()[2]); - - if (op->operand_source(3)) { // has bias - const auto &bias_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(3)); - PADDLE_ENFORCE_EQ(bias_shape.shape().size(), - 2UL, - common::errors::InvalidArgument( - "The Input(Bias) must be a 2-D tensor with " - "the 2nd dimension fixed to 1 (a row vector).")); - infer_context->AddEqualCstr(bias_shape.shape()[0], symbol::DimExpr{1}); - infer_context->AddEqualCstr(bias_shape.shape()[1], weight_shape.shape()[0]); - } - - infer_context->SetShapeOrDataForValue( - op->result(0), - symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs( - {x_shape.shape()[0], weight_shape.shape()[0]})}); - - return true; -} - -bool BilinearInterpOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - return BicubicInterpOpInferSymbolicShape(op, infer_context); -} - -bool CrossEntropyWithSoftmaxOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const symbol::ShapeOrDataDimExprs &input_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - const symbol::ShapeOrDataDimExprs &index_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(1)); - - const auto &input_dim = input_shape.shape(); - const auto &index_dim = index_shape.shape(); - const auto &attributes = op->attributes(); - int axis = attributes.at("axis").dyn_cast().data(); - if (axis < 0) axis += input_shape.shape().size(); - bool soft_label = - attributes.at("soft_label").dyn_cast().data(); - PADDLE_ENFORCE(!soft_label || input_dim.size() == index_dim.size(), - common::errors::InvalidArgument( - "The input and index should have the same rank when " - "soft_label is true. But received input rank(%d) and " - "index rank(%d)", - input_dim.size(), - index_dim.size())); - - auto softmax_dim = index_dim; - auto out_dim = index_dim; - - if (index_dim.size() == input_dim.size()) { - if (soft_label) { - out_dim[axis] = 1; - } - softmax_dim[axis] = input_dim[axis]; - } else { - softmax_dim.insert(softmax_dim.begin() + axis, input_dim[axis]); - if (soft_label) { - out_dim.insert(out_dim.begin() + axis, 1); - } - } - - infer_context->SetShapeOrDataForValue( - op->result(0), symbol::TensorShapeOrDataDimExprs(softmax_dim)); - infer_context->SetShapeOrDataForValue( - op->result(1), symbol::TensorShapeOrDataDimExprs(out_dim)); - - return true; -} - -bool CrossEntropyWithSoftmax_OpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - return CrossEntropyWithSoftmaxOpInferSymbolicShape(op, infer_context); -} - -bool ConcatOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - const auto &axis_expr = - infer_context->GetShapeOrDataForValue(op->operand_source(1)); - if (!axis_expr.data() || !axis_expr.data()->at(0).isa()) { - pir::Value res = op->result(0); - infer_context->SetSymbolForValueByStaticShape(res); - return true; - } - - pir::Value operand_source = op->operand_source(0); - const auto &shape_data_list = - infer_context->GetShapeOrDataForValue(operand_source) - .dyn_cast(); - - size_t rank = shape_data_list.at(0).shape().size(); - const int64_t axis = [&] { - int64_t axis = axis_expr.data()->at(0).dyn_cast(); - return axis >= 0 ? 
axis : std::max(int64_t(0), int64_t(axis + rank)); - }(); - - if (shape_data_list.at(0).data().has_value()) { - if (rank == 1) { - const auto &s_or_d = - infer_context->GetShapeOrDataForValue(operand_source); - ExprVec data = details::GetExprVecFromData(s_or_d); - - const std::vector shape{std::int64_t(data.size())}; - symbol::ShapeOrDataDimExprs shape_data{ - symbol::TensorShapeOrDataDimExprs(shape, data)}; - pir::Value res = op->result(0); - infer_context->SetShapeOrDataForValue(res, shape_data); - - return true; - } else { - PADDLE_THROW(common::errors::Unimplemented( - op->name() + - " 's InferSymbolicShape can NOT deal with rank > 1 now.")); - } - std::vector data; - data.reserve(shape_data_list.size()); - for (auto &data_elem : shape_data_list) { - data.push_back(data_elem.data().value().at(0)); - } - const std::vector shape{std::int64_t(data.size())}; - symbol::ShapeOrDataDimExprs shape_data{ - symbol::TensorShapeOrDataDimExprs(shape, data)}; - pir::Value res = op->result(0); - infer_context->SetShapeOrDataForValue(res, shape_data); - - return true; - } - - const std::vector &out_dims = [&] { - std::vector out_dims = shape_data_list.at(0).shape(); - for (size_t i = 0; i < rank; ++i) { - if (i != static_cast(axis)) { - details::BuildCstrEqForTensorListAlongAxis( - infer_context, shape_data_list, i); - continue; - } - for (size_t j = 1; j < shape_data_list.size(); ++j) { - out_dims.at(axis) = - out_dims.at(axis) + shape_data_list.at(j).shape().at(axis); - } - } - return out_dims; - }(); - - symbol::ShapeOrDataDimExprs shape_data{ - symbol::TensorShapeOrDataDimExprs(out_dims)}; - - pir::Value res = op->result(0); - infer_context->SetShapeOrDataForValue(res, shape_data); - - return true; -} - -bool FullWithTensorOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - pir::Value operand_source = op->operand_source(1); - const symbol::ShapeOrDataDimExprs &operand_shape_or_data = - infer_context->GetShapeOrDataForValue(operand_source); - - const auto &out_shape = operand_shape_or_data.data().has_value() - ? 
operand_shape_or_data.data().value() - : operand_shape_or_data.shape(); - - infer_context->SetShapeOrDataForValue( - op->result(0), symbol::TensorShapeOrDataDimExprs(out_shape)); - return true; -} - -bool FlashAttnOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - pir::Value operand_source = op->operand_source(0); - const symbol::ShapeOrDataDimExprs &q = - infer_context->GetShapeOrDataForValue(operand_source); - - const symbol::ShapeOrDataDimExprs &k = - infer_context->GetShapeOrDataForValue(op->operand_source(1)); - - const symbol::ShapeOrDataDimExprs &v = - infer_context->GetShapeOrDataForValue(op->operand_source(2)); - - PADDLE_ENFORCE_EQ(q.shape().size(), - 4, - common::errors::InvalidArgument( - "flash_attn receive input with dim " - "[batch_size, seq_len, num_heads, head_dim]")); - - infer_context->AddEqualCstr(q.shape()[0], k.shape()[0]); - infer_context->AddEqualCstr(q.shape()[0], v.shape()[0]); - infer_context->AddEqualCstr(k.shape()[1], v.shape()[1]); - - if (op->operand_source(4)) { - const symbol::ShapeOrDataDimExprs &attn_mask = - infer_context->GetShapeOrDataForValue(op->operand_source(4)); - infer_context->AddEqualCstr(attn_mask.shape()[0], q.shape()[0]); - infer_context->AddEqualCstr(attn_mask.shape()[2], q.shape()[1]); - infer_context->AddEqualCstr(attn_mask.shape()[3], k.shape()[1]); - } - - std::vector out_shape = q.shape(); - - out_shape.back() = v.shape().back(); - - infer_context->SetShapeOrDataForValue( - op->result(0), symbol::TensorShapeOrDataDimExprs(out_shape)); - - // GPU has round for seqlen, but XPU has not. Here we align with the GPU - // version. - auto round_multiple = [](symbol::DimExpr x) { - auto m = symbol::DimExpr{128}; - auto m_minus_one = symbol::DimExpr{127}; - return (x + m_minus_one) / m * m; - }; - auto batch_size_expr = q.shape()[0]; - auto num_heads_expr = q.shape()[2]; - auto seqlen_q_rounded_expr = round_multiple(q.shape()[1]); - auto seqlen_k_rounded_expr = round_multiple(k.shape()[1]); - if (op->result(1)) { - std::vector softmax_shape{batch_size_expr, - num_heads_expr, - seqlen_q_rounded_expr, - seqlen_k_rounded_expr}; - infer_context->SetShapeOrDataForValue( - op->result(1), symbol::TensorShapeOrDataDimExprs(softmax_shape)); - } - if (op->result(2)) { - std::vector softmax_lse_shape{ - batch_size_expr, num_heads_expr, seqlen_q_rounded_expr}; - infer_context->SetShapeOrDataForValue( - op->result(2), symbol::TensorShapeOrDataDimExprs(softmax_lse_shape)); - } - if (op->result(3)) { - std::vector seed_offset_shape{symbol::DimExpr{2}}; - infer_context->SetShapeOrDataForValue( - op->result(3), symbol::TensorShapeOrDataDimExprs(out_shape)); - } - return true; -} - -bool GroupNormOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const symbol::ShapeOrDataDimExprs &x_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - - infer_context->SetShapeOrDataForValue(op->result(0), x_shape); - - const symbol::DimExpr &batch_size = x_shape.shape()[0]; - int groups = op->attribute("groups").data(); - symbol::TensorShapeOrDataDimExprs mean_shape( - std::vector{batch_size, groups}); - if (op->result(1)) { - infer_context->SetShapeOrDataForValue(op->result(1), mean_shape); - } - if (op->result(2)) { - infer_context->SetShapeOrDataForValue(op->result(2), mean_shape); - } - return true; -} - -bool LerpOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - const auto &x_shape_or_data = - 
infer_context->GetShapeOrDataForValue(op->operand_source(0)); - const auto &y_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(1)); - const auto &w_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(2)); - const auto &x_shape = x_shape_or_data.shape(); - const auto &y_shape = y_shape_or_data.shape(); - const auto &w_shape = w_shape_or_data.shape(); - size_t x_ndims = x_shape.size(); - size_t y_ndims = y_shape.size(); - size_t w_ndims = w_shape.size(); - std::vector out1_shape; - std::vector out2_shape; - if (x_ndims > y_ndims) { - out1_shape.assign(x_shape.begin(), x_shape.end()); - } else if (x_ndims < y_ndims) { - out1_shape.assign(y_shape.begin(), y_shape.end()); - } else { - symbol::DimExprBuilder builder; - for (size_t i = 0; i < x_ndims; ++i) { - out1_shape.emplace_back(builder.Broadcast(x_shape[i], y_shape[i])); - infer_context->AddBroadcastableCstr(x_shape[i], y_shape[i]); - } - } - size_t out1_ndims = out1_shape.size(); - if (w_ndims > out1_ndims) { - out2_shape.assign(w_shape.begin(), w_shape.end()); - } else if (w_ndims < out1_ndims) { - out2_shape.assign(out1_shape.begin(), out1_shape.end()); - } else { - symbol::DimExprBuilder builder; - for (size_t i = 0; i < w_ndims; ++i) { - out2_shape.emplace_back(builder.Broadcast(w_shape[i], out1_shape[i])); - infer_context->AddBroadcastableCstr(w_shape[i], out1_shape[i]); - } - } - infer_context->SetShapeOrDataForValue( - op->result(0), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(out2_shape)}); - return true; -} - -bool Lerp_OpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - return LerpOpInferSymbolicShape(op, infer_context); -} - -bool LayerNormOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - // Get the shapes of input tensors - const auto &x_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - const auto &scale_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(1)); - const auto &bias_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(2)); - - std::vector x_dims = x_shape_or_data.shape(); - int begin_norm_axis = - op->attribute("begin_norm_axis").data(); - - // Flatten x_dims to 2D and get dim[1] - symbol::DimExpr matrix_dim_1 = x_dims[begin_norm_axis]; - for (std::size_t i = begin_norm_axis + 1; i < x_dims.size(); ++i) { - matrix_dim_1 = matrix_dim_1 * x_dims[i]; - } - - if (!scale_shape_or_data.isa()) { - std::vector scale_dims = scale_shape_or_data.shape(); - infer_context->AddEqualCstr(scale_dims[0], matrix_dim_1); - } - if (!bias_shape_or_data.isa()) { - std::vector bias_dims = bias_shape_or_data.shape(); - infer_context->AddEqualCstr(bias_dims[0], matrix_dim_1); - } - - // Set output shapes - infer_context->SetShapeOrDataForValue( - op->result(0), - symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs(x_dims)}); - - // Set mean and variance shapes - std::vector before_norm_dims( - x_dims.begin(), x_dims.begin() + begin_norm_axis); - infer_context->SetShapeOrDataForValue( - op->result(1), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(before_norm_dims)}); - infer_context->SetShapeOrDataForValue( - op->result(2), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(before_norm_dims)}); - - return true; -} - -bool LinspaceOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const auto &num_shape_or_data = - 
infer_context->GetShapeOrDataForValue(op->operand_source(2)); - const auto step = [&] { - symbol::DimExpr expr; - if (num_shape_or_data.data().has_value()) { - expr = num_shape_or_data.data().value()[0]; - } else { - expr = num_shape_or_data.shape()[0]; - } - return expr; - }(); - const symbol::ShapeOrDataDimExprs &shape_data = [&] { - std::vector out_dims{step}; - return symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(out_dims)}; - }(); - infer_context->SetShapeOrDataForValue(op->result(0), shape_data); - return true; -} - -bool LinearInterpOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - return BicubicInterpOpInferSymbolicShape(op, infer_context); -} - -bool LogspaceOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - return LinspaceOpInferSymbolicShape(op, infer_context); -} - -bool NearestInterpOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - return BicubicInterpOpInferSymbolicShape(op, infer_context); -} - -bool MemoryEfficientAttentionOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const auto &q_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(0)).shape(); - const auto &k_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(1)).shape(); - const auto &v_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(2)).shape(); - PADDLE_ENFORCE_EQ( - q_shape.size(), - 4, - common::errors::InvalidArgument("Query should be a 4-D tensor" - "But received Query dimension(%d)", - q_shape.size())); - PADDLE_ENFORCE_EQ( - k_shape.size(), - 4, - common::errors::InvalidArgument("Key should be a 4-D tensor" - "But received Key dimension(%d)", - k_shape.size())); - PADDLE_ENFORCE_EQ( - v_shape.size(), - 4, - common::errors::InvalidArgument("Value should be a 4-D tensor" - "But received Value dimension(%d)", - v_shape.size())); - - const auto &query_batch_size = q_shape[0]; - const auto &query_seq_length = q_shape[1]; - const auto &query_num_head = q_shape[2]; - const auto &query_head_size = q_shape[3]; - - const auto &key_batch_size = k_shape[0]; - const auto &key_seq_length = k_shape[1]; - const auto &key_num_head = k_shape[2]; - const auto &key_head_size = k_shape[3]; - - const auto &value_batch_size = v_shape[0]; - const auto &value_seq_length = v_shape[1]; - const auto &value_num_head = v_shape[2]; - const auto &value_head_size = v_shape[3]; - - infer_context->AddEqualCstr(query_batch_size, key_batch_size); - infer_context->AddEqualCstr(key_batch_size, value_batch_size); - - infer_context->AddEqualCstr(query_num_head, key_num_head); - infer_context->AddEqualCstr(key_num_head, value_num_head); - - infer_context->AddEqualCstr(query_head_size, key_head_size); - - infer_context->AddEqualCstr(key_seq_length, value_seq_length); - - const std::vector out_dims{ - query_batch_size, query_seq_length, query_num_head, value_head_size}; - const std::vector logsumexp_dims{query_num_head, - query_batch_size}; - const std::vector seed_and_offset_dims{2}; - - infer_context->SetShapeOrDataForValue( - op->result(0), symbol::TensorShapeOrDataDimExprs(out_dims)); - infer_context->SetShapeOrDataForValue( - op->result(1), symbol::TensorShapeOrDataDimExprs(logsumexp_dims)); - infer_context->SetShapeOrDataForValue( - op->result(2), symbol::TensorShapeOrDataDimExprs(seed_and_offset_dims)); - - return true; -} - -bool RoiAlignOpInferSymbolicShape( - pir::Operation *op, 
pir::InferSymbolicShapeContext *infer_context) { - const auto &x = op->operand_source(0); - const auto &boxes = op->operand_source(1); - - const auto &num_boxes = - infer_context->GetShapeOrDataForValue(boxes).shape()[0]; - symbol::DimExpr channel_num = - infer_context->GetShapeOrDataForValue(x).shape()[1]; - - int32_t out_h = op->attribute("pooled_height").data(); - int32_t out_w = op->attribute("pooled_width").data(); - - std::vector out_dim = {num_boxes, channel_num, out_h, out_w}; - infer_context->SetShapeOrDataForValue( - op->result(0), symbol::TensorShapeOrDataDimExprs(out_dim)); - return true; -} - -bool MeshgridOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const symbol::TensorListShapeOrDataDimExprs &shape_data_list = - infer_context->GetShapeOrDataForValue(op->operand_source(0)) - .dyn_cast(); - - const symbol::ShapeOrDataDimExprs sym_shape_dim_exprs = [&] { - symbol::TensorListShapeOrDataDimExprs shape_dim_exprs_list; - std::vector vec; - - for (auto &shape_data : shape_data_list) { - if (shape_data.shape().size() == 0) { - vec.emplace_back(1); - } else { - vec.emplace_back(shape_data.shape()[0]); - } - } - - auto shape_dim_exprs = symbol::TensorShapeOrDataDimExprs(vec); - - for (size_t i = 0; i < shape_data_list.size(); i++) { - shape_dim_exprs_list.emplace_back(shape_dim_exprs); - } - - return symbol::ShapeOrDataDimExprs(shape_dim_exprs_list); - }(); - - pir::Value res = op->result(0); - infer_context->SetShapeOrDataForValue(res, sym_shape_dim_exprs); - return true; -} - -bool StackOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - pir::Value operand_source = op->operand_source(0); - - const auto &attributes = op->attributes(); - int axis = attributes.at("axis").dyn_cast().data(); - const symbol::TensorListShapeOrDataDimExprs &shape_data_list = - infer_context->GetShapeOrDataForValue(operand_source) - .dyn_cast(); - - size_t rank = shape_data_list.at(0).shape().size(); - if (axis < 0) axis += rank + 1; - const symbol::ShapeOrDataDimExprs shape_data = [&] { - std::vector result_shape = {}; - std::vector result_data = {}; - const symbol::TensorShapeOrDataDimExprs &x_shape_data = - shape_data_list.at(0); - - const bool data_flag = [&] { - for (const auto &shape_data : shape_data_list) { - if (!shape_data.data().has_value()) { - return false; - } - } - return true; - }(); - - if (data_flag) { - // case 1: data is not empty, eg: shape_data_list = - // [[shape:{3},data:{S0,6,7}],...] - if (axis == 0 && x_shape_data.data().value().size() <= 1) { - for (const auto &shape_data : shape_data_list) { - result_data.emplace_back(shape_data.data().value().at(0)); - } - } else { - PADDLE_THROW(common::errors::Unimplemented( - op->name() + - " 's InferSymbolicShape can NOT deal with data size > 1 now.")); - } - result_shape.emplace_back( - static_cast(shape_data_list.size())); - } else { - // case 2: data is empty, eg: shape_data_list = - // [[shape:{5,6,7},data:{}],...] 
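-    // e.g. stacking k tensors of shape [5, 6, 7] along axis = 1: the loop
-    // below first constrains all k input shapes to be equal per dimension,
-    // then DimExpr(k) is inserted at `axis`, producing [5, k, 6, 7].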
-      for (size_t i = 0; i < rank; ++i) {
-        details::BuildCstrEqForTensorListAlongAxis(
-            infer_context, shape_data_list, i);
-      }
-      for (const symbol::DimExpr &dim : x_shape_data.shape()) {
-        result_shape.emplace_back(dim);
-      }
-      result_shape.insert(result_shape.begin() + axis,
-                          static_cast<std::int64_t>(shape_data_list.size()));
-    }
-
-    if (result_data.empty()) {
-      return symbol::ShapeOrDataDimExprs(
-          symbol::TensorShapeOrDataDimExprs(result_shape));
-    }
-    return symbol::ShapeOrDataDimExprs(
-        symbol::TensorShapeOrDataDimExprs(result_shape, result_data));
-  }();
-
-  pir::Value res = op->result(0);
-  infer_context->SetShapeOrDataForValue(res, shape_data);
-  return true;
-}
-
-bool TrilinearInterpOpInferSymbolicShape(
-    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
-  return BicubicInterpOpInferSymbolicShape(op, infer_context);
-}
-
-bool WhereOpInferSymbolicShape(pir::Operation *op,
-                               pir::InferSymbolicShapeContext *infer_context) {
-  infer_context->SetShapeOrDataForValue(
-      op->result(0),
-      infer_context->GetShapeOrDataForValue(op->operand_source(0)));
-
-  const std::vector<pir::Value> &operands = {op->operand_source(0),
-                                             op->operand_source(1)};
-
-  size_t rank = infer_context->GetShapeOrDataForValue(op->operand_source(0))
-                    .shape()
-                    .size();
-
-  for (size_t i = 0; i < rank; ++i) {
-    paddle::dialect::details::BuildCstrEqForTensorListAlongAxis(
-        infer_context, operands, i);
-  }
-
-  return true;
-}
-
-bool Where_OpInferSymbolicShape(pir::Operation *op,
-                                pir::InferSymbolicShapeContext *infer_context) {
-  return WhereOpInferSymbolicShape(op, infer_context);
-}
-
-bool YoloLossOpInferSymbolicShape(
-    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
-  const auto &x_shape =
-      infer_context->GetShapeOrDataForValue(op->operand_source(0)).shape();
-  const auto &box_shape =
-      infer_context->GetShapeOrDataForValue(op->operand_source(1)).shape();
-  const auto &label_shape =
-      infer_context->GetShapeOrDataForValue(op->operand_source(2)).shape();
-  const std::vector<int> &anchors_mask =
-      paddle::dialect::details::GetVectorAttr<int>(op, "anchor_mask");
-  int mask_num = anchors_mask.size();
-  int class_num = op->attribute<pir::Int32Attribute>("class_num").data();
-
-  PADDLE_ENFORCE_EQ(x_shape.size(),
-                    4,
-                    phi::errors::InvalidArgument(
-                        "Input(X) should be a 4-D tensor. But received "
-                        "X dimension size(%s)",
-                        x_shape.size()));
-  PADDLE_ENFORCE_EQ(
-      box_shape.size(),
-      3,
-      phi::errors::InvalidArgument("Input(GTBox) should be a 3-D tensor, but "
-                                   "received gtbox dimension size(%s)",
-                                   box_shape.size()));
-  PADDLE_ENFORCE_EQ(label_shape.size(),
-                    2,
-                    phi::errors::InvalidArgument(
-                        "Input(GTLabel) should be a 2-D tensor, "
-                        "but received Input(GTLabel) dimension size(%s) != 2.",
-                        label_shape.size()));
-  infer_context->AddEqualCstr(box_shape[2], symbol::DimExpr(4));
-  infer_context->AddEqualCstr(x_shape[2], x_shape[3]);
-  infer_context->AddEqualCstr(x_shape[1],
-                              symbol::DimExpr(mask_num * (5 + class_num)));
-  infer_context->AddEqualCstr(label_shape[0], box_shape[0]);
-  infer_context->AddEqualCstr(label_shape[1], box_shape[1]);
-
-  if (op->operand_source(3) != nullptr) {
-    const auto &score_shape =
-        infer_context->GetShapeOrDataForValue(op->operand_source(3)).shape();
-    PADDLE_ENFORCE_EQ(
-        score_shape.size(),
-        2,
-        phi::errors::InvalidArgument("Input(GTScore) should be a 2-D tensor. "
-                                     "But received GTScore dimension(%s)",
-                                     score_shape.size()));
-    infer_context->AddEqualCstr(score_shape[0], box_shape[0]);
-    infer_context->AddEqualCstr(score_shape[1], box_shape[1]);
-  }
-
-  std::vector<symbol::DimExpr> out_shape = {x_shape[0]};
-  infer_context->SetShapeOrDataForValue(
-      op->result(0),
-      symbol::ShapeOrDataDimExprs{
-          symbol::TensorShapeOrDataDimExprs(out_shape)});
-
-  std::vector<symbol::DimExpr> obj_mask_shape = {
-      x_shape[0], symbol::DimExpr(mask_num), x_shape[2], x_shape[3]};
-  infer_context->SetShapeOrDataForValue(
-      op->result(1),
-      symbol::ShapeOrDataDimExprs{
-          symbol::TensorShapeOrDataDimExprs(obj_mask_shape)});
-
-  std::vector<symbol::DimExpr> match_mask_shape = {box_shape[0], box_shape[1]};
-  infer_context->SetShapeOrDataForValue(
-      op->result(2),
-      symbol::ShapeOrDataDimExprs{
-          symbol::TensorShapeOrDataDimExprs(match_mask_shape)});
-
-  return true;
-}
-
-bool FakeChannelWiseDequantizeMaxAbsOpInferSymbolicShape(
-    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
-  const auto &x_shape_or_data =
-      infer_context->GetShapeOrDataForValue(op->operand_source(0));
-
-  int quant_axis = op->attribute<pir::Int32Attribute>("quant_axis").data();
-  int x_num_col_dims =
-      op->attribute<pir::Int32Attribute>("x_num_col_dims").data();
-
-  PADDLE_ENFORCE_EQ(
-      quant_axis == 0 || quant_axis == 1,
-      true,
-      common::errors::InvalidArgument("'quant_axis' should be 0 or 1, but "
-                                      "the received is %d",
-                                      quant_axis));
-  PADDLE_ENFORCE_EQ(x_num_col_dims == 0,
-                    false,
-                    common::errors::InvalidArgument(
-                        "'x_num_col_dims' should be larger than 0, but "
-                        "the received is %d",
-                        x_num_col_dims));
-
-  infer_context->SetShapeOrDataForValue(
-      op->result(0),
-      symbol::ShapeOrDataDimExprs{
-          symbol::TensorShapeOrDataDimExprs(x_shape_or_data.shape())});
-
-  return true;
-}
-
-}  // namespace paddle::dialect
diff --git a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/same_operands_result-checkpoint.cc b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/same_operands_result-checkpoint.cc
deleted file mode 100644
index 3eb6b62bdc1fd..0000000000000
--- a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/same_operands_result-checkpoint.cc
+++ /dev/null
@@ -1,214 +0,0 @@
-// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/same_operands_result.h" - -#define OP_SAME_OPERANDS_AND_RESULT(name) \ - bool name##OpInferSymbolicShape( \ - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { \ - const auto &operand_shape = \ - infer_context->GetShapeOrDataForValue(op->operand_source(0)).shape(); \ - infer_context->SetShapeOrDataForValue( \ - op->result(0), \ - symbol::ShapeOrDataDimExprs{ \ - symbol::TensorShapeOrDataDimExprs(operand_shape)}); \ - return true; \ - } - -namespace paddle::dialect { - -OP_SAME_OPERANDS_AND_RESULT(Abs) -OP_SAME_OPERANDS_AND_RESULT(Abs_) -OP_SAME_OPERANDS_AND_RESULT(Acos) -OP_SAME_OPERANDS_AND_RESULT(Acos_) -OP_SAME_OPERANDS_AND_RESULT(Acosh) -OP_SAME_OPERANDS_AND_RESULT(Acosh_) -OP_SAME_OPERANDS_AND_RESULT(Angle) -OP_SAME_OPERANDS_AND_RESULT(Asin) -OP_SAME_OPERANDS_AND_RESULT(Asin_) -OP_SAME_OPERANDS_AND_RESULT(Asinh) -OP_SAME_OPERANDS_AND_RESULT(Asinh_) -OP_SAME_OPERANDS_AND_RESULT(Atan) -OP_SAME_OPERANDS_AND_RESULT(Atan_) -OP_SAME_OPERANDS_AND_RESULT(Atanh) -OP_SAME_OPERANDS_AND_RESULT(Atanh_) -OP_SAME_OPERANDS_AND_RESULT(Bernoulli) -OP_SAME_OPERANDS_AND_RESULT(BitwiseNot) -OP_SAME_OPERANDS_AND_RESULT(BitwiseNot_) -OP_SAME_OPERANDS_AND_RESULT(Ceil) -OP_SAME_OPERANDS_AND_RESULT(Ceil_) -OP_SAME_OPERANDS_AND_RESULT(Celu) -OP_SAME_OPERANDS_AND_RESULT(Clip) -OP_SAME_OPERANDS_AND_RESULT(Clip_) -OP_SAME_OPERANDS_AND_RESULT(Conj) -OP_SAME_OPERANDS_AND_RESULT(CopyTo) -OP_SAME_OPERANDS_AND_RESULT(Cos) -OP_SAME_OPERANDS_AND_RESULT(Cos_) -OP_SAME_OPERANDS_AND_RESULT(Cosh) -OP_SAME_OPERANDS_AND_RESULT(Cosh_) -OP_SAME_OPERANDS_AND_RESULT(DequantizeLog) -OP_SAME_OPERANDS_AND_RESULT(Digamma) -OP_SAME_OPERANDS_AND_RESULT(Digamma_) -OP_SAME_OPERANDS_AND_RESULT(Dirichlet) -OP_SAME_OPERANDS_AND_RESULT(EmptyLike) -OP_SAME_OPERANDS_AND_RESULT(Erf) -OP_SAME_OPERANDS_AND_RESULT(Erf_) -OP_SAME_OPERANDS_AND_RESULT(Erfinv) -OP_SAME_OPERANDS_AND_RESULT(Erfinv_) -OP_SAME_OPERANDS_AND_RESULT(Exp) -OP_SAME_OPERANDS_AND_RESULT(Exp_) -OP_SAME_OPERANDS_AND_RESULT(Expm1) -OP_SAME_OPERANDS_AND_RESULT(Expm1_) -OP_SAME_OPERANDS_AND_RESULT(Exponential_) -OP_SAME_OPERANDS_AND_RESULT(Fill) -OP_SAME_OPERANDS_AND_RESULT(Fill_) -OP_SAME_OPERANDS_AND_RESULT(Fetch) -OP_SAME_OPERANDS_AND_RESULT(Flip) -OP_SAME_OPERANDS_AND_RESULT(Floor) -OP_SAME_OPERANDS_AND_RESULT(Floor_) -OP_SAME_OPERANDS_AND_RESULT(FullLike) -OP_SAME_OPERANDS_AND_RESULT(Imag) -OP_SAME_OPERANDS_AND_RESULT(Increment) -OP_SAME_OPERANDS_AND_RESULT(Increment_) -OP_SAME_OPERANDS_AND_RESULT(Isfinite) -OP_SAME_OPERANDS_AND_RESULT(IsfiniteSr) -OP_SAME_OPERANDS_AND_RESULT(Isinf) -OP_SAME_OPERANDS_AND_RESULT(IsinfSr) -OP_SAME_OPERANDS_AND_RESULT(Isnan) -OP_SAME_OPERANDS_AND_RESULT(IsnanSr) -OP_SAME_OPERANDS_AND_RESULT(I0) -OP_SAME_OPERANDS_AND_RESULT(I0_) -OP_SAME_OPERANDS_AND_RESULT(I0e) -OP_SAME_OPERANDS_AND_RESULT(I1) -OP_SAME_OPERANDS_AND_RESULT(I1e) -OP_SAME_OPERANDS_AND_RESULT(Lgamma) -OP_SAME_OPERANDS_AND_RESULT(Lgamma_) -OP_SAME_OPERANDS_AND_RESULT(Log1p) -OP_SAME_OPERANDS_AND_RESULT(Log1p_) -OP_SAME_OPERANDS_AND_RESULT(Log) 
-OP_SAME_OPERANDS_AND_RESULT(Log_) -OP_SAME_OPERANDS_AND_RESULT(LogicalNot) -OP_SAME_OPERANDS_AND_RESULT(LogicalNot_) -OP_SAME_OPERANDS_AND_RESULT(Logit) -OP_SAME_OPERANDS_AND_RESULT(Logit_) -OP_SAME_OPERANDS_AND_RESULT(Logsigmoid) -OP_SAME_OPERANDS_AND_RESULT(Logsigmoid_) -OP_SAME_OPERANDS_AND_RESULT(Pow) -OP_SAME_OPERANDS_AND_RESULT(Poisson) -OP_SAME_OPERANDS_AND_RESULT(Pow_) -OP_SAME_OPERANDS_AND_RESULT(Prelu) -OP_SAME_OPERANDS_AND_RESULT(Print) -OP_SAME_OPERANDS_AND_RESULT(PutAlongAxis) -OP_SAME_OPERANDS_AND_RESULT(PutAlongAxis_) -OP_SAME_OPERANDS_AND_RESULT(Real) -OP_SAME_OPERANDS_AND_RESULT(Reciprocal) -OP_SAME_OPERANDS_AND_RESULT(Reciprocal_) -OP_SAME_OPERANDS_AND_RESULT(Relu) -OP_SAME_OPERANDS_AND_RESULT(Relu6) -OP_SAME_OPERANDS_AND_RESULT(Relu_) -OP_SAME_OPERANDS_AND_RESULT(Reverse) -OP_SAME_OPERANDS_AND_RESULT(Roll) -OP_SAME_OPERANDS_AND_RESULT(Round) -OP_SAME_OPERANDS_AND_RESULT(Round_) -OP_SAME_OPERANDS_AND_RESULT(RowConv) -OP_SAME_OPERANDS_AND_RESULT(Rsqrt) -OP_SAME_OPERANDS_AND_RESULT(Rsqrt_) -OP_SAME_OPERANDS_AND_RESULT(ScaleSr) -OP_SAME_OPERANDS_AND_RESULT(ScaleSr_) -OP_SAME_OPERANDS_AND_RESULT(Scale_) -OP_SAME_OPERANDS_AND_RESULT(ScatterNdAdd) -OP_SAME_OPERANDS_AND_RESULT(Scatter) -OP_SAME_OPERANDS_AND_RESULT(Scatter_) -OP_SAME_OPERANDS_AND_RESULT(Select) -OP_SAME_OPERANDS_AND_RESULT(Sign) -OP_SAME_OPERANDS_AND_RESULT(Sin) -OP_SAME_OPERANDS_AND_RESULT(Sin_) -OP_SAME_OPERANDS_AND_RESULT(Sinh) -OP_SAME_OPERANDS_AND_RESULT(Sinh_) -OP_SAME_OPERANDS_AND_RESULT(Softmax) -OP_SAME_OPERANDS_AND_RESULT(Softmax_) -OP_SAME_OPERANDS_AND_RESULT(Swish) -OP_SAME_OPERANDS_AND_RESULT(Tan) -OP_SAME_OPERANDS_AND_RESULT(Tan_) -OP_SAME_OPERANDS_AND_RESULT(Tanh) -OP_SAME_OPERANDS_AND_RESULT(Tanh_) -OP_SAME_OPERANDS_AND_RESULT(Tril) -OP_SAME_OPERANDS_AND_RESULT(Tril_) -OP_SAME_OPERANDS_AND_RESULT(Triu) -OP_SAME_OPERANDS_AND_RESULT(Triu_) -OP_SAME_OPERANDS_AND_RESULT(Trunc) -OP_SAME_OPERANDS_AND_RESULT(Trunc_) -OP_SAME_OPERANDS_AND_RESULT(Sigmoid) -OP_SAME_OPERANDS_AND_RESULT(Sigmoid_) -OP_SAME_OPERANDS_AND_RESULT(LeakyRelu) -OP_SAME_OPERANDS_AND_RESULT(LeakyRelu_) -OP_SAME_OPERANDS_AND_RESULT(ThresholdedRelu) -OP_SAME_OPERANDS_AND_RESULT(ThresholdedRelu_) -OP_SAME_OPERANDS_AND_RESULT(SquareSr) -OP_SAME_OPERANDS_AND_RESULT(Square) -OP_SAME_OPERANDS_AND_RESULT(Polygamma) -OP_SAME_OPERANDS_AND_RESULT(Polygamma_) -OP_SAME_OPERANDS_AND_RESULT(EnableCheckModelNanInf) -OP_SAME_OPERANDS_AND_RESULT(ViewShape) - -bool ScaleOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - pir::Value operand_source = op->operand_source(0); - const symbol::ShapeOrDataDimExprs &operand_shape_or_data = - infer_context->GetShapeOrDataForValue(operand_source); - std::vector shape(operand_shape_or_data.shape()); - - if (operand_shape_or_data.data()) { - const std::vector data = [&] { - const symbol::DimExpr scale = [&]() -> symbol::DimExpr { - if (op->num_operands() == 2) { - return infer_context->GetShapeOrDataForValue(op->operand_source(1)) - .data() - ->at(0); - } - return static_cast( - op->attribute("scale").dyn_cast().data()); - }(); - int bias = op->attribute("bias").dyn_cast().data(); - - std::vector data; - for (auto &val : *(operand_shape_or_data.data())) { - data.push_back(val * scale + bias); - } - return data; - }(); - - infer_context->SetShapeOrDataForValue( - op->result(0), symbol::TensorShapeOrDataDimExprs(shape, data)); - } else { - infer_context->SetShapeOrDataForValue(op->result(0), operand_shape_or_data); - } - - return true; -} - -bool ArgsortOpInferSymbolicShape( - 
pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const symbol::ShapeOrDataDimExprs &operand_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - infer_context->SetShapeOrDataForValue(op->result(0), operand_shape_or_data); - infer_context->SetShapeOrDataForValue(op->result(1), operand_shape_or_data); - return true; -} - -} // namespace paddle::dialect - -namespace cinn::dialect {} // namespace cinn::dialect - -#undef OP_SAME_OPERANDS_AND_RESULT diff --git a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/same_operands_result-checkpoint.h b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/same_operands_result-checkpoint.h deleted file mode 100644 index 2e84c7297643f..0000000000000 --- a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/same_operands_result-checkpoint.h +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#pragma once - -#include "paddle/pir/include/dialect/shape/utils/shape_analysis.h" - -namespace paddle::dialect { -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Abs) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Abs_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Acos) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Acos_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Acosh) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Acosh_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Angle) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Argsort) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Asin) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Asin_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Asinh) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Asinh_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Assign) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Assign_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Atan) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Atan_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Atanh) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Atanh_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Bernoulli) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(BitwiseNot) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(BitwiseNot_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Ceil) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Ceil_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Celu) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Clip) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Clip_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Conj) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(CopyTo) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cos) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cos_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cosh) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cosh_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(DequantizeLog) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Digamma) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Digamma_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Dirichlet) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(EmptyLike) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Erf) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Erf_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Erfinv) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Erfinv_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Exp) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Exp_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Expm1) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Expm1_) 
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Exponential_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Fetch) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Fill) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Fill_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Flip) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Floor) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Floor_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(FullLike) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Imag) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Increment) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Increment_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Isfinite) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(IsfiniteSr) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Isinf) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(IsinfSr) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Isnan) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(IsnanSr) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(I0) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(I0_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(I0e) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(I1) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(I1e) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Lgamma) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Lgamma_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Log1p) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Log1p_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Log) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Log_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(LogicalNot) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(LogicalNot_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Logit) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Logit_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Logsigmoid) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Logsigmoid_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Poisson) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Pow) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Pow_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Prelu) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Print) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(PutAlongAxis) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(PutAlongAxis_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Real) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Reciprocal) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Reciprocal_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Relu) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Relu6) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Relu_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Reverse) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Roll) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Round) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Round_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(RowConv) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Rsqrt) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Rsqrt_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Scale) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(ScaleSr) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(ScaleSr_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Scale_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(ScatterNdAdd) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Scatter) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Scatter_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Select) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Sign) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Sin) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Sin_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Sinh) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Sinh_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Softmax) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Softmax_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Swish) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Tan) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Tan_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Tanh) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Tanh_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Tril) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Tril_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Triu) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Triu_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Trunc) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Trunc_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Sigmoid) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(Sigmoid_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(LeakyRelu) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(LeakyRelu_) -OP_DECLARE_INFER_SYMBOLIC_SHAPE(ThresholdedRelu) 
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(ThresholdedRelu_)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(SquareSr)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Square)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Polygamma)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Polygamma_)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(EnableCheckModelNanInf)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(ViewShape)
-
-}  // namespace paddle::dialect
-
-namespace cinn::dialect {
-using paddle::dialect::ReverseOpInferSymbolicShape;
-using paddle::dialect::ScaleOpInferSymbolicShape;
-using paddle::dialect::SelectOpInferSymbolicShape;
-}  // namespace cinn::dialect
diff --git a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/unary_infer_sym-checkpoint.cc b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/unary_infer_sym-checkpoint.cc
deleted file mode 100644
index 8af7fbc36289c..0000000000000
--- a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/unary_infer_sym-checkpoint.cc
+++ /dev/null
@@ -1,2221 +0,0 @@
-// Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/infer_sym_slice_utils.h"
-#include "paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/infer_sym_utils.h"
-#include "paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/unary_infer_sym.h"
-
-namespace {
-std::vector<symbol::DimExpr> GetRealPadding(
-    const std::vector<int32_t> &origin_paddings,
-    const bool global_pooling,
-    const bool adaptive,
-    const std::string padding_algorithm,
-    const std::vector<symbol::DimExpr> data_dims,
-    const std::vector<int32_t> &strides,
-    const std::vector<symbol::DimExpr> &kernel_size) {
-  const auto &GetInitPadding = [&]() -> std::vector<symbol::DimExpr> {
-    std::vector<symbol::DimExpr> res;
-    // set padding size == data_dims.size() * 2
-    if (origin_paddings.size() == data_dims.size()) {
-      for (std::size_t i = 0; i < origin_paddings.size(); ++i) {
-        res.emplace_back(symbol::DimExpr{origin_paddings.at(i)});
-        res.emplace_back(symbol::DimExpr{origin_paddings.at(i)});
-      }
-    } else {
-      PADDLE_ENFORCE_EQ(
-          data_dims.size() * 2,
-          origin_paddings.size(),
-          phi::errors::InvalidArgument(
-              "Paddings size %d should be the same or twice as the "
-              "pooling size %d.",
-              origin_paddings.size(),
-              data_dims.size() * 2));
-      for (std::size_t i = 0; i < origin_paddings.size(); ++i) {
-        res.emplace_back(symbol::DimExpr{origin_paddings.at(i)});
-      }
-    }
-    return res;
-  };
-
-  std::vector<symbol::DimExpr> real_padding = GetInitPadding();
-
-  const auto &UpdatePadding = [&]() {
-    symbol::DimExpr one_dimexpr{1};
-    symbol::DimExpr zero_dimexpr{0};
-    // when padding_algorithm is "VALID" or "SAME"
-    if (padding_algorithm == "SAME") {
-      for (std::size_t i = 0; i < data_dims.size(); ++i) {
-        symbol::DimExpr stride_dimexpr = symbol::DimExpr{strides[i]};
-
-        symbol::DimExpr out_size =
-            (data_dims[i] + stride_dimexpr - one_dimexpr) / stride_dimexpr;
-        symbol::DimExprBuilder builder;
-        symbol::DimExpr pad_sum =
-            builder.Max((out_size - one_dimexpr) * stride_dimexpr +
-                            kernel_size[i] - data_dims[i],
-                        zero_dimexpr);
-        symbol::DimExpr pad_0 = pad_sum / symbol::DimExpr{2};
-        symbol::DimExpr pad_1 = pad_sum - pad_0;
-        real_padding[i * 2] = pad_0;
-        real_padding[i * 2 + 1] = pad_1;
-      }
-    } else if (padding_algorithm == "VALID") {
-      real_padding.assign(real_padding.size(), zero_dimexpr);
-    }
-
-    // if global_pooling == true or adaptive == true, padding will be ignored
-    if (global_pooling || adaptive) {
-      real_padding.assign(real_padding.size(), zero_dimexpr);
-    }
-  };
-
-  UpdatePadding();
-  return real_padding;
-}
-
-symbol::ShapeOrDataDimExprs Pool2dRawInferSymbolicShape(
-    pir::Operation *op,
-    const std::vector<symbol::DimExpr> &kernel_size,
-    pir::InferSymbolicShapeContext *infer_context) {
-  const auto &x_shape_or_data =
-      infer_context->GetShapeOrDataForValue(op->operand_source(0));
-
-  const auto &x_dims = x_shape_or_data.shape();
-  PADDLE_ENFORCE_EQ(
-      x_dims.size() == 4 || x_dims.size() == 5,
-      true,
-      phi::errors::InvalidArgument(
-          "the input of Op(pool) should be 4-D or 5-D Tensor. But "
-          "received: %u-D Tensor.",
-          x_dims.size()));
-
-  PADDLE_ENFORCE_EQ(x_dims.size() - kernel_size.size(),
-                    2U,
-                    phi::errors::InvalidArgument(
-                        "the rank of input minus the size of kernel_size "
-                        "must be equal to 2 in Op(pool). "
-                        "But received: the rank of input is %d and the "
-                        "rank of kernel_size is %d.",
-                        x_dims.size(),
-                        kernel_size.size()));
-
-  std::vector<int32_t> strides = [&]() {
-    std::vector<int32_t> res;
-    const auto &stride_attr =
-        op->attributes().at("strides").dyn_cast<pir::ArrayAttribute>();
-    for (size_t i = 0; i < stride_attr.size(); i++) {
-      res.emplace_back(
-          stride_attr.at(i).dyn_cast<pir::Int32Attribute>().data());
-    }
-    return res;
-  }();
-
-  PADDLE_ENFORCE_EQ(
-      kernel_size.size(),
-      strides.size(),
-      phi::errors::InvalidArgument(
-          "the rank of kernel_size and strides in Op(pool) must be equal. 
" - "But received: the rank of kernel_size is %d and the rank of stride " - "is %d.", - kernel_size.size(), - strides.size())); - - const std::string &data_format = - op->attribute("data_format").AsString(); - const bool channel_last = data_format == "NHWC" || data_format == "NDHWC"; - - const auto &data_dims = [&]() -> std::vector { - if (channel_last) { - return std::vector(x_dims.begin() + 1, x_dims.end() - 1); - } else { - return std::vector(x_dims.begin() + 2, x_dims.end()); - } - }(); - - bool global_pooling = - op->attribute("global_pooling").data(); - bool adaptive = op->attribute("adaptive").data(); - std::string padding_algorithm = - op->attribute("padding_algorithm").AsString(); - - const auto &real_paddings = [&]() -> std::vector { - std::vector paddings; - const auto &padding_attr = - op->attributes().at("paddings").dyn_cast(); - for (size_t i = 0; i < padding_attr.size(); i++) { - paddings.emplace_back( - padding_attr.at(i).dyn_cast().data()); - } - return GetRealPadding(paddings, - global_pooling, - adaptive, - padding_algorithm, - data_dims, - strides, - kernel_size - - ); - }(); - - const auto &real_kernel_size = [&]() -> std::vector { - if (global_pooling) { - return data_dims; - } - return kernel_size; - }(); - - const auto &output_shape_or_data = [&]() -> symbol::ShapeOrDataDimExprs { - std::vector output_shape; - bool ceil_mode = op->attribute("ceil_mode").data(); - if (adaptive) { - output_shape.insert( - output_shape.end(), real_kernel_size.begin(), real_kernel_size.end()); - } else { - for (size_t i = 0; i < data_dims.size(); ++i) { - symbol::DimExpr stride_dimexpr{strides[i]}; - symbol::DimExpr one_dimexpr{1}; - if (!ceil_mode) { - output_shape.emplace_back((data_dims[i] - real_kernel_size[i] + - real_paddings[2 * i] + - real_paddings[2 * i + 1]) / - stride_dimexpr + - one_dimexpr); - } else { - output_shape.emplace_back( - (data_dims[i] - real_kernel_size[i] + real_paddings[2 * i] + - real_paddings[2 * i + 1] + stride_dimexpr - one_dimexpr) / - stride_dimexpr + - one_dimexpr); - } - } - } - - // output_N = input_N - output_shape.insert(output_shape.begin(), x_dims[0]); - // output_C = input_C - if (channel_last) { - output_shape.push_back(x_dims[x_dims.size() - 1]); - } else { - output_shape.insert(output_shape.begin() + 1, x_dims[1]); - } - return symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(output_shape)}; - }(); - - return output_shape_or_data; -} -} // namespace - -namespace paddle::dialect { -using paddle::dialect::details::CreateShapeOrDataForXShape; - -bool AllOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - const auto &axis = details::GetVectorAttr(op, "axis"); - return details::ReduceInferDim(op, - infer_context, - axis, - GetBoolAttr(op, "keepdim"), /*keepdim*/ - axis.size() == 0 /*reduce_all*/); -} - -bool AmaxOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - const auto &axis = details::GetVectorAttr(op, "axis"); - return details::ReduceInferDim(op, - infer_context, - axis, - GetBoolAttr(op, "keepdim"), /*keepdim*/ - axis.size() == 0 /*reduce_all*/); -} - -bool AminOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - const auto &axis = details::GetVectorAttr(op, "axis"); - return details::ReduceInferDim(op, - infer_context, - axis, - GetBoolAttr(op, "keepdim"), /*keepdim*/ - axis.size() == 0 /*reduce_all*/); -} - -bool AnyOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext 
*infer_context) { - const auto &axis = details::GetVectorAttr(op, "axis"); - return details::ReduceInferDim(op, - infer_context, - axis, - GetBoolAttr(op, "keepdim"), /*keepdim*/ - axis.size() == 0 /*reduce_all*/); -} - -bool ArgmaxOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - bool flatten = GetBoolAttr(op, "flatten"); - bool keepdims = GetBoolAttr(op, "keepdims"); - - const auto &input_sym_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(0)).shape(); - int rank = input_sym_shape.size(); - - const auto &axis_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(1)); - int axis = - static_cast(axis_shape_or_data.data().value().at(0).Get()); - if (axis < 0) axis += rank; - - const auto &out_sym_shape = [&] { - std::vector out_sym_shape; - if (flatten) { - if (keepdims) { - out_sym_shape.emplace_back(std::int64_t(rank)); - } else { - out_sym_shape.emplace_back(std::int64_t(0)); - } - } else { - for (int i = 0; i < axis; i++) { - out_sym_shape.emplace_back(input_sym_shape.at(i)); - } - if (keepdims) { - out_sym_shape.emplace_back(std::int64_t(1)); - } - - for (int i = axis + 1; i < rank; i++) { - out_sym_shape.emplace_back(input_sym_shape.at(i)); - } - } - return out_sym_shape; - }(); - - symbol::ShapeOrDataDimExprs shape_data{ - symbol::TensorShapeOrDataDimExprs(out_sym_shape)}; - - infer_context->SetShapeOrDataForValue(op->result(0), shape_data); - return true; -} - -bool ArgminOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - return ArgmaxOpInferSymbolicShape(op, infer_context); -} - -bool AsComplexOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - pir::Value operand_source = op->operand_source(0); - const symbol::ShapeOrDataDimExprs &operand_shape_or_data = - infer_context->GetShapeOrDataForValue(operand_source); - - const std::vector out_dims = [&] { - std::vector out_dims = operand_shape_or_data.shape(); - out_dims.pop_back(); - return out_dims; - }(); - - symbol::ShapeOrDataDimExprs shape_data{ - symbol::TensorShapeOrDataDimExprs(out_dims)}; - - infer_context->SetShapeOrDataForValue(op->result(0), shape_data); - return true; -} -bool AsRealOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - pir::Value operand_source = op->operand_source(0); - const symbol::ShapeOrDataDimExprs &operand_shape_or_data = - infer_context->GetShapeOrDataForValue(operand_source); - - const std::vector out_dims = [&] { - std::vector out_dims = operand_shape_or_data.shape(); - out_dims.push_back(symbol::DimExpr(2)); - return out_dims; - }(); - - symbol::ShapeOrDataDimExprs shape_data{ - symbol::TensorShapeOrDataDimExprs(out_dims)}; - - infer_context->SetShapeOrDataForValue(op->result(0), shape_data); - return true; -} - -bool AssignOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - infer_context->SetShapeOrDataForValue( - op->result(0), - infer_context->GetShapeOrDataForValue(op->operand_source(0))); - return true; -} - -bool Assign_OpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - return AssignOpInferSymbolicShape(op, infer_context); -} - -bool AsStridedOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const std::vector &shape = - paddle::dialect::details::GetVectorAttr(op, "dims"); - - symbol::DimExpr out_unknown = infer_context->GetNextSymName(); - int rank = 
shape.size();
-  std::vector<symbol::DimExpr> out_shape;
-  for (int i = 0; i < rank; ++i) {
-    if (shape[i] == -1) {
-      out_shape.push_back(out_unknown);
-    } else {
-      out_shape.push_back(symbol::DimExpr(shape[i]));
-    }
-  }
-
-  infer_context->SetShapeOrDataForValue(
-      op->result(0),
-      symbol::ShapeOrDataDimExprs{
-          symbol::TensorShapeOrDataDimExprs(out_shape)});
-
-  return true;
-}
-
-bool BipartiteMatchOpInferSymbolicShape(
-    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
-  const auto &dist_mat_shape_or_data =
-      infer_context->GetShapeOrDataForValue(op->operand_source(0));
-  const auto &dims = dist_mat_shape_or_data.shape();
-
-  PADDLE_ENFORCE_EQ(
-      dims.size(),
-      2,
-      phi::errors::InvalidArgument("The rank of Input(DistMat) must be 2."));
-
-  infer_context->SetShapeOrDataForValue(op->result(0), dist_mat_shape_or_data);
-
-  infer_context->SetShapeOrDataForValue(op->result(1), dist_mat_shape_or_data);
-
-  return true;
-}
-
-bool CastOpInferSymbolicShape(pir::Operation *op,
-                              pir::InferSymbolicShapeContext *infer_context) {
-  infer_context->SetShapeOrDataForValue(
-      op->result(0),
-      infer_context->GetShapeOrDataForValue(op->operand_source(0)));
-  return true;
-}
-
-bool Cast_OpInferSymbolicShape(pir::Operation *op,
-                               pir::InferSymbolicShapeContext *infer_context) {
-  return CastOpInferSymbolicShape(op, infer_context);
-}
-
-bool CholeskyOpInferSymbolicShape(
-    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
-  const auto &x_shape =
-      infer_context->GetShapeOrDataForValue(op->operand_source(0));
-
-  auto rank = x_shape.shape().size();
-  PADDLE_ENFORCE_GE(rank,
-                    2,
-                    common::errors::InvalidArgument(
-                        "The Input(X) should have at least 2 dimensions. But "
-                        "received a %d dimension tensor.",
-                        rank));
-
-  infer_context->AddEqualCstr(x_shape.shape()[rank - 2],
-                              x_shape.shape()[rank - 1]);
-
-  infer_context->SetShapeOrDataForValue(op->result(0), x_shape);
-
-  return true;
-}
-
-bool ClipByNormOpInferSymbolicShape(
-    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
-  const auto &input_shape =
-      infer_context->GetShapeOrDataForValue(op->operand_source(0));
-  float max_norm = op->attribute<pir::FloatAttribute>("max_norm").data();
-  PADDLE_ENFORCE_GT(
-      max_norm,
-      0,
-      phi::errors::InvalidArgument("max_norm should be greater than 0. "
-                                   "Received max_norm is %f.",
-                                   max_norm));
-
-  infer_context->SetShapeOrDataForValue(op->result(0), input_shape);
-  return true;
-}
-
-bool ClipByNormSrOpInferSymbolicShape(
-    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
-  return ClipByNormOpInferSymbolicShape(op, infer_context);
-}
-
-bool CummaxOpInferSymbolicShape(pir::Operation *op,
-                                pir::InferSymbolicShapeContext *infer_context) {
-  pir::Value operand_source = op->operand_source(0);
-  const symbol::ShapeOrDataDimExprs &operand_shape_or_data =
-      infer_context->GetShapeOrDataForValue(operand_source);
-
-  infer_context->SetShapeOrDataForValue(op->result(0), operand_shape_or_data);
-  infer_context->SetShapeOrDataForValue(op->result(1), operand_shape_or_data);
-  return true;
-}
-bool CumminOpInferSymbolicShape(pir::Operation *op,
-                                pir::InferSymbolicShapeContext *infer_context) {
-  return CummaxOpInferSymbolicShape(op, infer_context);
-}
-bool CumprodOpInferSymbolicShape(
-    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
-  pir::Value operand_source = op->operand_source(0);
-  const symbol::ShapeOrDataDimExprs &operand_shape_or_data =
-      infer_context->GetShapeOrDataForValue(operand_source);
-  infer_context->SetShapeOrDataForValue(op->result(0), operand_shape_or_data);
-  return true;
-}
-bool Cumprod_OpInferSymbolicShape(
-    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
-  return CumprodOpInferSymbolicShape(op, infer_context);
-}
-bool CumsumOpInferSymbolicShape(pir::Operation *op,
-                                pir::InferSymbolicShapeContext *infer_context) {
-  pir::Value operand_source = op->operand_source(0);
-
-  const symbol::ShapeOrDataDimExprs &operand_shape_or_data =
-      infer_context->GetShapeOrDataForValue(operand_source);
-
-  bool flatten = GetBoolAttr(op, "flatten");
-  if (flatten) {
-    symbol::DimExpr product{1};
-    const auto &dim_exprs = operand_shape_or_data.shape();
-    for (const auto &dim_expr : dim_exprs) {
-      product = product * dim_expr;
-    }
-    const std::vector<symbol::DimExpr> out_dims = {product};
-    symbol::ShapeOrDataDimExprs shape_data{
-        symbol::TensorShapeOrDataDimExprs(out_dims)};
-    infer_context->SetShapeOrDataForValue(op->result(0), shape_data);
-
-  } else {
-    infer_context->SetShapeOrDataForValue(op->result(0), operand_shape_or_data);
-  }
-  return true;
-}
-bool Cumsum_OpInferSymbolicShape(
-    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
-  return CumsumOpInferSymbolicShape(op, infer_context);
-}
-bool ChannelShuffleOpInferSymbolicShape(
-    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
-  const auto &x_shape_or_data =
-      infer_context->GetShapeOrDataForValue(op->operand_source(0));
-  const std::vector<symbol::DimExpr> &input_dims = x_shape_or_data.shape();
-
-  int groups = op->attribute<pir::Int32Attribute>("groups").data();
-  std::string data_format =
-      op->attribute<pir::StrAttribute>("data_format").AsString();
-
-  PADDLE_ENFORCE_EQ(
-      input_dims.size(),
-      4,
-      phi::errors::InvalidArgument("Input should be a 4-D tensor of format [N, "
-                                   "C, H, W] or [N, H, W, C], but got %u.",
-                                   input_dims.size()));
-  PADDLE_ENFORCE_GE(
-      groups,
-      1,
-      phi::errors::InvalidArgument("groups should be larger than 0."));
-  PADDLE_ENFORCE_EQ(
-      data_format == "NCHW" || data_format == "NHWC",
-      true,
-      phi::errors::InvalidArgument("data_format must be one of NCHW and NHWC. 
" - "But received data_format: %s", - data_format)); - - const bool channel_last = (data_format == "NHWC"); - - symbol::DimExpr channels; - if (!channel_last) { - channels = input_dims[1]; - } else { - channels = input_dims[3]; - } - - symbol::DimExpr groups_expr = symbol::DimExpr(groups); - symbol::DimExpr expected_channels = groups_expr * (channels / groups_expr); - - infer_context->AddEqualCstr(channels, expected_channels); - - infer_context->SetShapeOrDataForValue(op->result(0), x_shape_or_data); - - return true; -} - -bool DiagEmbedOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - pir::Value operand_source = op->operand_source(0); - const symbol::ShapeOrDataDimExprs &operand_shape_or_data = - infer_context->GetShapeOrDataForValue(operand_source); - const auto &attributes = op->attributes(); - int dim1 = attributes.at("dim1").dyn_cast().data(); - int dim2 = attributes.at("dim2").dyn_cast().data(); - int offset = attributes.at("offset").dyn_cast().data(); - - const auto &x_dims = operand_shape_or_data.shape(); - int dim1_ = dim1 < 0 ? x_dims.size() + dim1 + 1 : dim1; - int dim2_ = dim2 < 0 ? x_dims.size() + dim2 + 1 : dim2; - int64_t offset_ = static_cast(std::abs(offset)); - symbol::DimExpr new_dim_len = - symbol::DimExpr(offset_) + x_dims.at(x_dims.size() - 1); - - const auto &out_dims = [&] { - std::vector out_dims = x_dims; - out_dims.pop_back(); - out_dims.insert(out_dims.begin() + std::min(dim1_, dim2_), new_dim_len); - out_dims.insert(out_dims.begin() + std::max(dim1_, dim2_), new_dim_len); - return out_dims; - }(); - symbol::ShapeOrDataDimExprs shape_data{ - symbol::TensorShapeOrDataDimExprs(out_dims)}; - infer_context->SetShapeOrDataForValue(op->result(0), shape_data); - return true; -} -bool DiagonalOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - pir::Value operand_source = op->operand_source(0); - const symbol::ShapeOrDataDimExprs &operand_shape_or_data = - infer_context->GetShapeOrDataForValue(operand_source); - const auto &attributes = op->attributes(); - int axis1 = attributes.at("axis1").dyn_cast().data(); - int axis2 = attributes.at("axis2").dyn_cast().data(); - int offset = attributes.at("offset").dyn_cast().data(); - - const auto &x_dims = operand_shape_or_data.shape(); - int axis1_ = axis1 < 0 ? x_dims.size() + axis1 : axis1; - int axis2_ = axis2 < 0 ? x_dims.size() + axis2 : axis2; - - auto out_dims = x_dims; - auto axis1_size = out_dims.at(axis1_); - auto axis2_size = out_dims.at(axis2_); - out_dims.erase(out_dims.begin() + std::max(axis1_, axis2_)); - out_dims.erase(out_dims.begin() + std::min(axis1_, axis2_)); - - symbol::DimExprBuilder builder; - symbol::DimExpr zero{0}; - symbol::DimExpr res_shape; - symbol::DimExpr offset_sym{offset}; - if (offset == 0) { - res_shape = builder.Min(axis1_size, axis2_size); - } else if (offset > 0) { - if (axis2_size.isa()) { - res_shape = (axis2_size.dyn_cast() - offset) > 0 - ? builder.Min(axis1_size, axis2_size - offset_sym) - : zero; - } else { - res_shape = infer_context->GetNextSymName(); - } - } else { - if (axis1_size.isa()) { - res_shape = (axis1_size.dyn_cast() + offset) > 0 - ? 
builder.Min(axis1_size + offset_sym, axis2_size) - : zero; - } else { - res_shape = infer_context->GetNextSymName(); - } - } - out_dims.push_back(symbol::SimplifyDimExpr(res_shape)); - - symbol::ShapeOrDataDimExprs shape_data{ - symbol::TensorShapeOrDataDimExprs(out_dims)}; - infer_context->SetShapeOrDataForValue(op->result(0), shape_data); - return true; -} - -bool DistributeFpnProposalsOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const auto &attributes = op->attributes(); - int32_t min_level = - attributes.at("min_level").dyn_cast().data(); - int32_t max_level = - attributes.at("max_level").dyn_cast().data(); - int32_t num_levels = max_level - min_level + 1; - int64_t batch_size = 1; - - symbol::DimExpr num_rois = - infer_context->GetShapeOrDataForValue(op->operand_source(0)) - .shape() - .at(0); - - const auto &multi_rois_out_shape = [&]() { - symbol::TensorListShapeOrDataDimExprs multi_rois_out_shape; - if (num_levels == 1) { - multi_rois_out_shape.emplace_back( - symbol::TensorShapeOrDataDimExprs({num_rois, 4})); - } else { - symbol::DimExpr last_dim = num_rois; - for (int i = 0; i < num_levels - 1; i++) { - const auto &next_sym_name = infer_context->GetNextSymName(); - std::vector level_dim = {next_sym_name, 4}; - multi_rois_out_shape.emplace_back( - symbol::TensorShapeOrDataDimExprs(level_dim)); - last_dim = last_dim - level_dim.at(0); - } - multi_rois_out_shape.emplace_back(symbol::TensorShapeOrDataDimExprs( - {infer_context->GetNextSymName(), 4})); - } - - return multi_rois_out_shape; - }(); - - const auto &rois_num_per_level_out_shape = [&]() { - symbol::TensorListShapeOrDataDimExprs rois_num_per_level_out_shape; - rois_num_per_level_out_shape.resize( - num_levels, symbol::TensorShapeOrDataDimExprs({batch_size})); - return rois_num_per_level_out_shape; - }(); - - const auto &restore_ind = [&]() { - if (op->operand_source(1)) { - return symbol::TensorShapeOrDataDimExprs( - {infer_context->GetNextSymName(), 1}); - } - return symbol::TensorShapeOrDataDimExprs({num_rois, 1}); - }(); - - infer_context->SetShapeOrDataForValue(op->result(0), multi_rois_out_shape); - infer_context->SetShapeOrDataForValue(op->result(1), - rois_num_per_level_out_shape); - infer_context->SetShapeOrDataForValue(op->result(2), restore_ind); - return true; -} - -bool EighOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - const auto &x_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(0)).shape(); - std::vector out_shape; - for (size_t i = 0; i < x_shape.size() - 1; ++i) { - out_shape.push_back(x_shape.at(i)); - } - infer_context->SetShapeOrDataForValue( - op->result(0), symbol::TensorShapeOrDataDimExprs(out_shape)); - infer_context->SetShapeOrDataForValue( - op->result(1), symbol::TensorShapeOrDataDimExprs(x_shape)); - return true; -} - -bool EigvalshOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - return EighOpInferSymbolicShape(op, infer_context); -} - -bool FakeChannelWiseQuantizeAbsMaxOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const auto &x_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - - int bit_length = op->attribute("bit_length").data(); - int quant_axis = op->attribute("quant_axis").data(); - - PADDLE_ENFORCE_EQ(bit_length >= 1 && bit_length <= 16, - true, - common::errors::InvalidArgument( - "'bit_length' should be between 1 and 16, but " - "the received is %d", 
- bit_length)); - PADDLE_ENFORCE_EQ( - quant_axis == 0 || quant_axis == 1, - true, - common::errors::InvalidArgument("'quant_axis' should be 0 or 1, but " - "the received is %d", - quant_axis)); - - infer_context->SetShapeOrDataForValue( - op->result(0), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(x_shape_or_data.shape())}); - - std::vector out_scale_shape = { - x_shape_or_data.shape()[quant_axis]}; - infer_context->SetShapeOrDataForValue( - op->result(1), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(out_scale_shape)}); - - return true; -} - -bool FftC2cOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - const auto &x_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - std::vector x_dims = x_shape_or_data.shape(); - - // Set the output shape to be the same as the input shape - infer_context->SetShapeOrDataForValue( - op->result(0), - symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs(x_dims)}); - - return true; -} - -bool FftC2rOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - const auto &x_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - std::vector x_dims = x_shape_or_data.shape(); - - auto axes = paddle::dialect::details::GetVectorAttr(op, "axes"); - int64_t last_dim_size = - op->attribute("last_dim_size").data(); - int last_fft_axis = static_cast(axes.back()); - - std::vector out_dims = x_dims; - - if (last_dim_size > 0) { - out_dims[last_fft_axis] = symbol::DimExpr(last_dim_size); - } else { - symbol::DimExprBuilder builder; - out_dims[last_fft_axis] = - builder.Mul(x_dims[last_fft_axis], 2) - symbol::DimExpr{1}; - } - - infer_context->SetShapeOrDataForValue( - op->result(0), - symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs(out_dims)}); - - return true; -} - -bool FftR2cOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - const auto &x_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - std::vector x_dims = x_shape_or_data.shape(); - - auto axes = paddle::dialect::details::GetVectorAttr(op, "axes"); - bool onesided = op->attribute("onesided").data(); - - std::vector out_dims = x_dims; - - if (onesided) { - int last_fft_axis = static_cast(axes.back()); - symbol::DimExprBuilder builder; - out_dims[last_fft_axis] = - builder.Add(builder.Div(x_dims[last_fft_axis], 2), 1); - } - - infer_context->SetShapeOrDataForValue( - op->result(0), - symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs(out_dims)}); - - return true; -} - -bool FillDiagonalOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const auto &x_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - std::vector x_dims = x_shape_or_data.shape(); - - infer_context->SetShapeOrDataForValue( - op->result(0), - symbol::ShapeOrDataDimExprs{symbol::TensorShapeOrDataDimExprs(x_dims)}); - - return true; -} - -bool FillDiagonal_OpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - return FillDiagonalOpInferSymbolicShape(op, infer_context); -} - -bool FlattenOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const auto &attributes = op->attributes(); - int start_axis = - attributes.at("start_axis").dyn_cast().data(); - int stop_axis = - attributes.at("stop_axis").dyn_cast().data(); - - const 
auto &x_shape =
-      infer_context->GetShapeOrDataForValue(op->operand_source(0)).shape();
-  int in_dims_size = x_shape.size();
-
-  if (in_dims_size == 0) {
-    PADDLE_ENFORCE_EQ(
-        start_axis == 0 || start_axis == -1,
-        true,
-        common::errors::InvalidArgument("The start_axis should be 0 or -1 when "
-                                        "the input tensor is a 0D-Tensor"));
-    PADDLE_ENFORCE_EQ(stop_axis == 0 || stop_axis == -1,
-                      true,
-                      common::errors::InvalidArgument(
-                          "The stop_axis should be 0 or -1 when the "
-                          "input tensor is a 0D-Tensor"));
-    // this can ensure out shape {1}
-    start_axis = 0;
-    stop_axis = -1;
-  }
-
-  if (start_axis < 0) {
-    start_axis = start_axis + in_dims_size;
-  }
-  if (stop_axis < 0) {
-    stop_axis = stop_axis + in_dims_size;
-  }
-  if (in_dims_size > 0) {
-    PADDLE_ENFORCE_GE(
-        stop_axis,
-        start_axis,
-        common::errors::InvalidArgument("The stop_axis should be greater "
-                                        "than or equal to start_axis."));
-  }
-
-  symbol::DimExpr outer{1};
-  std::vector<symbol::DimExpr> out_shape;
-  out_shape.reserve(in_dims_size - stop_axis + start_axis + 1);
-  for (int i = 0; i < start_axis; ++i) {
-    out_shape.push_back(x_shape.at(i));
-  }
-  for (int i = start_axis; i <= stop_axis; i++) {
-    outer = outer * x_shape.at(i);
-  }
-  out_shape.push_back(outer);
-  for (int i = stop_axis + 1; i < in_dims_size; i++) {
-    out_shape.push_back(x_shape.at(i));
-  }
-
-  symbol::ShapeOrDataDimExprs out_shape_data{
-      symbol::TensorShapeOrDataDimExprs(out_shape)};
-  infer_context->SetShapeOrDataForValue(op->result(0), out_shape_data);
-
-  std::vector<symbol::DimExpr> xshape_shape = x_shape;
-  xshape_shape.insert(xshape_shape.begin(), symbol::DimExpr{0});
-  symbol::ShapeOrDataDimExprs xshape_shape_data{
-      symbol::TensorShapeOrDataDimExprs(xshape_shape)};
-  infer_context->SetShapeOrDataForValue(op->result(1), xshape_shape_data);
-  return true;
-}
-
-bool Flatten_OpInferSymbolicShape(
-    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
-  return FlattenOpInferSymbolicShape(op, infer_context);
-}
-
-bool FoldOpInferSymbolicShape(pir::Operation *op,
-                              pir::InferSymbolicShapeContext *infer_context) {
-  const auto &x_shape =
-      infer_context->GetShapeOrDataForValue(op->operand_source(0)).shape();
-
-  std::vector<symbol::DimExpr> out_shape;
-  out_shape.push_back(x_shape[0]);
-
-  const std::vector<int32_t> &output_sizes =
-      paddle::dialect::details::GetVectorAttr<int32_t>(op, "output_sizes");
-  PADDLE_ENFORCE_EQ(
-      output_sizes.size(),
-      2,
-      common::errors::InvalidArgument(
-          "It is expected output_size equals to 2, but got size %d",
-          output_sizes.size()));
-  infer_context->AddGreatThanOneCstr(output_sizes[0]);
-  infer_context->AddGreatThanOneCstr(output_sizes[1]);
-
-  const std::vector<int32_t> &kernel_sizes =
-      paddle::dialect::details::GetVectorAttr<int32_t>(op, "kernel_sizes");
-  const std::vector<int32_t> &dilations =
-      paddle::dialect::details::GetVectorAttr<int32_t>(op, "dilations");
-  const std::vector<int32_t> &strides =
-      paddle::dialect::details::GetVectorAttr<int32_t>(op, "strides");
-  const std::vector<int32_t> &paddings =
-      paddle::dialect::details::GetVectorAttr<int32_t>(op, "paddings");
-
-  PADDLE_ENFORCE_EQ(
-      kernel_sizes.size(),
-      2,
-      common::errors::InvalidArgument(
-          "It is expected kernel_size equals to 2, but got size %d",
-          kernel_sizes.size()));
-  PADDLE_ENFORCE_EQ(
-      strides.size(),
-      2,
-      common::errors::InvalidArgument(
-          "It is expected strides_size equals to 2, but got size %d",
-          strides.size()));
-  PADDLE_ENFORCE_EQ(
-      paddings.size(),
-      4,
-      common::errors::InvalidArgument(
-          "It is expected paddings_size equals to 4, but got size %d",
-          paddings.size()));
-  PADDLE_ENFORCE_EQ(
-      dilations.size(),
-      2,
-      
common::errors::InvalidArgument( - "It is expected dilations_size equals to 2, but got size %d", - dilations.size())); - - int blocks_height = (output_sizes[0] + 2 * paddings[0] - - (dilations[0] * (kernel_sizes[0] - 1) + 1)) / - strides[0] + - 1; - int blocks_width = (output_sizes[1] + 2 * paddings[1] - - (dilations[1] * (kernel_sizes[1] - 1) + 1)) / - strides[1] + - 1; - - infer_context->AddEqualCstr((blocks_height * blocks_width), x_shape[2]); - - out_shape.push_back(x_shape[1] / (kernel_sizes[0] * kernel_sizes[1])); - - out_shape.push_back(symbol::DimExpr(output_sizes[0])); - out_shape.push_back(symbol::DimExpr(output_sizes[1])); - - infer_context->SetShapeOrDataForValue( - op->result(0), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(out_shape)}); - - return true; -} - -bool IdentityLossOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const auto &input_shape = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - int reduction = op->attribute("reduction").data(); - if (reduction == 2) { - infer_context->SetShapeOrDataForValue(op->result(0), input_shape); - } else { - std::vector out_shape = {}; - infer_context->SetShapeOrDataForValue( - op->result(0), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(out_shape)}); - } - - return true; -} - -bool IdentityLoss_OpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - return IdentityLossOpInferSymbolicShape(op, infer_context); -} - -bool KthvalueOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - pir::Value operand_source = op->operand_source(0); - const symbol::ShapeOrDataDimExprs &operand_shape_or_data = - infer_context->GetShapeOrDataForValue(operand_source); - const auto &attributes = op->attributes(); - int axis = attributes.at("axis").dyn_cast().data(); - bool keepdim = GetBoolAttr(op, "keepdim"); - - const auto &input_dims = operand_shape_or_data.shape(); - const int &dim_size = input_dims.size(); - if (axis < 0) axis += dim_size; - std::vector out_dims; - for (int i = 0; i < axis; i++) { - out_dims.emplace_back(input_dims.at(i)); - } - if (keepdim && dim_size > 0) { - out_dims.emplace_back(symbol::DimExpr(1)); - } - for (int i = axis + 1; i < dim_size; i++) { - out_dims.emplace_back(input_dims.at(i)); - } - symbol::ShapeOrDataDimExprs shape_data{ - symbol::TensorShapeOrDataDimExprs(out_dims)}; - infer_context->SetShapeOrDataForValue(op->result(0), shape_data); - infer_context->SetShapeOrDataForValue(op->result(1), shape_data); - return true; -} - -bool LpPool2dOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const auto &kernel_size = [&]() -> std::vector { - std::vector kernel_size_int_vec = - op->attribute("kernel_size") - .data() - .GetData(); - return details::VecInt642Expr(kernel_size_int_vec); - }(); - infer_context->SetShapeOrDataForValue( - op->result(0), - Pool2dRawInferSymbolicShape(op, kernel_size, infer_context)); - return true; -} - -bool LogcumsumexpOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - // same as CumsumOpInferSymbolicShape - return CumsumOpInferSymbolicShape(op, infer_context); -} - -bool LogsumexpOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - bool keepdim = GetBoolAttr(op, "keepdim"); - std::vector axis_in = details::GetVectorAttr(op, "axis"); - std::vector axis; - axis.reserve(axis_in.size()); 
- std::for_each(axis_in.begin(), axis_in.end(), [&axis](const int &t) { - axis.push_back(static_cast(t)); - }); - bool reduce_all = axis.size() == 0 ? true : false; - return details::ReduceInferDim(op, infer_context, axis, keepdim, reduce_all); -} - -bool MaxOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - bool keepdim = GetBoolAttr(op, "keepdim"); - - const std::vector axis = [&] { - pir::Operation *axis_gen_op = op->operand_source(1).defining_op(); - std::vector axis_vec; - if (axis_gen_op->isa()) { - axis_vec = details::GetVectorAttr( - axis_gen_op->dyn_cast(), "value"); - } else { - // TODO(lanxianghit): there's other source: pir::VectorType, - // paddle::dialect::DenseTensorType, but after PRIM, maybe always - // FullIntArrayOp, to be confirmed - PADDLE_THROW(common::errors::Unimplemented( - "MaxOpInferSymbolicShape: 'axis' only " - "support FullIntArrayOp's result now.")); - } - return axis_vec; - }(); - - bool reduce_all = axis.size() == 0 ? true : false; - - return details::ReduceInferDim(op, infer_context, axis, keepdim, reduce_all); -} - -bool MaxoutOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - const auto &x_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - const std::vector &in_x_dims = x_shape_or_data.shape(); - - int groups = op->attribute("groups").data(); - int axis = op->attribute("axis").data(); - - if (axis < 0) { - axis += in_x_dims.size(); - } - - std::vector output_shape = in_x_dims; - output_shape[axis] = in_x_dims[axis] / groups; - infer_context->SetShapeOrDataForValue( - op->result(0), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(output_shape)}); - - return true; -} - -bool MinOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - return MaxOpInferSymbolicShape(op, infer_context); -} - -bool MeanAllOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const auto &x_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - const std::vector &x_dims = x_shape_or_data.shape(); - - PADDLE_ENFORCE_GT( - x_dims.size(), - 0, - phi::errors::InvalidArgument("Input(x) of MeanAllOp must have rank " - "greater than 0, but received rank 0.")); - - std::vector output_shape = {}; - - infer_context->SetShapeOrDataForValue( - op->result(0), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(output_shape)}); - - return true; -} - -bool NonzeroOpInferSymbolicShape( - pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) { - const auto &x_shape_or_data = - infer_context->GetShapeOrDataForValue(op->operand_source(0)); - const auto &x_shape = x_shape_or_data.shape(); - int rank = x_shape.size(); - - PADDLE_ENFORCE_GE( - rank, - 1UL, - common::errors::InvalidArgument( - "Input(x) should have number of dimension at least 1.")); - - std::string sym_name = infer_context->GetNextSymName(); - std::vector out_shape{symbol::DimExpr{sym_name}, - symbol::DimExpr{rank}}; - - symbol::ShapeOrDataDimExprs shape_data{ - symbol::TensorShapeOrDataDimExprs(out_shape)}; - infer_context->SetShapeOrDataForValue(op->result(0), shape_data); - return true; -} - -bool NumelOpInferSymbolicShape(pir::Operation *op, - pir::InferSymbolicShapeContext *infer_context) { - std::vector out_shape = {}; - infer_context->SetShapeOrDataForValue( - op->result(0), - symbol::ShapeOrDataDimExprs{ - symbol::TensorShapeOrDataDimExprs(out_shape)}); - - 
-  return true;
-}
-
-bool PadOpInferSymbolicShape(pir::Operation *op,
-                             pir::InferSymbolicShapeContext *infer_context) {
-  // input(0): Tensor x
-  const auto &x_shape_or_data =
-      infer_context->GetShapeOrDataForValue(op->operand_source(0));
-  PADDLE_ENFORCE_EQ(x_shape_or_data.data().has_value(),
-                    false,
-                    common::errors::InvalidArgument(
-                        "InferSymbolicShape of PadOp only support input with "
-                        "value now."));
-  const auto &x_dims_sym = x_shape_or_data.shape();
-  const size_t rank = x_dims_sym.size();
-
-  // input(1): int[] paddings
-  std::vector<int32_t> paddings =
-      paddle::dialect::details::GetVectorAttr<int32_t>(op, "paddings");
-  PADDLE_ENFORCE_EQ(rank * 2,
-                    paddings.size(),
-                    common::errors::InvalidArgument(
-                        "The size of paddings should be 2 * input's rank. But "
-                        "got paddings.size() = %d, input's rank = %d.",
-                        paddings.size(),
-                        rank));
-
-  // output
-  const auto &out_dims = [&] {
-    std::vector<symbol::DimExpr> out_dims;
-    out_dims.reserve(rank);
-    for (size_t i = 0; i < rank; ++i) {
-      out_dims.push_back(x_dims_sym.at(i) + paddings.at(2 * i) +
-                         paddings.at(2 * i + 1));
-    }
-    return out_dims;
-  }();
-
-  infer_context->SetShapeOrDataForValue(
-      op->result(0), symbol::TensorShapeOrDataDimExprs(out_dims));
-
-  return true;
-}
-
-bool Pad3dOpInferSymbolicShape(pir::Operation *op,
-                               pir::InferSymbolicShapeContext *infer_context) {
-  const auto &x_shape =
-      infer_context->GetShapeOrDataForValue(op->operand_source(0)).shape();
-  PADDLE_ENFORCE_EQ(x_shape.size(),
-                    5,
-                    common::errors::InvalidArgument(
-                        "The size of Input(X)'s dimension should be equal to "
-                        "5, but received %d. ",
-                        x_shape.size()));
-  const auto &paddings_shape =
-      infer_context->GetShapeOrDataForValue(op->operand_source(1));
-  if (!paddings_shape.data().has_value()) {
-    std::stringstream ss;
-    ss << paddings_shape;
-    PADDLE_THROW(
-        common::errors::InvalidArgument("The data of paddings's symbol shape "
-                                        "should have value, but now got [%s].",
-                                        ss.str()));
-  }
-  const std::string &data_format =
-      op->attribute<pir::StrAttribute>("data_format").AsString();
-
-  const std::vector<symbol::DimExpr> &out_dims = [&] {
-    std::vector<symbol::DimExpr> out_dims = x_shape;
-    const auto &paddings = paddings_shape.data().value();
-    PADDLE_ENFORCE_EQ(paddings.size(),
-                      6,
-                      common::errors::InvalidArgument(
-                          "Shape of Input(Paddings) should be equal to "
-                          "[6], but received [%d].",
-                          paddings.size()));
-    if (data_format == "NCDHW") {
-      out_dims.at(1) = x_shape.at(1);
-      out_dims.at(2) = x_shape.at(2) + paddings.at(4) + paddings.at(5);
-      out_dims.at(3) = x_shape.at(3) + paddings.at(2) + paddings.at(3);
-      out_dims.at(4) = x_shape.at(4) + paddings.at(0) + paddings.at(1);
-    } else {
-      out_dims.at(1) = x_shape.at(1) + paddings.at(4) + paddings.at(5);
-      out_dims.at(2) = x_shape.at(2) + paddings.at(2) + paddings.at(3);
-      out_dims.at(3) = x_shape.at(3) + paddings.at(0) + paddings.at(1);
-      out_dims.at(4) = x_shape.at(4);
-    }
-    return out_dims;
-  }();
-
-  infer_context->SetShapeOrDataForValue(
-      op->result(0), symbol::TensorShapeOrDataDimExprs(out_dims));
-
-  return true;
-}
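For illustration, PadOp's rule above is out[i] = x[i] + paddings[2*i] +
paddings[2*i+1]; assuming x.shape = [S0, 4] and paddings = [0, 0, 1, 2]:

    // out_dims = [S0 + 0 + 0, 4 + 1 + 2] = [S0, 7]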
-
-bool Pool2dOpInferSymbolicShape(pir::Operation *op,
-                                pir::InferSymbolicShapeContext *infer_context) {
-  const auto &kernel_size_shape_or_data =
-      infer_context->GetShapeOrDataForValue(op->operand_source(1));
-  const auto &kernel_size =
-      details::GetExprVecFromData(kernel_size_shape_or_data);
-  infer_context->SetShapeOrDataForValue(
-      op->result(0),
-      Pool2dRawInferSymbolicShape(op, kernel_size, infer_context));
-  return true;
-}
-
-bool ProdOpInferSymbolicShape(pir::Operation *op,
-                              pir::InferSymbolicShapeContext *infer_context) {
-  bool keepdim = GetBoolAttr(op, "keepdim");
-  bool reduce_all = GetBoolAttr(op, "reduce_all");
-
-  auto axis_gen_op = op->operand_source(1).defining_op();
-  if (axis_gen_op->isa<paddle::dialect::FullIntArrayOp>()) {
-    std::vector<int64_t> axis = details::GetVectorAttr(
-        axis_gen_op->dyn_cast<paddle::dialect::FullIntArrayOp>(), "value");
-    return details::ReduceInferDim(
-        op, infer_context, axis, keepdim, reduce_all);
-  } else {
-    // TODO(lanxianghit): deal with other sources: pir::VectorType,
-    // paddle::dialect::DenseTensorType
-    PADDLE_THROW(
-        common::errors::Unimplemented("ProdOpInferSymbolicShape: 'axis' only "
-                                      "support FullIntArrayOp's result now."));
-  }
-
-  return true;
-}
-
-bool RepeatInterleaveOpInferSymbolicShape(
-    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
-  pir::Value operand_source = op->operand_source(0);
-  const symbol::ShapeOrDataDimExprs &operand_shape_or_data =
-      infer_context->GetShapeOrDataForValue(operand_source);
-
-  const auto &attributes = op->attributes();
-  int repeats = attributes.at("repeats").dyn_cast<pir::Int32Attribute>().data();
-  // TODO: handle the case where 'axis' is null
-  int axis = attributes.at("axis").dyn_cast<pir::Int32Attribute>().data();
-
-  const std::vector<symbol::DimExpr> &in_dims_sym = [&] {
-    std::vector<symbol::DimExpr> dims;
-    if (operand_shape_or_data.data().has_value()) {
-      dims = operand_shape_or_data.data().value();
-    } else {
-      dims = operand_shape_or_data.shape();
-    }
-    return dims;
-  }();
-
-  int x_rank = in_dims_sym.size();
-  if (axis < 0) axis += x_rank;
-
-  const auto &out_sym_shape = [&] {
-    std::vector<symbol::DimExpr> out_sym_shape;
-    for (int i = 0; i < x_rank; i++) {
-      if (i == axis) {
-        out_sym_shape.push_back(in_dims_sym.at(i) * repeats);
-      } else {
-        out_sym_shape.push_back(in_dims_sym.at(i));
-      }
-    }
-    return out_sym_shape;
-  }();
-
-  infer_context->SetShapeOrDataForValue(
-      op->result(0),
-      symbol::ShapeOrDataDimExprs{
-          symbol::TensorShapeOrDataDimExprs(out_sym_shape)});
-
-  return true;
-}
-
-bool ReshapeOpInferSymbolicShape(
-    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
-  const symbol::ShapeOrDataDimExprs &x_dim_expr =
-      infer_context->GetShapeOrDataForValue(op->operand_source(0));
-  const symbol::ShapeOrDataDimExprs &shape_dim_expr =
-      infer_context->GetShapeOrDataForValue(op->operand_source(1));
-
-  const auto &GetProduct = [&](const auto &dim_exprs, const auto &Filter) {
-    symbol::DimExpr product{1};
-    for (const auto &dim_expr : dim_exprs) {
-      if (Filter(dim_expr)) {
-        product = product * dim_expr;
-      }
-    }
-    return product;
-  };
-
-  const auto &IsNotMinusOne = [&](const symbol::DimExpr &dim_expr) {
-    if (dim_expr.isa<int64_t>()) {
-      return dim_expr.dyn_cast<int64_t>() != static_cast<int64_t>(-1);
-    }
-    return true;
-  };
-
-  const auto &IsPositiveInteger = [&](const symbol::DimExpr &dim_expr) {
-    if (dim_expr.isa<int64_t>()) {
-      return dim_expr.dyn_cast<int64_t>() > static_cast<int64_t>(0);
-    }
-    return true;
-  };
-
-  const auto &IsZero = [&](const symbol::DimExpr &dim_expr) {
-    if (dim_expr.isa<int64_t>()) {
-      return dim_expr.dyn_cast<int64_t>() == static_cast<int64_t>(0);
-    }
-    return false;
-  };
-
-  const std::vector<symbol::DimExpr> out_dims = [&] {
-    const auto &original_shape =
-        infer_context->GetShapeOrDataForValue(op->operand_source(0)).shape();
-    ExprVec target_shape = details::GetExprVecFromData(shape_dim_expr);
-
-    // replace '0' with the corresponding dim of the original shape
-    for (size_t i = 0; i < target_shape.size(); i++) {
-      if (i < original_shape.size() && IsZero(target_shape.at(i))) {
-        target_shape.at(i) = original_shape.at(i);
-      }
-    }
-
-    // replace '-1' with the inferred dim
-    const auto &numel =
-        GetProduct(original_shape, [](const auto &) { return true; });
-    const auto &product_exclude_minus_one =
-        GetProduct(target_shape, IsPositiveInteger);
-
-    const auto &input_dims = target_shape;
-
-    std::vector<symbol::DimExpr> out_dims;
-    out_dims.reserve(input_dims.size());
-    for (size_t i = 0; i < input_dims.size(); ++i) {
-      auto out_dim_expr = IsNotMinusOne(input_dims.at(i))
-                              ? input_dims.at(i)
-                              : (numel / product_exclude_minus_one);
-      out_dims.emplace_back(out_dim_expr);
-    }
-    return out_dims;
-  }();
-
-  symbol::ShapeOrDataDimExprs shape_data = [&] {
-    if (x_dim_expr.data().has_value()) {
-      return symbol::TensorShapeOrDataDimExprs(out_dims,
-                                               x_dim_expr.data().value());
-    }
-    return symbol::TensorShapeOrDataDimExprs(out_dims);
-  }();
-
-  infer_context->SetShapeOrDataForValue(op->result(0), shape_data);
-  return true;
-}
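For illustration, assuming x.shape = [S0, 6, 8] and a target shape of
[0, -1], the replacement rules above resolve to:

    // '0'  -> copy of input dim 0            -> S0
    // '-1' -> numel / product of known dims  -> (S0 * 6 * 8) / S0 = 48
    // out_dims = [S0, 48]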
-
-bool Reshape_OpInferSymbolicShape(
-    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
-  return ReshapeOpInferSymbolicShape(op, infer_context);
-}
-
-bool ShapeOpInferSymbolicShape(pir::Operation *op,
-                               pir::InferSymbolicShapeContext *infer_context) {
-  const symbol::ShapeOrDataDimExprs &operand_shape_or_data =
-      infer_context->GetShapeOrDataForValue(op->operand_source(0));
-  const auto &out_data = operand_shape_or_data.shape();
-  const std::vector<symbol::DimExpr> shape{std::int64_t(out_data.size())};
-  symbol::ShapeOrDataDimExprs shape_or_data{
-      symbol::TensorShapeOrDataDimExprs(shape, out_data)};
-
-  infer_context->SetShapeOrDataForValue(op->result(0), shape_or_data);
-  return true;
-}
-
-bool ShapeSrOpInferSymbolicShape(
-    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
-  return ShapeOpInferSymbolicShape(op, infer_context);
-}
-
-bool SliceOpInferSymbolicShape(pir::Operation *op,
-                               pir::InferSymbolicShapeContext *infer_context) {
-  pir::Value operand_source = op->operand_source(0);
-  pir::Value operand_starts = op->operand_source(1);
-  pir::Value operand_ends = op->operand_source(2);
-  pir::Value res = op->result(0);
-
-  const symbol::ShapeOrDataDimExprs &starts_shape_data =
-      infer_context->GetShapeOrDataForValue(operand_starts);
-  const symbol::ShapeOrDataDimExprs &ends_shape_data =
-      infer_context->GetShapeOrDataForValue(operand_ends);
-
-  std::vector<int64_t> axes_vec = details::GetVectorAttr<int64_t>(op, "axes");
-
-  ExprVec starts = slice_utils::GetExprVecFromData(starts_shape_data);
-  ExprVec ends = slice_utils::GetExprVecFromData(ends_shape_data);
-
-  std::vector<int64_t> infer_flags =
-      details::GetVectorAttr<int64_t>(op, "infer_flags");
-  const std::vector<int64_t> decrease_axis =
-      details::GetVectorAttr<int64_t>(op, "decrease_axis");
-
-  infer_context->SetShapeOrDataForValue(
-      res,
-      slice_utils::SliceRawInferSymbolicShape(operand_source,
-                                              res,
-                                              starts,
-                                              ends,
-                                              axes_vec,
-                                              infer_flags,
-                                              decrease_axis,
-                                              infer_context));
-
-  return true;
-}
-
-bool SplitOpInferSymbolicShape(pir::Operation *op,
-                               pir::InferSymbolicShapeContext *infer_context) {
-  // input
-  const auto &x_shape_or_data =
-      infer_context->GetShapeOrDataForValue(op->operand_source(0));
-  PADDLE_ENFORCE_EQ(x_shape_or_data.data().has_value(),
-                    false,
-                    common::errors::InvalidArgument(
-                        "InferSymbolicShape of SplitOp only support input with "
-                        "value now."));
-  const auto &x_dims_sym = x_shape_or_data.shape();
-
-  // axis
-  CHECK(op->operand_source(2).defining_op()->isa<paddle::dialect::FullOp>());
-
-  int64_t axis = op->operand_source(2)
-                     .defining_op<paddle::dialect::FullOp>()
-                     .attributes()
-                     .at("value")
-                     .dyn_cast<paddle::dialect::ScalarAttribute>()
-                     .data()
-                     .to<int64_t>();
-  size_t rank = x_dims_sym.size();
-  axis = axis >= 0 ? axis : std::max(int64_t(0), int64_t(axis + rank));
-
-  // sections
-  const std::vector<symbol::DimExpr> &sections_sym =
-      details::GetExprVecFromData(
-          infer_context->GetShapeOrDataForValue(op->operand_source(1)));
-
-  // output
-  const symbol::TensorListShapeOrDataDimExprs &output_shape_data_list = [&] {
-    const auto &GetSum = [&](const auto &dim_exprs, const auto &Filter) {
-      symbol::DimExpr sum{0};
-      for (const auto &dim_expr : dim_exprs) {
-        if (Filter(dim_expr)) {
-          sum = sum + dim_expr;
-        }
-      }
-      return sum;
-    };
-    const auto &All = [&](const auto &dim_exprs, const auto &Cond) {
-      for (const auto &dim_expr : dim_exprs) {
-        if (!Cond(dim_expr)) {
-          return false;
-        }
-      }
-      return true;
-    };
-    const auto &IsNotMinusOne = [&](const symbol::DimExpr &dim_expr) {
-      if (dim_expr.isa<int64_t>()) {
-        return dim_expr.dyn_cast<int64_t>() != static_cast<int64_t>(-1);
-      }
-      return true;
-    };
-    const auto &sum_exclude_minus_one = GetSum(sections_sym, IsNotMinusOne);
-
-    const bool &all_sections_sym_not_minus_one =
-        All(sections_sym, IsNotMinusOne);
-    if (all_sections_sym_not_minus_one) {
-      infer_context->AddEqualCstr(x_dims_sym.at(axis), sum_exclude_minus_one);
-    }
-
-    symbol::TensorListShapeOrDataDimExprs shape_data_list;
-    std::vector<symbol::DimExpr> output_dims_sym = x_dims_sym;
-    if (!all_sections_sym_not_minus_one && sections_sym.size() == 1) {
-      VLOG(3) << "[SplitOp] -1 is the only split section. The output shape is "
-                 "identical to the input shape.";
-      shape_data_list.push_back(
-          symbol::TensorShapeOrDataDimExprs(output_dims_sym));
-      return shape_data_list;
-    }
-    for (uint32_t idx = 0; idx < sections_sym.size(); idx++) {
-      const auto &section_sym = sections_sym.at(idx);
-      output_dims_sym.at(axis) =
-          IsNotMinusOne(section_sym)
-              ? section_sym
-              : x_dims_sym.at(axis) - sum_exclude_minus_one;
-
-      shape_data_list.push_back(
-          symbol::TensorShapeOrDataDimExprs(output_dims_sym));
-    }
-    return shape_data_list;
-  }();
-
-  infer_context->SetShapeOrDataForValue(
-      op->result(0), symbol::ShapeOrDataDimExprs{output_shape_data_list});
-
-  return true;
-}
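For illustration, assuming x.shape = [S0, 9], axis = 1 and sections =
[2, -1, 3], the '-1' section above resolves to 9 - (2 + 3) = 4, so the
inferred output shapes are:

    // [S0, 2], [S0, 4], [S0, 3]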
-
-bool SplitWithNumOpInferSymbolicShape(
-    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
-  const symbol::ShapeOrDataDimExprs &axis_shape_data =
-      infer_context->GetShapeOrDataForValue(op->operand_source(1));
-  PADDLE_ENFORCE_EQ(
-      axis_shape_data.data().has_value(),
-      true,
-      common::errors::InvalidArgument(
-          "In InferSymbolicShape, axis of SplitWithNumOp is null"));
-  const std::vector<symbol::DimExpr> &axis_data =
-      axis_shape_data.data().value();
-  PADDLE_ENFORCE_EQ(
-      axis_data.size() == 1,
-      true,
-      common::errors::InvalidArgument(
-          "In SplitWithNumOp, data of axis should be one dimension"));
-
-  const auto &attributes = op->attributes();
-  int num = attributes.at("num").dyn_cast<pir::Int32Attribute>().data();
-
-  const auto &x_s_or_d =
-      infer_context->GetShapeOrDataForValue(op->operand_source(0));
-  int rank = x_s_or_d.shape().size();
-
-  const auto &out_s_d = [&](int64_t split_axis, int64_t res_num) {
-    symbol::DimExpr input_axis_dim = x_s_or_d.shape().at(split_axis);
-    symbol::DimExpr axis_shape = input_axis_dim / symbol::DimExpr{res_num};
-
-    std::vector<symbol::DimExpr> res_s_d;
-    for (size_t i = 0; i < x_s_or_d.shape().size(); ++i) {
-      const auto &sym_dim = split_axis == static_cast<int64_t>(i)
-                                ? axis_shape
-                                : x_s_or_d.shape().at(i);
-      res_s_d.push_back(sym_dim);
-    }
-    return symbol::TensorShapeOrDataDimExprs(res_s_d);
-  };
-
-  if (axis_data.at(0).isa<int64_t>()) {
-    // case 1: the DimExpr of axis is an int. axis_shape_or_data: {shape:{1},
-    // data:{3}}, e.g. the axis generator op is full_op or assign_op
-    int64_t axis = axis_data[0].dyn_cast<int64_t>();
-    axis = axis < 0 ? axis + rank : axis;
-    symbol::TensorListShapeOrDataDimExprs res_list_s_d(num, out_s_d(axis, num));
-    infer_context->SetShapeOrDataForValue(
-        op->result(0), symbol::ShapeOrDataDimExprs{res_list_s_d});
-  } else if (axis_data.at(0).isa<std::string>()) {
-    // case 2: the DimExpr of axis is a symbol (string). axis_shape_or_data:
-    // {shape:{1}, data:{s0}}, e.g. the axis generator op is data_op
-    int candidate_axis = -1;
-    int count = 0;
-    for (size_t i = 0; i < x_s_or_d.shape().size(); ++i) {
-      if (x_s_or_d.shape().at(i).isa<int64_t>()) {
-        if (x_s_or_d.shape().at(i).dyn_cast<int64_t>() % num == 0) {
-          count++;
-          candidate_axis = i;
-        }
-      } else {
-        PADDLE_THROW(
-            common::errors::InvalidArgument("The type of X must be int64_t."));
-      }
-    }
-    if (count == 1) {
-      // calculate the axis of split_with_num_op
-      symbol::TensorListShapeOrDataDimExprs res_list_s_d(
-          num, out_s_d(candidate_axis, num));
-      infer_context->SetShapeOrDataForValue(
-          op->result(0), symbol::ShapeOrDataDimExprs{res_list_s_d});
-    } else {
-      // create a new symbol for every dim divisible by num
-      std::vector<symbol::DimExpr> res_s;
-      for (size_t i = 0; i < x_s_or_d.shape().size(); ++i) {
-        const auto &s_dim =
-            x_s_or_d.shape().at(i).dyn_cast<int64_t>() % num == 0
-                ? symbol::DimExpr(infer_context->GetNextSymName())
-                : x_s_or_d.shape().at(i);
-        res_s.emplace_back(s_dim);
-      }
-      const symbol::TensorShapeOrDataDimExprs &res_s_d =
-          symbol::TensorShapeOrDataDimExprs(res_s);
-      symbol::TensorListShapeOrDataDimExprs res_list_s_d(num, res_s_d);
-      infer_context->SetShapeOrDataForValue(
-          op->result(0), symbol::ShapeOrDataDimExprs{res_list_s_d});
-    }
-  } else {
-    PADDLE_THROW(common::errors::InvalidArgument(
-        "The type of axis must be int64_t or string."));
-  }
-  return true;
-}
-
-bool SumOpInferSymbolicShape(pir::Operation *op,
-                             pir::InferSymbolicShapeContext *infer_context) {
-  bool keepdim = GetBoolAttr(op, "keepdim");
-  bool reduce_all = false;
-
-  auto axis_gen_op = op->operand_source(1).defining_op();
-  if (axis_gen_op->isa<paddle::dialect::FullIntArrayOp>()) {
-    std::vector<int64_t> axis = details::GetVectorAttr(
-        axis_gen_op->dyn_cast<paddle::dialect::FullIntArrayOp>(), "value");
-    if (axis.empty()) {
-      reduce_all = true;
-    }
-    return details::ReduceInferDim(
-        op, infer_context, axis, keepdim, reduce_all);
-  } else {
-    // TODO(lanxianghit): deal with other sources: pir::VectorType,
-    // paddle::dialect::DenseTensorType
-    PADDLE_THROW(
-        common::errors::Unimplemented("SumOpInferSymbolicShape: 'axis' only "
-                                      "support FullIntArrayOp's result now."));
-  }
-
-  return true;
-}
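For illustration of case 2 of SplitWithNumOp above (values assumed):
with a fully static x.shape = [3, 4, 5] and num = 2, only dim 1 is
divisible by 2, so count == 1 and candidate_axis == 1, and each of the
two outputs is inferred as [3, 2, 5]. If more than one dim were
divisible, every divisible dim would instead get a fresh symbol.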
-
-bool TileOpInferSymbolicShape(pir::Operation *op,
-                              pir::InferSymbolicShapeContext *infer_context) {
-  pir::Value operand_x = op->operand_source(0);
-  symbol::ShapeOrDataDimExprs x_shape_or_data =
-      infer_context->GetShapeOrDataForValue(operand_x);
-  pir::Value operand_repeat_times = op->operand_source(1);
-  symbol::ShapeOrDataDimExprs repeat_times_shape_or_data =
-      infer_context->GetShapeOrDataForValue(operand_repeat_times);
-
-  std::vector<symbol::DimExpr> x_dimexpr = x_shape_or_data.shape();
-  std::vector<symbol::DimExpr> repeat_times_dimexpr =
-      details::GetExprVecFromData(repeat_times_shape_or_data);
-  if (repeat_times_dimexpr.empty()) {
-    repeat_times_dimexpr = std::vector<symbol::DimExpr>(x_dimexpr.size(), 1);
-  }
-
-  auto out_rank = std::max(static_cast<size_t>(x_dimexpr.size()),
-                           repeat_times_dimexpr.size());
-  std::vector<symbol::DimExpr> out_shape(out_rank);
-  if (x_dimexpr.size() > repeat_times_dimexpr.size()) {
-    auto diff = x_dimexpr.size() - repeat_times_dimexpr.size();
-    repeat_times_dimexpr.insert(repeat_times_dimexpr.begin(), diff, 1);
-  } else {
-    auto diff = repeat_times_dimexpr.size() - x_dimexpr.size();
-    x_dimexpr.insert(x_dimexpr.begin(), diff, 1);
-  }
-
-  for (size_t i = 0; i < repeat_times_dimexpr.size(); ++i) {
-    out_shape.at(i) = x_dimexpr.at(i) * repeat_times_dimexpr.at(i);
-  }
-
-  symbol::ShapeOrDataDimExprs shape_data{
-      symbol::TensorShapeOrDataDimExprs(out_shape)};
-
-  pir::Value res = op->result(0);
-  infer_context->SetShapeOrDataForValue(res, shape_data);
-
-  return true;
-}
-
-bool TopkOpInferSymbolicShape(pir::Operation *op,
-                              pir::InferSymbolicShapeContext *infer_context) {
-  symbol::ShapeOrDataDimExprs x_shape_or_data =
-      infer_context->GetShapeOrDataForValue(op->operand_source(0));
-  symbol::ShapeOrDataDimExprs k_shape_or_data =
-      infer_context->GetShapeOrDataForValue(op->operand_source(1));
-  const auto &attributes = op->attributes();
-  int axis = attributes.at("axis").dyn_cast<pir::Int32Attribute>().data();
-  const std::vector<symbol::DimExpr> &in_dims_sym = [&] {
-    std::vector<symbol::DimExpr> dims;
-    if (x_shape_or_data.data().has_value()) {
-      dims = x_shape_or_data.data().value();
-    } else {
-      dims = x_shape_or_data.shape();
-    }
-    return dims;
-  }();
-
-  int x_rank = in_dims_sym.size();
-
-  int k = k_shape_or_data.data().value().at(0).Get<int64_t>();
-
-  if (axis < 0) axis += x_rank;
-  const auto &out_sym_shape = [&] {
-    std::vector<symbol::DimExpr> out_sym_shape;
-    for (int i = 0; i < x_rank; ++i) {
-      if (i == axis) {
-        out_sym_shape.push_back(symbol::DimExpr(k));
-      } else {
-        out_sym_shape.push_back(in_dims_sym.at(i));
-      }
-    }
-    return out_sym_shape;
-  }();
-
-  symbol::ShapeOrDataDimExprs shape_data{
-      symbol::TensorShapeOrDataDimExprs(out_sym_shape)};
-
-  infer_context->SetShapeOrDataForValue(op->result(0), shape_data);
-  infer_context->SetShapeOrDataForValue(op->result(1), shape_data);
-
-  return true;
-}
-
-bool TopkV1OpInferSymbolicShape(pir::Operation *op,
-                                pir::InferSymbolicShapeContext *infer_context) {
-  return TopkOpInferSymbolicShape(op, infer_context);
-}
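For illustration of TileOp above, assuming x.shape = [2, 3] and
repeat_times = [2, 1, 2], x is left-padded with 1s to [1, 2, 3] before
the element-wise product:

    // out_shape = [1*2, 2*1, 3*2] = [2, 2, 6]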
-
-bool TransposeOpInferSymbolicShape(
-    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
-  std::vector<pir::Attribute> perm =
-      op->attributes().at("perm").dyn_cast<pir::ArrayAttribute>().AsVector();
-  if (perm.size() == 1) {
-    // perm must be [0], which means nothing to do with the input; just copy
-    // the info from the input
-    infer_context->SetShapeOrDataForValue(
-        op->result(0),
-        infer_context->GetShapeOrDataForValue(op->operand_source(0)));
-    return true;
-  }
-  const std::vector<symbol::DimExpr> &x_dims = [&] {
-    std::vector<symbol::DimExpr> dims;
-    const auto &x_shape_or_data =
-        infer_context->GetShapeOrDataForValue(op->operand_source(0));
-    if (x_shape_or_data.data().has_value()) {
-      dims = x_shape_or_data.data().value();
-    } else {
-      dims = x_shape_or_data.shape();
-    }
-    return dims;
-  }();
-
-  int x_rank = x_dims.size();
-
-  const std::vector<int32_t> formatted_axis = [x_rank, &perm] {
-    std::vector<int32_t> out(perm.size(), 0);
-    std::transform(perm.begin(),
-                   perm.end(),
-                   out.begin(),
-                   [](pir::Attribute &p) -> int32_t {
-                     return p.dyn_cast<pir::Int32Attribute>().data();
-                   });
-
-    // normalize the negative axes
-    std::for_each(out.begin(), out.end(), [x_rank](int32_t &v) {
-      if (v < 0) {
-        v += x_rank;
-      }
-    });
-    return out;
-  }();
-
-  int axis_size = static_cast<int>(formatted_axis.size());
-
-  std::vector<symbol::DimExpr> out_dims(x_dims);
-  for (int i = 0; i < axis_size; ++i) {
-    out_dims.at(i) = x_dims.at(formatted_axis.at(i));
-  }
-
-  infer_context->SetShapeOrDataForValue(op->result(0),
-                                        ShapeOrData{TensorExprs(out_dims)});
-
-  return true;
-}
-
-bool Transpose_OpInferSymbolicShape(
-    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
-  return TransposeOpInferSymbolicShape(op, infer_context);
-}
-
-bool SqueezeOpInferSymbolicShape(
-    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
-  PADDLE_ENFORCE_EQ(
-      op->num_operands(),
-      2,
-      common::errors::InvalidArgument(
-          "SqueezeOpInferSymbolicShape ONLY support num_operands() == 2 "
-          "now, but got %d operands",
-          op->num_operands()));
-
-  auto x_shape_or_data =
-      infer_context->GetShapeOrDataForValue(op->operand_source(0));
-  auto axes_shape_or_data =
-      infer_context->GetShapeOrDataForValue(op->operand_source(1));
-
-  std::vector<symbol::DimExpr> in_dims_sym;
-  if (x_shape_or_data.data().has_value()) {
-    in_dims_sym = x_shape_or_data.data().value();
-  } else {
-    in_dims_sym = x_shape_or_data.shape();
-  }
-
-  std::vector<symbol::DimExpr> squeeze_dims_sym;
-  if (axes_shape_or_data.data().has_value()) {
-    squeeze_dims_sym = axes_shape_or_data.data().value();
-  } else {
-    squeeze_dims_sym = axes_shape_or_data.shape();
-  }
-
-  std::vector<int> squeeze_dims;
-  for (auto squeeze_dim : squeeze_dims_sym) {
-    PADDLE_ENFORCE_EQ(
-        squeeze_dim.Has<std::int64_t>(),
-        true,
-        common::errors::InvalidArgument(
-            "in SqueezeOpInferSymbolicShape, axes must be known int type, "
-            "but got: %s",
-            symbol::ToString(squeeze_dim)));
-    squeeze_dims.emplace_back(
-        static_cast<int>(squeeze_dim.Get<std::int64_t>()));
-  }
-
-  // GetOutputSqueezeShape
-  size_t num_squeeze_dims = squeeze_dims.size();
-  std::vector<bool> should_squeeze(in_dims_sym.size(), false);
-  // Mark the dimensions that need to be squeezed.
-  if (num_squeeze_dims == 0) {
-    for (size_t i = 0; i < in_dims_sym.size(); ++i) {
-      // TODO(lanxianghit): if a symbol appears here, we may need the result
-      // of dim expr simplification
-      if (in_dims_sym.at(i) == 1) {
-        should_squeeze.at(i) = true;
-      }
-    }
-  } else {
-    for (size_t i = 0; i < num_squeeze_dims; ++i) {
-      if (in_dims_sym.size() == 0) {
-        continue;
-      }
-      int current = squeeze_dims.at(i) < 0
-                        ? squeeze_dims.at(i) + in_dims_sym.size()
-                        : squeeze_dims.at(i);
-
-      if (!should_squeeze.at(current)) {
-        // At compile time, a dim that is still a SYMBOL is also allowed to
-        // be squeezed, so all three branches mark the dim.
-        if (in_dims_sym.at(current) == 1) {
-          should_squeeze.at(current) = true;
-        } else if (!in_dims_sym.at(current).Has<std::int64_t>()) {
-          should_squeeze.at(current) = true;
-        } else {
-          should_squeeze.at(current) = true;
-        }
-      }
-    }
-  }
-
-  // Make output dimensions
-  std::vector<symbol::DimExpr> output_shape_sym;
-  for (size_t i = 0; i < in_dims_sym.size(); ++i) {
-    if (!should_squeeze.at(i)) {
-      output_shape_sym.emplace_back(in_dims_sym.at(i));
-    }
-  }
-
-  symbol::ShapeOrDataDimExprs shape_data{
-      symbol::TensorShapeOrDataDimExprs(output_shape_sym)};
-
-  pir::Value res = op->result(0);
-  infer_context->SetShapeOrDataForValue(res, shape_data);
-  infer_context->SetShapeOrDataForValue(
-      op->result(1), CreateShapeOrDataForXShape(x_shape_or_data));
-
-  return true;
-}
-
-bool Squeeze_OpInferSymbolicShape(
-    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
-  return SqueezeOpInferSymbolicShape(op, infer_context);
-}
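For illustration of SqueezeOp above, assuming x.shape = [1, S0, 1] and
axes = [0, 2], both requested dims are marked in should_squeeze and the
inferred output is [S0]; with axes = [] only dims statically known to
equal 1 are squeezed, which here also yields [S0].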
-
-bool UnbindOpInferSymbolicShape(pir::Operation *op,
-                                pir::InferSymbolicShapeContext *infer_context) {
-  // input
-  const auto &x_shape_or_data =
-      infer_context->GetShapeOrDataForValue(op->operand_source(0));
-  PADDLE_ENFORCE_EQ(
-      x_shape_or_data.data().has_value(),
-      false,
-      common::errors::InvalidArgument(
-          "InferSymbolicShape of UnbindOp only support input with "
-          "value now."));
-  const auto &x_dims_sym = x_shape_or_data.shape();
-
-  // axis
-  int axis = op->attributes().at("axis").dyn_cast<pir::Int32Attribute>().data();
-  int rank = x_dims_sym.size();
-  axis = axis >= 0 ? axis : axis + rank;
-
-  // output
-  const symbol::TensorListShapeOrDataDimExprs &output_shape_data_list = [&] {
-    symbol::TensorListShapeOrDataDimExprs shape_data_list;
-    std::vector<symbol::DimExpr> output_dims_sym = x_dims_sym;
-
-    const symbol::DimExpr &unbound_dim = x_dims_sym.at(axis);
-    PADDLE_ENFORCE_EQ(unbound_dim.isa<int64_t>(),
-                      true,
-                      common::errors::InvalidArgument(
-                          "InferSymbolicShape of UnbindOp only support unbound "
-                          "dim with constant length!"));
-    output_dims_sym.erase(output_dims_sym.begin() + axis);
-    const int64_t unbound_dim_length = unbound_dim.dyn_cast<int64_t>();
-
-    for (uint32_t idx = 0; idx < unbound_dim_length; idx++) {
-      shape_data_list.push_back(
-          symbol::TensorShapeOrDataDimExprs(output_dims_sym));
-    }
-    return shape_data_list;
-  }();
-
-  infer_context->SetShapeOrDataForValue(
-      op->result(0), symbol::ShapeOrDataDimExprs{output_shape_data_list});
-
-  return true;
-}
-
-bool UniqueOpInferSymbolicShape(pir::Operation *op,
-                                pir::InferSymbolicShapeContext *infer_context) {
-  const auto &x_shape_or_data =
-      infer_context->GetShapeOrDataForValue(op->operand_source(0));
-  PADDLE_ENFORCE_EQ(
-      x_shape_or_data.data().has_value(),
-      false,
-      common::errors::InvalidArgument(
-          "InferSymbolicShape of UniqueOp only support input with "
-          "value now."));
-  const auto &x_dims_sym = x_shape_or_data.shape();
-  const size_t rank = x_dims_sym.size();
-  std::vector<int> axes =
-      paddle::dialect::details::GetVectorAttr<int>(op, "axis");
-
-  symbol::DimExpr unique_dim_sym =
-      infer_context->GetNextSymName();  // unknown until runtime
-
-  const std::vector<symbol::DimExpr> &counts_dims = [&] {
-    std::vector<symbol::DimExpr> out_dims;
-    out_dims.push_back(unique_dim_sym);
-    return out_dims;
-  }();
-
-  const std::vector<symbol::DimExpr> &index_dims = counts_dims;
-
-  const std::vector<symbol::DimExpr> &out_dims = [&] {
-    if (axes.empty()) {
-      return counts_dims;
-    }
-    std::vector<symbol::DimExpr> out_dims = x_dims_sym;
-    int axis = axes.at(0);
-    axis = axis >= 0 ? axis : axis + rank;
-    out_dims.at(axis) = unique_dim_sym;
-    return out_dims;
-  }();
-
-  const std::vector<symbol::DimExpr> &inverse_dims = [&] {
-    std::vector<symbol::DimExpr> inverse_dims;
-    if (axes.empty()) {
-      // flatten before unique
-      symbol::DimExpr product{1};
-      for (const auto &x_dim : x_dims_sym) {
-        product = product * x_dim;
-      }
-      inverse_dims.push_back(product);
-    } else {
-      int axis = axes.at(0);
-      axis = axis >= 0 ? axis : axis + rank;
-      inverse_dims.push_back(x_dims_sym.at(axis));
-    }
-    return inverse_dims;
-  }();
-
-  bool return_index = GetBoolAttr(op, "return_index");
-  bool return_inverse = GetBoolAttr(op, "return_inverse");
-  bool return_counts = GetBoolAttr(op, "return_counts");
-
-  symbol::ShapeOrDataDimExprs empty{symbol::TensorShapeOrDataDimExprs{}};
-  infer_context->SetShapeOrDataForValue(
-      op->result(0), symbol::TensorShapeOrDataDimExprs{out_dims});
-  infer_context->SetShapeOrDataForValue(
-      op->result(1),
-      return_index ? symbol::TensorShapeOrDataDimExprs{index_dims} : empty);
-  infer_context->SetShapeOrDataForValue(
-      op->result(2),
-      return_inverse ? symbol::TensorShapeOrDataDimExprs{inverse_dims} : empty);
-  infer_context->SetShapeOrDataForValue(
-      op->result(3),
-      return_counts ? symbol::TensorShapeOrDataDimExprs{counts_dims} : empty);
-
-  return true;
-}
-
-bool UniqueConsecutiveOpInferSymbolicShape(
-    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
-  const auto &x_shape_or_data =
-      infer_context->GetShapeOrDataForValue(op->operand_source(0));
-  PADDLE_ENFORCE_EQ(
-      x_shape_or_data.data().has_value(),
-      false,
-      common::errors::InvalidArgument(
-          "InferSymbolicShape of UniqueConsecutiveOp only support input with "
-          "value now."));
-  const auto &x_dims_sym = x_shape_or_data.shape();
-  const size_t rank = x_dims_sym.size();
-  std::vector<int> axes =
-      paddle::dialect::details::GetVectorAttr<int>(op, "axis");
-
-  symbol::DimExpr unique_dim_sym =
-      infer_context->GetNextSymName();  // unknown until runtime
-
-  const std::vector<symbol::DimExpr> &counts_dims = [&] {
-    std::vector<symbol::DimExpr> out_dims;
-    out_dims.push_back(unique_dim_sym);
-    return out_dims;
-  }();
-
-  const std::vector<symbol::DimExpr> &out_dims = [&] {
-    if (axes.empty()) {
-      return counts_dims;
-    }
-    std::vector<symbol::DimExpr> out_dims = x_dims_sym;
-    int axis = axes.at(0);
-    axis = axis >= 0 ? axis : axis + rank;
-    out_dims.at(axis) = unique_dim_sym;
-    return out_dims;
-  }();
-
-  const std::vector<symbol::DimExpr> &inverse_dims = [&] {
-    std::vector<symbol::DimExpr> inverse_dims;
-    if (axes.empty()) {
-      // flatten before unique
-      symbol::DimExpr product{1};
-      for (const auto &x_dim : x_dims_sym) {
-        product = product * x_dim;
-      }
-      inverse_dims.push_back(product);
-    } else {
-      int axis = axes.at(0);
-      axis = axis >= 0 ? axis : axis + rank;
-      inverse_dims.push_back(x_dims_sym.at(axis));
-    }
-    return inverse_dims;
-  }();
-
-  bool return_inverse = GetBoolAttr(op, "return_inverse");
-  bool return_counts = GetBoolAttr(op, "return_counts");
-
-  symbol::ShapeOrDataDimExprs empty{symbol::TensorShapeOrDataDimExprs{}};
-  infer_context->SetShapeOrDataForValue(
-      op->result(0), symbol::TensorShapeOrDataDimExprs{out_dims});
-  infer_context->SetShapeOrDataForValue(
-      op->result(1),
-      return_inverse ? symbol::TensorShapeOrDataDimExprs{inverse_dims} : empty);
-  infer_context->SetShapeOrDataForValue(
-      op->result(2),
-      return_counts ? symbol::TensorShapeOrDataDimExprs{counts_dims} : empty);
-
-  return true;
-}
-
-bool UnsqueezeOpInferSymbolicShape(
-    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
-  PADDLE_ENFORCE_EQ(
-      op->num_operands(),
-      2,
-      common::errors::InvalidArgument(
-          "UnsqueezeOp InferSymbolicShape ONLY support num_operands() == 2 "
-          "now, but got %d operands",
-          op->num_operands()));
-
-  auto x_shape_or_data =
-      infer_context->GetShapeOrDataForValue(op->operand_source(0));
-  auto axes_shape_or_data =
-      infer_context->GetShapeOrDataForValue(op->operand_source(1));
-
-  std::vector<symbol::DimExpr> x_sym_shape;
-  if (x_shape_or_data.data().has_value()) {
-    x_sym_shape = x_shape_or_data.data().value();
-  } else {
-    x_sym_shape = x_shape_or_data.shape();
-  }
-  int x_dims_size = x_sym_shape.size();
-
-  std::vector<symbol::DimExpr> axes_sym;
-  if (axes_shape_or_data.data().has_value()) {
-    axes_sym = axes_shape_or_data.data().value();
-  } else {
-    axes_sym = axes_shape_or_data.shape();
-  }
-  int axes_sym_size = axes_sym.size();
-
-  // GetUnsqueezeShape
-  int output_rank = x_dims_size + axes_sym_size;
-  std::vector<symbol::DimExpr> result_sym_dims(output_rank, 0);
-
-  int cur_output_rank = x_dims_size;
-  for (auto axis_expr : axes_sym) {
-    PADDLE_ENFORCE_EQ(
-        axis_expr.Has<std::int64_t>(),
-        true,
-        common::errors::InvalidArgument(
-            "in UnsqueezeOpInferSymbolicShape, axes must be known int type, "
-            "but got: %s",
-            symbol::ToString(axis_expr)));
-    int axis = static_cast<int>(axis_expr.Get<std::int64_t>());
-    int cur = axis < 0 ? axis + cur_output_rank + 1 : axis;
-
-    // Move old axis, and insert new axis
-    for (int i = cur_output_rank; i >= cur; --i) {
-      if (result_sym_dims.at(i) == 1) {
-        // Move axis
-        result_sym_dims.at(i + 1) = 1;
-        result_sym_dims.at(i) = 0;
-      }
-    }
-    result_sym_dims.at(cur) = 1;
-    // Add the output size.
-    cur_output_rank++;
-  }
-
-  // Make output shape
-  for (int in_idx = 0, out_idx = 0; out_idx < output_rank; ++out_idx) {
-    if (result_sym_dims.at(out_idx) == 0) {
-      result_sym_dims.at(out_idx) = x_sym_shape.at(in_idx++);
-    }
-  }
-
-  symbol::ShapeOrDataDimExprs shape_data{
-      symbol::TensorShapeOrDataDimExprs(result_sym_dims)};
-
-  pir::Value res = op->result(0);
-  infer_context->SetShapeOrDataForValue(res, shape_data);
-  infer_context->SetShapeOrDataForValue(
-      op->result(1), CreateShapeOrDataForXShape(x_shape_or_data));
-
-  return true;
-}
-
-bool Unsqueeze_OpInferSymbolicShape(
-    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
-  return UnsqueezeOpInferSymbolicShape(op, infer_context);
-}
-
-}  // namespace paddle::dialect
diff --git a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/unary_infer_sym-checkpoint.h b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/unary_infer_sym-checkpoint.h
deleted file mode 100644
index c8509042d3764..0000000000000
--- a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/.ipynb_checkpoints/unary_infer_sym-checkpoint.h
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#pragma once
-
-#include "paddle/pir/include/dialect/shape/utils/shape_analysis.h"
-
-namespace paddle::dialect {
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(All)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Amax)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Amin)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Any)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Argmax)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Argmin)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(AsComplex)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(AsReal)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Assign)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Assign_)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(AsStrided)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(BipartiteMatch)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cast)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cast_)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cholesky)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(ClipByNorm)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(ClipByNormSr)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cummax)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cummin)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cumprod)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cumprod_)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cumsum)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cumsum_)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(ChannelShuffle)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(DiagEmbed)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Diagonal)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(DistributeFpnProposals)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Eigh)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Eigvalsh)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(FakeChannelWiseQuantizeAbsMax)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(FftC2c)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(FftC2r)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(FftR2c)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(FillDiagonal)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(FillDiagonal_)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Flatten)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Flatten_)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Fold)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(IdentityLoss)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(IdentityLoss_)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Kthvalue)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(LpPool2d)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Logcumsumexp)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Logsumexp)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Max)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Maxout)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Min)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(MeanAll)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Nonzero)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Numel)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Pad)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Pad3d)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Pool2d)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Prod)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(RepeatInterleave)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Reshape)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Reshape_)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Shape)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(ShapeSr)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Slice)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Split)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(SplitWithNum)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Squeeze)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Squeeze_)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Sum)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Tile)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Topk)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(TopkV1)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Transpose)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Transpose_)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Unbind)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Unique)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(UniqueConsecutive)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Unsqueeze)
-OP_DECLARE_INFER_SYMBOLIC_SHAPE(Unsqueeze_)
-
-}  // namespace paddle::dialect

From e3a61ecf887161ec662d949eefc75741f2d83d96 Mon Sep 17 00:00:00 2001
From: Fripping <124574028+Fripping@users.noreply.github.com>
Date: Wed, 14 Aug 2024 13:54:05 +0800
Subject: [PATCH 8/8] Update unary_infer_sym.cc

---
 .../operator/interface/infer_symbolic_shape/unary_infer_sym.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/unary_infer_sym.cc b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/unary_infer_sym.cc
index 6a2a415f0238a..e461d03f41776 100644
--- a/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/unary_infer_sym.cc
+++ b/paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/unary_infer_sym.cc
@@ -393,10 +393,10 @@ bool AsStridedOpInferSymbolicShape(
   const std::vector<int64_t> &shape =
       paddle::dialect::details::GetVectorAttr<int64_t>(op, "dims");
-  symbol::DimExpr out_unknown = infer_context->GetNextSymName();
   int rank = shape.size();
   std::vector<symbol::DimExpr> out_shape;
   for (int i = 0; i < rank; ++i) {
+    symbol::DimExpr out_unknown = infer_context->GetNextSymName();
     if (shape[i] == -1) {
       out_shape.push_back(out_unknown);
     } else {