Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

【Infer Symbolic Shape BUAA】add clipbynorm, ctcalign, identityloss #66952

Merged
merged 3 commits
Aug 5, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -145,6 +145,18 @@ bool BceLoss_OpInferSymbolicShape(
return BceLossOpInferSymbolicShape(op, infer_context);
}

bool CtcAlignOpInferSymbolicShape(
    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
  // ctc_align keeps the input's symbolic shape for its first result; the
  // second result (presumably the per-sample output length — confirm against
  // the op definition) is a tensor of shape [dim0_of_input, 1].
  const auto &x_shape_or_data =
      infer_context->GetShapeOrDataForValue(op->operand_source(0));
  infer_context->SetShapeOrDataForValue(op->result(0), x_shape_or_data);

  const std::vector<symbol::DimExpr> length_dims{x_shape_or_data.shape()[0],
                                                 symbol::DimExpr{1}};
  infer_context->SetShapeOrDataForValue(
      op->result(1),
      symbol::ShapeOrDataDimExprs{
          symbol::TensorShapeOrDataDimExprs(length_dims)});
  return true;
}

bool Conv2dOpInferSymbolicShape(pir::Operation *op,
pir::InferSymbolicShapeContext *infer_context) {
const std::vector<int> strides =
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ OP_DECLARE_INFER_SYMBOLIC_SHAPE(Allclose)
OP_DECLARE_INFER_SYMBOLIC_SHAPE(Atan2)
OP_DECLARE_INFER_SYMBOLIC_SHAPE(BceLoss)
OP_DECLARE_INFER_SYMBOLIC_SHAPE(BceLoss_)
OP_DECLARE_INFER_SYMBOLIC_SHAPE(CtcAlign)
OP_DECLARE_INFER_SYMBOLIC_SHAPE(Conv2d)
OP_DECLARE_INFER_SYMBOLIC_SHAPE(Conv3d)
OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cross)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -387,6 +387,27 @@ bool CholeskyOpInferSymbolicShape(
return true;
}

bool ClipByNormOpInferSymbolicShape(
    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
  // clip_by_norm rescales values without reshaping, so the result simply
  // inherits the operand's symbolic shape. The max_norm attribute must be
  // strictly positive.
  const float max_norm = op->attribute<pir::FloatAttribute>("max_norm").data();
  PADDLE_ENFORCE_GT(
      max_norm,
      0,
      phi::errors::InvalidArgument("max_norm should be greater than 0. "
                                   "Received max_norm is %f.",
                                   max_norm));

  const auto &x_shape_or_data =
      infer_context->GetShapeOrDataForValue(op->operand_source(0));
  infer_context->SetShapeOrDataForValue(op->result(0), x_shape_or_data);
  return true;
}

// SelectedRows variant of clip_by_norm. The symbolic shape inference is the
// same as the dense case (output shape == input shape), so delegate to it.
bool ClipByNormSrOpInferSymbolicShape(
    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
  return ClipByNormOpInferSymbolicShape(op, infer_context);
}

bool CummaxOpInferSymbolicShape(pir::Operation *op,
pir::InferSymbolicShapeContext *infer_context) {
pir::Value operand_source = op->operand_source(0);
Expand Down Expand Up @@ -794,6 +815,29 @@ bool Flatten_OpInferSymbolicShape(
return FlattenOpInferSymbolicShape(op, infer_context);
}

bool IdentityLossOpInferSymbolicShape(
    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
  // When reduction == 2 the loss keeps the input's shape (presumably the
  // "none" reduction mode — verify against the op's attribute docs); any
  // other reduction collapses the result to a 0-D (scalar) tensor.
  const auto &x_shape_or_data =
      infer_context->GetShapeOrDataForValue(op->operand_source(0));
  const int reduction = op->attribute<pir::Int32Attribute>("reduction").data();

  if (reduction != 2) {
    const std::vector<symbol::DimExpr> scalar_shape;  // rank-0 output
    infer_context->SetShapeOrDataForValue(
        op->result(0),
        symbol::ShapeOrDataDimExprs{
            symbol::TensorShapeOrDataDimExprs(scalar_shape)});
  } else {
    infer_context->SetShapeOrDataForValue(op->result(0), x_shape_or_data);
  }

  return true;
}

// In-place variant of identity_loss; shape inference is identical to the
// out-of-place op, so delegate.
bool IdentityLoss_OpInferSymbolicShape(
    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
  return IdentityLossOpInferSymbolicShape(op, infer_context);
}

bool KthvalueOpInferSymbolicShape(
pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
pir::Value operand_source = op->operand_source(0);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,8 @@ OP_DECLARE_INFER_SYMBOLIC_SHAPE(AsComplex)
OP_DECLARE_INFER_SYMBOLIC_SHAPE(AsReal)
OP_DECLARE_INFER_SYMBOLIC_SHAPE(BipartiteMatch)
OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cholesky)
OP_DECLARE_INFER_SYMBOLIC_SHAPE(ClipByNorm)
OP_DECLARE_INFER_SYMBOLIC_SHAPE(ClipByNormSr)
OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cummax)
OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cummin)
OP_DECLARE_INFER_SYMBOLIC_SHAPE(Cumprod)
Expand All @@ -46,6 +48,8 @@ OP_DECLARE_INFER_SYMBOLIC_SHAPE(FillDiagonal)
OP_DECLARE_INFER_SYMBOLIC_SHAPE(FillDiagonal_)
OP_DECLARE_INFER_SYMBOLIC_SHAPE(Flatten)
OP_DECLARE_INFER_SYMBOLIC_SHAPE(Flatten_)
OP_DECLARE_INFER_SYMBOLIC_SHAPE(IdentityLoss)
OP_DECLARE_INFER_SYMBOLIC_SHAPE(IdentityLoss_)
OP_DECLARE_INFER_SYMBOLIC_SHAPE(Kthvalue)
OP_DECLARE_INFER_SYMBOLIC_SHAPE(LpPool2d)
OP_DECLARE_INFER_SYMBOLIC_SHAPE(Logcumsumexp)
Expand Down
3 changes: 3 additions & 0 deletions paddle/phi/ops/yaml/ops.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -904,6 +904,7 @@
kernel :
func : clip_by_norm {dense -> dense}
clip_by_norm_sr {selected_rows -> selected_rows}
interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : coalesce_tensor
args : (Tensor[] input, DataType dtype, bool copy_data = false, bool set_constant = false, bool persist_output = false, float constant = 0.0, bool use_align = true, int align_size = -1, int size_of_dtype = -1, int64_t[] concated_shapes = {}, int64_t[] concated_ranks = {})
Expand Down Expand Up @@ -1116,6 +1117,7 @@
func: ctc_align
data_type: input
optional: input_length, output_length
interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : cudnn_lstm
args: (Tensor x, Tensor init_h, Tensor init_c, Tensor w, Tensor[] weight_list, Tensor sequence_length, float dropout_prob = 0.0, bool is_bidirec = false, int hidden_size = 100, int num_layers = 1, bool is_test = false, int seed = 0)
Expand Down Expand Up @@ -2416,6 +2418,7 @@
func : identity_loss
inplace: (x -> out)
backward : identity_loss_grad
interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : im2sequence
args: (Tensor x, Tensor y, int[] kernels, int[] strides = {1, 1}, int[] paddings
Expand Down