【BUAA】【Infer Symbolic Shape No.90 92 2.8】Add CINN #66892

Merged · 23 commits · Aug 13, 2024
@@ -807,6 +807,46 @@ bool SearchsortedOpInferSymbolicShape(
  return true;
}

bool SegmentPoolOpInferSymbolicShape(
    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
  const auto &input_shape_or_data =
      infer_context->GetShapeOrDataForValue(op->operand_source(0));
  const std::vector<symbol::DimExpr> &input_shape =
      input_shape_or_data.shape();
  const auto &ids_shape_or_data =
      infer_context->GetShapeOrDataForValue(op->operand_source(1));
  const std::vector<symbol::DimExpr> &ids_shape = ids_shape_or_data.shape();
  const std::string pool_type =
      op->attribute<pir::StrAttribute>("pooltype").AsString();

  std::vector<symbol::DimExpr> out_shape;
  if (ids_shape_or_data.data().has_value()) {
    // segment_ids are sorted in non-decreasing order, so the number of
    // segments is the last (largest) id plus one.
    const auto &ids_data = ids_shape_or_data.data();
    out_shape.push_back(ids_data.value()[ids_shape.size() - 1] +
                        symbol::DimExpr{1});
  } else {
    symbol::DimExpr out_unknown =
        infer_context->GetNextSymName();  // unknown until runtime
    out_shape.push_back(out_unknown);
  }
  // All trailing dimensions are carried over from the input unchanged.
  const int rank = input_shape.size();
  for (int i = 1; i < rank; ++i) {
    out_shape.push_back(input_shape[i]);
  }
  symbol::ShapeOrDataDimExprs shape_data{
      symbol::TensorShapeOrDataDimExprs(out_shape)};
  infer_context->SetShapeOrDataForValue(op->result(0), shape_data);
  if (pool_type == "MEAN") {
    // MEAN additionally produces summed_ids with shape [num_segments, 1].
    std::vector<symbol::DimExpr> summed_shape;
    summed_shape.push_back(out_shape[0]);  // same segment count as out
    summed_shape.push_back(symbol::DimExpr{1});
    infer_context->SetShapeOrDataForValue(
        op->result(1),
        symbol::ShapeOrDataDimExprs{
            symbol::TensorShapeOrDataDimExprs(summed_shape)});
  }
  return true;
}
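For intuition, the shape rule implemented above can be sketched in plain NumPy. The helper name `segment_pool_out_shape` is hypothetical, purely for illustration: with sorted segment ids, the first output dimension is the last id plus one, and the remaining dimensions carry over from the input.

```python
import numpy as np

def segment_pool_out_shape(x_shape, segment_ids):
    # segment_ids are sorted and non-negative, so the segment
    # count is the last (largest) id plus one
    num_segments = int(segment_ids[-1]) + 1
    # first dim becomes the segment count; trailing dims carry over
    return [num_segments] + list(x_shape[1:])

x = np.zeros((6, 4))
ids = np.array([0, 0, 1, 1, 1, 2])
print(segment_pool_out_shape(x.shape, ids))  # [3, 4]
# for pooltype == "MEAN", summed_ids would additionally have shape [3, 1]
```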

// bool SequenceMaskOpInferSymbolicShape(pir::Operation *op,
//                                       pir::InferSymbolicShapeContext
//                                       *infer_context) {
@@ -61,6 +61,7 @@ OP_DECLARE_INFER_SYMBOLIC_SHAPE(Mv)
// OP_DECLARE_INFER_SYMBOLIC_SHAPE(PullSparseV2)
OP_DECLARE_INFER_SYMBOLIC_SHAPE(ReduceAs)
OP_DECLARE_INFER_SYMBOLIC_SHAPE(Searchsorted)
OP_DECLARE_INFER_SYMBOLIC_SHAPE(SegmentPool)
// OP_DECLARE_INFER_SYMBOLIC_SHAPE(SequenceMask)
// OP_DECLARE_INFER_SYMBOLIC_SHAPE(Swiglu)
OP_DECLARE_INFER_SYMBOLIC_SHAPE(TakeAlongAxis)
@@ -331,6 +331,18 @@ bool GaussianOpInferSymbolicShape(
  }
}

bool RandpermOpInferSymbolicShape(
    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
  // randperm returns a 1-D permutation of [0, n), so the shape is simply {n}.
  int64_t n = op->attribute<pir::Int64Attribute>("n").data();
  std::vector<symbol::DimExpr> out_shape = {n};
  infer_context->SetShapeOrDataForValue(
      op->result(0),
      symbol::ShapeOrDataDimExprs{
          symbol::TensorShapeOrDataDimExprs(out_shape)});

  return true;
}
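Since `randperm(n)` always produces a 1-D tensor of length `n`, the inferred shape is fully static. A quick sanity check against the public API (a sketch, assuming a recent Paddle build):

```python
import paddle

out = paddle.randperm(8)  # a random permutation of 0..7
print(out.shape)  # [8], matches the inferred shape {n}
```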

bool RandintOpInferSymbolicShape(
    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
  const auto &shape_gen_op = op->operand_source(0).defining_op();
@@ -27,6 +27,7 @@ OP_DECLARE_INFER_SYMBOLIC_SHAPE(Full)
OP_DECLARE_INFER_SYMBOLIC_SHAPE(FullIntArray)
OP_DECLARE_INFER_SYMBOLIC_SHAPE(Gaussian)
OP_DECLARE_INFER_SYMBOLIC_SHAPE(Randint)
OP_DECLARE_INFER_SYMBOLIC_SHAPE(Randperm)
// OP_DECLARE_INFER_SYMBOLIC_SHAPE(ReadFile)
OP_DECLARE_INFER_SYMBOLIC_SHAPE(TrilIndices)
OP_DECLARE_INFER_SYMBOLIC_SHAPE(TriuIndices)
1 change: 1 addition & 0 deletions paddle/phi/ops/yaml/inconsistent/static_ops.yaml
@@ -788,6 +788,7 @@
  kernel :
    func : set_value
  backward: set_value_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : shadow_feed
  args : (Tensor x)
2 changes: 2 additions & 0 deletions paddle/phi/ops/yaml/ops.yaml
@@ -3697,6 +3697,7 @@
  data_type : dtype
  backend : place
  traits : pir::SideEffectTrait
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : rank_attention
  args : (Tensor x, Tensor rank_offset, Tensor rank_param, int max_rank = 3, int max_size = 0)
@@ -4038,6 +4039,7 @@
  data_type : x
  intermediate : summed_ids
  backward : segment_pool_grad
  interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : selu
  args : (Tensor x, float scale=1.0507009873554804934193349852946, float alpha=1.6732632423543772848170429916717)
26 changes: 19 additions & 7 deletions test/legacy_test/test_segment_ops.py
@@ -124,7 +124,7 @@ def setUp(self):
        self.convert_bf16()

    def test_check_output(self):
        self.check_output(check_pir=True)
Contributor (review comment): Don't change the pir flag.
        self.check_output(check_pir=True, check_symbol_infer=False)

    def test_check_grad(self):
        self.check_grad(["X"], "Out", check_pir=True)
@@ -223,11 +223,15 @@ def setUp(self):

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            self.check_output_with_place(core.CUDAPlace(0), check_pir=True)
            self.check_output_with_place(
                core.CUDAPlace(0), check_pir=True, check_symbol_infer=False
            )
        # the CPU kernel does not compute 'SummedIds',
        # so that output cannot be checked
        del self.outputs['SummedIds']
        self.check_output_with_place(core.CPUPlace(), check_pir=True)
        self.check_output_with_place(
            core.CPUPlace(), check_pir=True, check_symbol_infer=False
        )


class TestSegmentMean2(TestSegmentMean):
@@ -274,7 +278,9 @@ def prepare(self):
        self.np_dtype = np.float32

    def test_check_output(self):
        self.check_output_with_place(self.place, check_pir=True)
        self.check_output_with_place(
            self.place, check_pir=True, check_symbol_infer=False
        )

    def test_check_grad(self):
        self.check_grad_with_place(self.place, ["X"], "Out", check_pir=True)
@@ -292,7 +298,9 @@ def prepare(self):
        self.np_dtype = np.float32

    def test_check_output(self):
        self.check_output_with_place(self.place, check_pir=True)
        self.check_output_with_place(
            self.place, check_pir=True, check_symbol_infer=False
        )

    def test_check_grad(self):
        self.check_grad_with_place(
@@ -316,7 +324,9 @@ def prepare(self):
        self.np_dtype = np.float32

    def test_check_output(self):
        self.check_output_with_place(self.place, check_pir=True)
        self.check_output_with_place(
            self.place, check_pir=True, check_symbol_infer=False
        )

    def test_check_grad(self):
        self.check_grad_with_place(
@@ -340,7 +350,9 @@ def prepare(self):
        self.np_dtype = np.float32

    def test_check_output(self):
        self.check_output_with_place(self.place, check_pir=True)
        self.check_output_with_place(
            self.place, check_pir=True, check_symbol_infer=False
        )

    def test_check_grad(self):
        self.check_grad_with_place(self.place, ["X"], "Out", check_pir=True)