Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

【PIR OpTest Fix No.35】 fix test_batch_fc_op #62668

Merged
merged 15 commits into from
Mar 21, 2024
1 change: 1 addition & 0 deletions paddle/fluid/pir/dialect/op_generator/ops_api_gen.py
Original file line number Diff line number Diff line change
Expand Up @@ -122,6 +122,7 @@
'add_n_',
'all_reduce',
'all_reduce_',
'batch_fc',
'c_allgather',
'c_allreduce_avg',
'c_allreduce_avg_',
Expand Down
9 changes: 9 additions & 0 deletions paddle/fluid/pir/dialect/operator/ir/ops.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -133,6 +133,15 @@
data_type : dtype
backend : place > output

- op : batch_fc
args : (Tensor input, Tensor w, Tensor bias)
output : Tensor(out)
infer_meta :
func : BatchFCInferMeta
kernel :
func : batch_fc
Dmovic marked this conversation as resolved.
Show resolved Hide resolved
data_type : input

- op : batch_norm
args : (Tensor x, Tensor mean, Tensor variance, Tensor scale, Tensor bias, bool is_test, float momentum, float epsilon, str data_format, bool use_global_stats, bool trainable_statistics)
output : Tensor(out), Tensor(mean_out), Tensor(variance_out), Tensor(saved_mean), Tensor(saved_variance), Tensor(reserve_space)
Expand Down
9 changes: 9 additions & 0 deletions paddle/fluid/pir/dialect/operator/ir/ops_backward.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -81,6 +81,15 @@
func : assign
inplace : (out_grad -> x_grad)

- backward_op : batch_fc_grad
forward : batch_fc (Tensor input, Tensor w, Tensor bias) -> Tensor(out)
args : (Tensor input, Tensor w, Tensor bias, Tensor out_grad)
output : Tensor(input_grad), Tensor(w_grad), Tensor(bias_grad)
infer_meta :
func : BatchFCGradInferMeta
kernel :
func : batch_fc_grad

- backward_op : batch_norm_double_grad
forward : batch_norm_grad (Tensor x, Tensor scale, Tensor bias, Tensor out_mean, Tensor out_variance, Tensor saved_mean, Tensor saved_variance, Tensor reserve_space, Tensor grad_out, float momentum, float epsilon, str data_format, bool is_test, bool use_global_stats, bool trainable_statistics) -> Tensor(grad_x), Tensor(grad_scale), Tensor(grad_bias)
args : (Tensor x, Tensor scale, Tensor out_mean, Tensor out_variance, Tensor saved_mean, Tensor saved_variance, Tensor grad_out, Tensor grad_x_grad, Tensor grad_scale_grad, Tensor grad_bias_grad, float momentum, float epsilon, str data_format, bool is_test, bool use_global_stats, bool trainable_statistics)
Expand Down
2 changes: 2 additions & 0 deletions paddle/fluid/pir/dialect/operator/utils/utils.cc
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,8 @@ namespace dialect {

const std::unordered_set<std::string> LegacyOpList = {
LoadCombineOp::name(),
BatchFcOp::name(),
BatchFcGradOp::name(),
CConcatOp::name(),
CBroadcast_Op::name(),
CSyncCalcStream_Op::name(),
Expand Down
7 changes: 7 additions & 0 deletions paddle/phi/api/yaml/op_compat.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -329,6 +329,13 @@
outputs :
{auc : AUC, stat_pos_out : StatPosOut, stat_neg_out : StatNegOut}

- op : batch_fc
backward : batch_fc_grad
inputs :
{input : Input, w : W, bias : Bias}
outputs :
out : Out

- op : batch_norm
backward : batch_norm_grad, batch_norm_double_grad(batch_norm_grad_grad)
inputs:
Expand Down
56 changes: 56 additions & 0 deletions paddle/phi/infermeta/ternary.cc
Original file line number Diff line number Diff line change
Expand Up @@ -146,6 +146,62 @@ void AddmmInferMeta(const MetaTensor& input,
out->set_dtype(input.dtype());
}

// Shape/dtype inference for batch_fc (batched fully-connected):
// input: [batch, in_num, in_dim], w: [batch, in_dim, out_dim],
// bias: [batch, out_dim]  ->  out: [batch, in_num, out_dim].
// dims are derived from the enforced constraints below.
void BatchFCInferMeta(const MetaTensor& input,
                      const MetaTensor& w,
                      const MetaTensor& bias,
                      MetaTensor* out) {
  auto input_dims = input.dims();
  auto w_dims = w.dims();

  PADDLE_ENFORCE_EQ(
      input_dims.size(),
      3,
      phi::errors::InvalidArgument("Input of BatchFCOp should have 3D."));
  PADDLE_ENFORCE_EQ(
      w_dims.size(),
      3,
      phi::errors::InvalidArgument("W of BatchFCOp should have 3D."));
  PADDLE_ENFORCE_EQ(
      input_dims[0],
      w_dims[0],
      phi::errors::InvalidArgument(
          "Input.dim[0] and W.dim[0] of BatchFCOp should be same."));
  PADDLE_ENFORCE_EQ(
      input_dims[2],
      w_dims[1],
      phi::errors::InvalidArgument(
          "Input.dim[2] and W.dim[1] of BatchFCOp should be same."));

  auto bias_dims = bias.dims();
  // Guard the rank before indexing bias_dims[0] / bias_dims[1] below;
  // without this a malformed bias would read out of bounds.
  PADDLE_ENFORCE_EQ(
      bias_dims.size(),
      2,
      phi::errors::InvalidArgument("Bias of BatchFCOp should have 2D."));
  PADDLE_ENFORCE_EQ(bias_dims[0],
                    input_dims[0],
                    phi::errors::InvalidArgument(
                        "Bias.dim[0] should be same as input.dim[0]."));
  // The comparison is against W.dim[2] (the output width), so the message
  // must reference W.dim[2] — the previous text wrongly said input.dim[2].
  PADDLE_ENFORCE_EQ(bias_dims[1],
                    w_dims[2],
                    phi::errors::InvalidArgument(
                        "Bias.dim[1] should be same as W.dim[2]."));

  out->set_dims({input_dims[0], input_dims[1], w_dims[2]});
  out->share_lod(input);
  out->set_dtype(input.dtype());
}

void BatchFCGradInferMeta(const MetaTensor& input,
const MetaTensor& w,
const MetaTensor& bias,
const MetaTensor& out_grad,
MetaTensor* input_grad,
MetaTensor* w_grad,
MetaTensor* bias_grad) {
input_grad->set_dims(input.dims());
input_grad->set_dtype(input.dtype());
w_grad->set_dims(w.dims());
w_grad->set_dtype(w.dtype());
bias_grad->set_dims(bias.dims());
bias_grad->set_dtype(bias.dtype());
}

void BoxCoderInferMeta(const MetaTensor& prior_box,
const MetaTensor& prior_box_var,
const MetaTensor& target_box,
Expand Down
13 changes: 13 additions & 0 deletions paddle/phi/infermeta/ternary.h
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,19 @@ void ArangeTensorInferMeta(const MetaTensor& start,
const MetaTensor& step,
MetaTensor* out);

void BatchFCInferMeta(const MetaTensor& input,
const MetaTensor& w,
const MetaTensor& bias,
MetaTensor* out);

void BatchFCGradInferMeta(const MetaTensor& input,
Dmovic marked this conversation as resolved.
Show resolved Hide resolved
Dmovic marked this conversation as resolved.
Show resolved Hide resolved
Dmovic marked this conversation as resolved.
Show resolved Hide resolved
const MetaTensor& w,
const MetaTensor& bias,
const MetaTensor& out_grad,
MetaTensor* input_grad,
MetaTensor* w_grad,
MetaTensor* bias_grad);

void BoxCoderInferMeta(const MetaTensor& prior_box,
const MetaTensor& prior_box_var,
const MetaTensor& target_box,
Expand Down
1 change: 1 addition & 0 deletions test/white_list/pir_op_test_white_list
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ test_assign_value_op
test_atan2_op
test_auc_op
test_auc_single_pred_op
test_batch_fc_op
test_bce_loss
test_bernoulli_op
test_bicubic_interp_v2_op
Expand Down