[Hackathon 6th Fundable Projects 3 No.18] Remove fluid operator bernoulli_p #63236

Merged 3 commits on Apr 11, 2024
82 changes: 0 additions & 82 deletions paddle/fluid/operators/prim_ops/bernoulli_p_op.cc

This file was deleted.

9 changes: 0 additions & 9 deletions python/paddle/incubate/autograd/primops.py
@@ -72,15 +72,6 @@ def fill_const(value, shape, dtype, out=None):
    return out


def bernoulli(shape, dtype, p, out=None):
    attrs = {'shape': shape, 'dtype': dtype, 'p': p}
    helper = LayerHelper('bernoulli_p', **locals())
    if out is None:
        out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(type=helper.layer_type, outputs={'Y': out}, attrs=attrs)
    return out
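
With `bernoulli_p` removed, an equivalent mask can be drawn through the public API. A minimal sketch, assuming nothing beyond `paddle.full` and `paddle.bernoulli` (the shape, probability, and variable names here are illustrative, not part of this PR):

```python
import paddle

# Draw a 0/1 mask comparable to what primops.bernoulli produced:
# each element is 1 with the probability given elementwise by `probs`.
probs = paddle.full(shape=[5, 8], fill_value=0.5, dtype='float32')
mask = paddle.bernoulli(probs)
```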


def neg(x, out=None):
    zero = fill_const(0.0, x.shape, x.dtype)
    return sub(zero, x)
29 changes: 0 additions & 29 deletions python/paddle/incubate/autograd/primrules.py
@@ -21,7 +21,6 @@
from . import primops
from .primops import (
    add,
    bernoulli,
    broadcast,
    concat,
    cos,
@@ -460,34 +459,6 @@ def gelu_orig2prim(op, x):
    )


@REGISTER_ORIG2PRIM('dropout')
def dropout_orig2prim(op, seed_t, x):
    assert (
        seed_t is None
    ), 'Can not lower dropout into prim ops with seedtensor.'
    mask = bernoulli(shape=x.shape, dtype=x.dtype, p=op.attr('dropout_prob'))
    if op.attr('dropout_implementation') == 'upscale_in_train':
        if not op.attr('is_test'):
            out = div(
                mul(x, mask),
                fill_const(1.0 - op.attr('dropout_prob'), x.shape, x.dtype),
            )
            return primops.cast(mask, dtype=paddle.uint8), out
        else:
            return primops.cast(mask, dtype=paddle.uint8), x
    elif op.attr('dropout_implementation') == 'downgrade_in_infer':
        if not op.attr('is_test'):
            return primops.cast(mask, dtype=paddle.uint8), mul(x, mask)
        else:
            return primops.cast(mask, dtype=paddle.uint8), mul(
                x, fill_const(1.0 - op.attr('dropout_prob'), x.shape, x.dtype)
            )
    else:
        raise RuntimeError(
            'Unsupported dropout_implementation, only support upscale_in_train and downgrade_in_infer'
        )
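
As a reading aid, a minimal NumPy sketch of the two `dropout_implementation` modes this deleted rule lowered into `bernoulli_p`, `mul_p`, `fill_constant_p`, and `div_p`. It assumes standard dropout semantics, where the mask keeps each element with probability `1 - p`; `dropout_reference` is an illustrative name, not a Paddle API:

```python
import numpy as np

def dropout_reference(x, p, is_test, implementation):
    # Keep-mask: 1 with probability 1 - p, 0 otherwise.
    mask = (np.random.rand(*x.shape) >= p).astype(x.dtype)
    if implementation == 'upscale_in_train':
        # Scale kept values at train time so inference is the identity.
        return x if is_test else x * mask / (1.0 - p)
    if implementation == 'downgrade_in_infer':
        # Leave train-time values unscaled; downscale at inference.
        return x * (1.0 - p) if is_test else x * mask
    raise RuntimeError(
        'only upscale_in_train and downgrade_in_infer are supported'
    )
```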


@REGISTER_ORIG2PRIM('uniform_random')
def uniform_random_orig2prim(op, shape_t, shape_tl):
    if shape_t or shape_tl:
117 changes: 0 additions & 117 deletions test/autograd/test_orig2prim.py
@@ -796,123 +796,6 @@ def init_data(self):
        self.out_map = {0: self.output['Out']}


class TestDropoutOrig2PrimCase1(TestElementWiseAddOrig2Prim):
    def init_data(self):
        self.op_type = 'dropout'
        X = paddle.static.data(name='X', shape=[5, 8], dtype='float')

        self.input = {'X': X}
        self.output = {
            'Mask': self.layer_help.create_variable_for_type_inference(
                dtype=paddle.uint8
            ),
            'Out': self.layer_help.create_variable_for_type_inference(
                dtype=X.dtype
            ),
        }
        self.attrs = {
            'dropout_prob': 0.5,
            'is_test': False,
            'dropout_implementation': 'upscale_in_train',
        }

        self.orig2prim_args = (None, X)
        self.all_ops = [
            'bernoulli_p',
            'mul_p',
            'fill_constant_p',
            'div_p',
            'cast_p',
            'dropout',
        ]
        # { prim_op_output_index: orig_op_output_var }
        self.out_map = {0: self.output['Mask'], 1: self.output['Out']}


class TestDropoutOrig2PrimCase2(TestElementWiseAddOrig2Prim):
    def init_data(self):
        self.op_type = 'dropout'
        X = paddle.static.data(name='X', shape=[5, 8], dtype='float')

        self.input = {'X': X}
        self.output = {
            'Mask': self.layer_help.create_variable_for_type_inference(
                dtype=paddle.uint8
            ),
            'Out': self.layer_help.create_variable_for_type_inference(
                dtype=X.dtype
            ),
        }
        self.attrs = {
            'dropout_prob': 0.5,
            'is_test': False,
            'dropout_implementation': 'downgrade_in_infer',
        }

        self.orig2prim_args = (None, X)
        self.all_ops = ['bernoulli_p', 'mul_p', 'cast_p', 'dropout']
        # { prim_op_output_index: orig_op_output_var }
        self.out_map = {0: self.output['Mask'], 1: self.output['Out']}


class TestDropoutOrig2PrimCase3(TestElementWiseAddOrig2Prim):
    def init_data(self):
        self.op_type = 'dropout'
        X = paddle.static.data(name='X', shape=[5, 8], dtype='float')

        self.input = {'X': X}
        self.output = {
            'Mask': self.layer_help.create_variable_for_type_inference(
                dtype=paddle.uint8
            ),
            'Out': self.layer_help.create_variable_for_type_inference(
                dtype=X.dtype
            ),
        }
        self.attrs = {
            'dropout_prob': 0.5,
            'is_test': True,
            'dropout_implementation': 'upscale_in_train',
        }

        self.orig2prim_args = (None, X)
        self.all_ops = ['bernoulli_p', 'cast_p', 'dropout']
        # { prim_op_output_index: orig_op_output_var }
        self.out_map = {0: self.output['Mask'], 1: self.output['Out']}


class TestDropoutOrig2PrimCase4(TestElementWiseAddOrig2Prim):
    def init_data(self):
        self.op_type = 'dropout'
        X = paddle.static.data(name='X', shape=[5, 8], dtype='float')

        self.input = {'X': X}
        self.output = {
            'Mask': self.layer_help.create_variable_for_type_inference(
                dtype=paddle.uint8
            ),
            'Out': self.layer_help.create_variable_for_type_inference(
                dtype=X.dtype
            ),
        }
        self.attrs = {
            'dropout_prob': 0.5,
            'is_test': True,
            'dropout_implementation': 'downgrade_in_infer',
        }

        self.orig2prim_args = (None, X)
        self.all_ops = [
            'bernoulli_p',
            'fill_constant_p',
            'mul_p',
            'cast_p',
            'dropout',
        ]
        # { prim_op_output_index: orig_op_output_var }
        self.out_map = {0: self.output['Mask'], 1: self.output['Out']}
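
Taken together, the four deleted cases pin down which prim ops each dropout configuration lowered to. A summary derived from the `all_ops` lists above, omitting the original `dropout` and the `bernoulli_p`/`cast_p` common to all four; the dict name is illustrative only:

```python
# (dropout_implementation, is_test) -> extra prim ops in the lowering
EXPECTED_EXTRA_PRIM_OPS = {
    ('upscale_in_train', False): ['mul_p', 'fill_constant_p', 'div_p'],
    ('downgrade_in_infer', False): ['mul_p'],
    ('upscale_in_train', True): [],
    ('downgrade_in_infer', True): ['fill_constant_p', 'mul_p'],
}
```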


class TestReduceSumOrig2Prim(TestElementWiseAddOrig2Prim):
    def init_data(self):
        self.op_type = 'reduce_sum'
17 changes: 0 additions & 17 deletions test/autograd/test_prim2orig.py
@@ -677,23 +677,6 @@ def init_data(self):
        self.out_map = {self.output['Z']: 0}


class TestBernoulliPPrim2Orig(TestAddPPrim2Orig):
    def init_data(self):
        self.op_type = 'bernoulli_p'

        self.input = {}
        self.output = {
            'Y': self.layer_help.create_variable_for_type_inference(
                dtype=paddle.float64
            )
        }
        self.attrs = {'shape': [7, 8], 'dtype': paddle.float64, 'p': 0.5}

        self.prim2orig_args = ()
        self.all_ops = ['bernoulli_p', 'fill_constant', 'bernoulli']
        self.out_map = {self.output['Y']: 0}
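
The `all_ops` list in this deleted test records the prim2orig lowering: one `bernoulli_p` becomes a `fill_constant` feeding a `bernoulli`. A hedged one-line public-API equivalent using the same attributes as this test:

```python
import paddle

# fill_constant([7, 8], 0.5, float64) feeding bernoulli, mirroring the
# lowering this test asserted for bernoulli_p with p=0.5.
y = paddle.bernoulli(paddle.full([7, 8], 0.5, dtype='float64'))
```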


class TestCastPPrim2Orig(TestAddPPrim2Orig):
    def init_data(self):
        self.op_type = 'cast_p'