fix bf16 grad check
Difers committed Apr 22, 2023
1 parent e0d053d; commit 78e423c
Showing 1 changed file with 41 additions and 5 deletions.
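
In outline, the fix makes the BF16 gradient check compare the kernel's gradient against a numeric reference computed in FP32: the test stores an FP32 copy of the input (`self.inputs_fp32`), builds a numeric gradient from it with `get_numeric_gradient`, and passes that to `check_grad_with_place` via `user_defined_grads`, rather than differentiating numerically through the coarse uint16-encoded bf16 values. To keep the FP32 reference and the BF16 kernel working on the same values, the input is first round-tripped through bfloat16 with `convert_float_to_uint16` / `convert_uint16_to_float`. A minimal NumPy sketch of that round trip (illustrative only; `to_bf16_bits` / `from_bf16_bits` are hypothetical stand-ins for the eager_op_test helpers, which may round to nearest even rather than truncate):

import numpy as np

# bfloat16 keeps only the top 16 bits of a float32, so the round trip below
# snaps values onto the bf16 grid. This sketch simply truncates the low bits.
def to_bf16_bits(x):
    return (np.asarray(x, dtype=np.float32).view(np.uint32) >> 16).astype(np.uint16)

def from_bf16_bits(b):
    return (np.asarray(b, dtype=np.uint16).astype(np.uint32) << 16).view(np.float32)

x = np.round(np.random.random((2, 3)).astype(np.float32) * 100.0, 2)
x_repr = from_bf16_bits(to_bf16_bits(x))  # bf16-representable fp32 values
print(np.abs(x - x_repr).max())           # truncation error, below 0.5 for values < 128
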
python/paddle/fluid/tests/unittests/test_pool_max_op.py (46 changes: 41 additions & 5 deletions)
@@ -15,10 +15,16 @@
 import unittest
 
 import numpy as np
-from eager_op_test import OpTest, convert_float_to_uint16, convert_uint16_to_float
+from eager_op_test import (
+    OpTest,
+    convert_float_to_uint16,
+    convert_uint16_to_float,
+    get_numeric_gradient,
+)
 
 import paddle
 from paddle.fluid import core
+from paddle.fluid.tests.unittests.testsuite import create_op
 
 
 def adaptive_start_index(index, input_size, output_size):
@@ -154,8 +160,10 @@ def setUp(self):
 
         if self.is_bfloat16_op():
             input = np.random.random(self.shape).astype(np.float32)
-            input = convert_uint16_to_float(convert_float_to_uint16(np.round(input * 100.0, 2)))
-
+            input = convert_uint16_to_float(
+                convert_float_to_uint16(np.round(input * 100.0, 2))
+            )
+
         else:
             input = np.random.random(self.shape).astype(self.dtype)
             input = np.round(input * 100.0, 2)
@@ -188,6 +196,8 @@ def setUp(self):
                 'Out': convert_float_to_uint16(output),
                 "Mask": mask,
             }
+            self.inputs_fp32 = {'X': input}
+
         else:
             self.inputs = {'X': input}
             self.outputs = {'Out': output, "Mask": mask}
@@ -289,15 +299,28 @@ class TestMaxPool3dBF16(parent):
         def init_dtype(self):
             self.dtype = np.uint16
 
+        def get_numeric_grad(self, place, check_name):
+            scope = core.Scope()
+            self._check_grad_helper()
+            op = create_op(
+                scope, self.op_type, self.inputs, self.outputs, self.attrs
+            )
+            return get_numeric_gradient(
+                place, scope, op, self.inputs_fp32, check_name, ['Out']
+            )
+
         def test_check_output(self):
             place = core.CUDAPlace(0)
             if core.is_bfloat16_supported(place):
                 self.check_output_with_place(place)
 
         def test_check_grad(self):
             place = core.CUDAPlace(0)
+            numeric_grads = self.get_numeric_grad(place, 'X')
             if core.is_bfloat16_supported(place):
-                self.check_grad_with_place(place, {'X'}, ['Out'])
+                self.check_grad_with_place(
+                    place, {'X'}, ['Out'], user_defined_grads=[numeric_grads]
+                )
 
     cls_name = "{}_{}".format(parent.__name__, "BF16OP")
     TestMaxPool3dBF16.__name__ = cls_name
@@ -411,15 +434,28 @@ class TestMaxPool2dBF16(parent):
         def init_dtype(self):
             self.dtype = np.uint16
 
+        def get_numeric_grad(self, place, check_name):
+            scope = core.Scope()
+            self._check_grad_helper()
+            op = create_op(
+                scope, self.op_type, self.inputs, self.outputs, self.attrs
+            )
+            return get_numeric_gradient(
+                place, scope, op, self.inputs_fp32, check_name, ['Out']
+            )
+
         def test_check_output(self):
             place = core.CUDAPlace(0)
             if core.is_bfloat16_supported(place):
                 self.check_output_with_place(place)
 
         def test_check_grad(self):
             place = core.CUDAPlace(0)
+            numeric_grads = self.get_numeric_grad(place, 'X')
             if core.is_bfloat16_supported(place):
-                self.check_grad_with_place(place, {'X'}, ['Out'])
+                self.check_grad_with_place(
+                    place, {'X'}, ['Out'], user_defined_grads=[numeric_grads]
+                )
 
     cls_name = "{}_{}".format(parent.__name__, "BF16OP")
     TestMaxPool2dBF16.__name__ = cls_name
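
For context, the reference that `get_numeric_grad` returns is a finite-difference gradient evaluated at the bf16-representable FP32 input. A rough standalone analogue, assuming a scalar-valued Python callable rather than a Paddle operator in a scope (which is what `get_numeric_gradient` actually perturbs):

import numpy as np

def numeric_grad(f, x, delta=5e-3):
    # Central-difference gradient of a scalar-valued f with respect to x.
    # Illustrative only: Paddle's get_numeric_gradient re-runs the operator
    # after perturbing the input tensor, but the idea is the same.
    grad = np.zeros_like(x, dtype=np.float32)
    it = np.nditer(x, flags=['multi_index'])
    while not it.finished:
        idx = it.multi_index
        orig = x[idx]
        x[idx] = orig + delta
        f_plus = f(x)
        x[idx] = orig - delta
        f_minus = f(x)
        x[idx] = orig
        grad[idx] = (f_plus - f_minus) / (2.0 * delta)
        it.iternext()
    return grad

# Gradient of max over a single 2x2 window: 1 at the argmax, 0 elsewhere.
x = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
print(numeric_grad(lambda a: float(a.max()), x))

Supplying this FP32 reference through user_defined_grads lets check_grad_with_place keep its usual comparison while avoiding the loss of precision a numeric gradient taken directly on bf16 values would suffer.
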
