fix function-redefined #34510

Merged (1 commit, Jul 30, 2021)
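
Context for the diff below: every change in this PR renames or deletes a test method, test class, or helper that was defined twice in the same scope. Python binds a name to whichever def or class statement runs last, so the earlier copy is silently discarded and, for tests, never collected or run. A minimal sketch of the problem (the class and method names here are hypothetical, not taken from this PR):

import unittest


class TestExample(unittest.TestCase):
    def test_case(self):
        self.assertEqual(1 + 1, 2)

    def test_case(self):  # rebinds the attribute; the first test_case disappears
        self.assertEqual(2 + 2, 4)


# unittest discovers tests by attribute name, so only the second body exists on
# the class. Renaming the duplicate (e.g. test_case_2) keeps both tests alive,
# which is the pattern applied throughout the diff below.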
10 changes: 0 additions & 10 deletions python/paddle/fluid/dataloader/dataloader_iter.py
@@ -391,16 +391,6 @@ def _reset(self):
         for _ in range(self._outstanding_capacity):
             self._try_put_indices()
 
-    def _clear_and_remove_data_queue(self):
-        if self._data_queue is not None:
-            while True:
-                try:
-                    self._data_queue.get_nowait()
-                except:
-                    self._data_queue.cancel_join_thread()
-                    self._data_queue.close()
-                    break
-
     def _shutdown_worker(self, worker_id, shutdown=False):
         if self._worker_status[worker_id] or (self._persistent_workers and
                                               shutdown):
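Duplicated definitions like the _clear_and_remove_data_queue copy deleted above are what pylint's function-redefined check (E0102) reports. Not part of this PR, but a minimal sketch of how such redefinitions can be located with only the standard library ast module; the script name and output format are assumptions for illustration:

# find_redefined.py (a hypothetical helper, not part of Paddle or this PR):
# report functions, methods, or classes defined more than once in the same
# module, class, or function body.
import ast
import sys
from collections import Counter


def report_redefined(path):
    with open(path) as f:
        tree = ast.parse(f.read(), filename=path)
    # Module, class, and function bodies are the scopes where a later
    # definition silently replaces an earlier one with the same name.
    for node in ast.walk(tree):
        if isinstance(node, (ast.Module, ast.ClassDef, ast.FunctionDef)):
            scope = getattr(node, "name", "<module>")
            counts = Counter(
                child.name for child in node.body
                if isinstance(child, (ast.FunctionDef, ast.AsyncFunctionDef,
                                      ast.ClassDef)))
            for name, count in counts.items():
                if count > 1:
                    print(f"{path}: {scope}.{name} defined {count} times")


if __name__ == "__main__":
    for filename in sys.argv[1:]:
        report_redefined(filename)

Run against the pre-fix versions of the files in this diff, a checker along these lines should surface the same duplicated names that the commit renames or removes.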
@@ -219,7 +219,7 @@ def forward(self, x, **kwargs):
         return x
 
 
-class TestDictPop(TestNetWithDict):
+class TestDictPop3(TestNetWithDict):
     def setUp(self):
         self.x = np.array([2, 2]).astype('float32')
 
@@ -117,9 +117,6 @@ def setUp(self):
     def init_dtype(self):
         self.dtype = np.float32
 
-    def init_dtype(self):
-        self.dtype = np.float32
-
 
 class TestMKLDNNHardSwishDim2(TestHardSwish):
     def setUp(self):
@@ -122,9 +122,6 @@ def init_shape(self):
     def init_kernel_type(self):
         self.use_mkldnn = True
 
-    def init_global_pool(self):
-        self.global_pool = False
-
     def init_data_type(self):
         self.dtype = np.float32
 
@@ -79,7 +79,7 @@ def init(self):
         self.dtype = "float64"
 
 
-class TestSequenceUnpadOp4(TestSequenceUnpadOp):
+class TestSequenceUnpadOp5(TestSequenceUnpadOp):
     def init(self):
         self.length = [0, 4, 3, 0]
         self.x_shape = (4, 5, 3, 3, 6)
2 changes: 1 addition & 1 deletion python/paddle/fluid/tests/unittests/test_argsort_op.py
@@ -436,7 +436,7 @@ def init(self):
         self.axis = 1
 
 
-class TestArgsortImperative2(TestArgsortImperative):
+class TestArgsortImperative4(TestArgsortImperative):
     def init(self):
         self.input_shape = [2, 3, 4]
         self.axis = 1
@@ -484,7 +484,7 @@ def test_scale_shape():
                 align_corners=False,
                 scale_factor=[1, 2, 2])
 
-        def test_scale_value():
+        def test_scale_value_1():
             x = fluid.data(name="x", shape=[2, 3, 6, 6], dtype="float32")
             out = interpolate(
                 x,
@@ -535,7 +535,7 @@ def test_input_shape():
         self.assertRaises(ValueError, test_outshape_and_scale)
         self.assertRaises(ValueError, test_align_corners_and_nearest)
         self.assertRaises(ValueError, test_scale_shape)
-        self.assertRaises(ValueError, test_scale_value)
+        self.assertRaises(ValueError, test_scale_value_1)
         self.assertRaises(ValueError, test_size_and_scale)
         self.assertRaises(ValueError, test_size_and_scale2)
         self.assertRaises(TypeError, test_size_type)
2 changes: 1 addition & 1 deletion python/paddle/fluid/tests/unittests/test_flatten2_op.py
@@ -46,7 +46,7 @@ def init_attrs(self):
         self.attrs = {"axis": self.axis}
 
 
-class TestFlattenOp(TestFlattenOp):
+class TestFlattenOp1(TestFlattenOp):
     def init_test_case(self):
         self.in_shape = (3, 2, 5, 4)
         self.axis = 0
2 changes: 1 addition & 1 deletion python/paddle/fluid/tests/unittests/test_flatten_op.py
@@ -43,7 +43,7 @@ def init_attrs(self):
         self.attrs = {"axis": self.axis}
 
 
-class TestFlattenOp(TestFlattenOp):
+class TestFlattenOp1(TestFlattenOp):
     def init_test_case(self):
         self.in_shape = (3, 2, 2, 10)
         self.axis = 0
@@ -209,7 +209,7 @@ def node_func():
         proc_b.start()
         wait([proc_a, proc_b])
 
-    def test_graph_execution_optimizer(self):
+    def test_graph_execution_optimizer_v2(self):
         port_a = self._dist_ut_port_0 + 6
         port_b = self._dist_ut_port_1 + 6
         node_a = {
@@ -292,7 +292,7 @@ def test_sharding_clone_for_test(self):
         ])
 
 
-class TestFleetMetaOptimizer(TestFleetMetaOptimizer):
+class TestFleetMetaOptimizer_V1(TestFleetMetaOptimizer):
     def setUp(self):
         os.environ["PADDLE_TRAINER_ID"] = "3"
         os.environ[
12 changes: 0 additions & 12 deletions python/paddle/fluid/tests/unittests/test_imperative_auto_prune.py
@@ -375,18 +375,6 @@ def test_case2_prune_no_grad_branch(self):
             self.assertTrue(case3.linear2.weight._grad_ivar() is None)
             self.assertTrue(case3.linear.weight._grad_ivar() is not None)
 
-    def test_case2_prune_no_grad_branch(self):
-        with fluid.dygraph.guard():
-            value1 = np.arange(784).reshape(1, 784)
-            value2 = np.arange(1).reshape(1, 1)
-            v1 = fluid.dygraph.to_variable(value1).astype("float32")
-            v2 = fluid.dygraph.to_variable(value2).astype("float32")
-            case3 = AutoPruneLayer2(input_size=784)
-            loss = case3(v1, v2)
-            loss.backward()
-            self.assertTrue(case3.linear2.weight._grad_ivar() is None)
-            self.assertTrue(case3.linear.weight._grad_ivar() is not None)
-
     def test_case3_prune_no_grad_branch2(self):
         with fluid.dygraph.guard():
             value1 = np.arange(1).reshape(1, 1)
4 changes: 2 additions & 2 deletions python/paddle/fluid/tests/unittests/test_linspace.py
@@ -143,10 +143,10 @@ def test_start_type():
 
         self.assertRaises(TypeError, test_start_type)
 
-        def test_end_dtype():
+        def test_end_type():
             fluid.layers.linspace(0, [10], 1, dtype="float32")
 
-        self.assertRaises(TypeError, test_end_dtype)
+        self.assertRaises(TypeError, test_end_type)
 
         def test_step_dtype():
             fluid.layers.linspace(0, 10, [0], dtype="float32")
@@ -27,7 +27,7 @@ def test_hybrid_parallel_pp_layer(self):
     def test_hybrid_parallel_pp_tuple_inputs(self):
         self.run_mnist_2gpu('hybrid_parallel_pp_embedding.py')
 
-    def test_hybrid_parallel_pp_tuple_inputs(self):
+    def test_hybrid_parallel_shared_weight(self):
         self.run_mnist_2gpu('hybrid_parallel_shared_weight.py')
 
     def test_pipeline_parallel(self):