Unable to cast Python instance to C++ type #28227

Closed · zhangyushan223706 opened this issue Oct 23, 2020 · 5 comments

@zhangyushan223706
   1) aistudio
   2) v100
   3) paddle 1.8.0

  • Reproduction information:
    RuntimeError    Traceback (most recent call last)
    <ipython-input-...> in <module>
    156 avg_loss.backward()
    157
    --> 158 optimizer.minimize(avg_loss,parameter_list=track.parameters())
    159
    160
    </opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/decorator.py:decorator-gen-185> in minimize(self, loss, startup_program, parameter_list, no_grad_set)
    /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/dygraph/base.py in impl(func, *args, **kwargs)
    201 def impl(func, *args, **kwargs):
    202 with switch_tracer_mode_guard(is_train=False):
    --> 203 return func(*args, **kwargs)
    204
    205 return impl(func)
    /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/optimizer.py in minimize(self, loss, startup_program, parameter_list, no_grad_set)
    835
    836 optimize_ops = self.apply_optimize(
    --> 837 loss, startup_program=startup_program, params_grads=params_grads)
    838
    839 return optimize_ops, params_grads
    /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/optimizer.py in apply_optimize(self, loss, startup_program, params_grads)
    745 params_grads = append_regularization_ops(params_grads,
    746 self.regularization)
    --> 747 optimize_ops = self._create_optimization_pass(params_grads)
    748 else:
    749 program = loss.block.program
    /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/optimizer.py in _create_optimization_pass(self, parameters_and_grads)
    549 continue
    550 if param_and_grad[0].trainable is True:
    --> 551 self._append_optimize_op(target_block, param_and_grad)
    552 else:
    553 for param_and_grad in parameters_and_grads:
    /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/optimizer.py in _append_optimize_op(self, block, param_and_grad)
    1037 velocity_acc = self._get_accumulator(self._velocity_acc_str,
    1038 param_and_grad[0])
    -> 1039 lr = self._create_param_lr(param_and_grad)
    1040
    1041 if framework.in_dygraph_mode():
    /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/optimizer.py in _create_param_lr(self, param_and_grad)
    386 is_with_opt=True), framework.name_scope(
    387 'scale_with_param_lr'):
    --> 388 return self._global_learning_rate() * param_lr
    389
    390 def _create_accumulators(self, block, parameters):
    /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/dygraph/math_op_patch.py in impl(self, other_var)
    184 else:
    185 # add fill_op
    --> 186 other_var = create_scalar(value=other_var, dtype=lhs_dtype)
    187
    188 rhs_dtype = safe_get_dtype(other_var)
    /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/dygraph/math_op_patch.py in create_scalar(value, dtype)
    50
    51 def create_scalar(value, dtype):
    ---> 52 return create_tensor(value, dtype, shape=[1])
    53
    54 def astype(self, dtype):
    </opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/decorator.py:decorator-gen-198> in create_tensor(value, dtype, shape)
    /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/dygraph/base.py in impl(func, *args, **kwargs)
    201 def impl(func, *args, **kwargs):
    202 with switch_tracer_mode_guard(is_train=False):
    --> 203 return func(*args, **kwargs)
    204
    205 return impl(func)
    /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/dygraph/math_op_patch.py in create_tensor(value, dtype, shape)
    45 out = _varbase_creator(dtype=dtype)
    46 out = core.ops.fill_constant(out, 'dtype', dtype, 'shape', shape,
    ---> 47 'value', value, 'force_cpu', False)
    48 out.stop_gradient = True
    49 return out
    RuntimeError: Unable to cast Python instance to C++ type (compile in debug mode for details)
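For context on where the cast fails: the last frame calls core.ops.fill_constant(out, 'dtype', dtype, 'shape', shape, 'value', value, 'force_cpu', False), and the pybind11 binding raises "Unable to cast Python instance to C++ type" when the object bound to 'value' (here the per-parameter learning rate param_lr flowing out of _create_param_lr) cannot be converted to the expected C++ scalar. A minimal sketch of the distinction, assuming Paddle 1.8's fluid.ParamAttr API (the failing value below is hypothetical, not taken from this issue):

```python
import paddle.fluid as fluid

# The per-parameter learning rate must be a plain Python number so that
# fill_constant can cast it to a C++ scalar when dygraph mode computes
# global_lr * param_lr.
ok_attr = fluid.ParamAttr(learning_rate=1.0)   # castable: plain float

# Hypothetical failure mode: a non-numeric object stored as the learning
# rate would reach fill_constant as 'value' and trigger the RuntimeError
# shown above.
# bad_attr = fluid.ParamAttr(learning_rate=[1.0])
```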
@zhangyushan223706 (Author)

I'm programming in dynamic graph (dygraph) mode and the error occurs during optimization. How can I fix it?

juncaipeng assigned juncaipeng and unassigned jiweibo on Oct 23, 2020
@juncaipeng (Contributor) commented Oct 23, 2020

Could you paste your code? The problem can't be pinned down from the log alone; it may be a usage error.

@zhangyushan223706 (Author)

Mainly, I wrapped the paddle.layers operations in an additional layer of my own functions.

@zhangyushan223706 (Author)

How should the operations be wrapped?

@juncaipeng (Contributor)

Wrapping them normally is fine, but the underlying code must still conform to the Paddle API conventions.
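For reference, a minimal sketch of the plain Paddle 1.8 dygraph training step that any wrapper ultimately has to reduce to (the Linear model and random data are placeholders, not code from this issue):

```python
import numpy as np
import paddle.fluid as fluid

with fluid.dygraph.guard():
    model = fluid.dygraph.Linear(10, 1)
    # In 1.8 dygraph mode, the trainable parameters are handed to the
    # optimizer up front via parameter_list.
    optimizer = fluid.optimizer.SGDOptimizer(
        learning_rate=0.01, parameter_list=model.parameters())

    x = fluid.dygraph.to_variable(
        np.random.rand(4, 10).astype('float32'))
    loss = fluid.layers.reduce_mean(model(x))

    loss.backward()
    optimizer.minimize(loss)
    model.clear_gradients()
```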
