The error is as follows; I do not know how to solve it:
File "./tools/train.py", line 168, in
main()
File "./tools/train.py", line 157, in main
train_detector(
File "/opt/zcy/anaconda/envs/ctp/CenterPoint/det3d/torchie/apis/train.py", line 326, in train_detector
trainer.run(data_loaders, cfg.workflow, cfg.total_epochs, local_rank=cfg.local_rank)
File "/opt/zcy/anaconda/envs/ctp/CenterPoint/det3d/torchie/trainer/trainer.py", line 542, in run
epoch_runner(data_loaders[i], self.epoch, **kwargs)
File "/opt/zcy/anaconda/envs/ctp/CenterPoint/det3d/torchie/trainer/trainer.py", line 417, in train
self.call_hook("after_train_iter")
File "/opt/zcy/anaconda/envs/ctp/CenterPoint/det3d/torchie/trainer/trainer.py", line 330, in call_hook
getattr(hook, fn_name)(self)
File "/opt/zcy/anaconda/envs/ctp/CenterPoint/det3d/torchie/trainer/hooks/optimizer.py", line 18, in after_train_iter
trainer.outputs["loss"].backward()
File "/opt/zcy/anaconda/envs/ctp/lib/python3.8/site-packages/torch/_tensor.py", line 396, in backward
torch.autograd.backward(self, gradient, retain_graph, create_graph, inputs=inputs)
File "/opt/zcy/anaconda/envs/ctp/lib/python3.8/site-packages/torch/autograd/init.py", line 173, in backward
Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
File "/opt/zcy/anaconda/envs/ctp/lib/python3.8/site-packages/torch/autograd/function.py", line 253, in apply
return user_fn(self, *args)
File "/opt/zcy/anaconda/envs/ctp/lib/python3.8/site-packages/torch/autograd/function.py", line 399, in wrapper
outputs = fn(ctx, *args)
File "/opt/zcy/anaconda/envs/ctp/CenterPoint/det3d/ops/dcn/deform_conv.py", line 87, in backward
deform_conv_cuda.deform_conv_backward_parameters_cuda(
RuntimeError: view size is not compatible with input tensor's size and stride (at least one dimension spans across two contiguous subspaces). Use .reshape(...) instead.
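For context, this RuntimeError is raised whenever `.view()` is called on a non-contiguous tensor, which is what seems to happen somewhere along the backward path of the DCN op. A minimal sketch (not taken from the CenterPoint code) that reproduces the error and the two workarounds the message hints at:

```python
import torch

# transpose() returns a non-contiguous tensor: its strides no longer describe
# one contiguous block of memory, so view() cannot reinterpret it in place.
x = torch.arange(24).reshape(2, 3, 4).transpose(1, 2)
print(x.is_contiguous())  # False

# x.view(2, 12) raises:
# RuntimeError: view size is not compatible with input tensor's size and stride ...

y = x.reshape(2, 12)            # copies the data only if necessary
z = x.contiguous().view(2, 12)  # or force a contiguous copy first, then view
```

If the failing call follows the same pattern, switching the offending `.view(...)` in the backward of `det3d/ops/dcn/deform_conv.py` (around line 87) to `.reshape(...)`, or calling `.contiguous()` on the gradient tensor before it, may resolve the crash.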