Training SCTNet crashes with a CUDA illegal memory access raised from the OHEM loss:
Traceback (most recent call last):
File "tools/train.py", line 245, in
main()
File "tools/train.py", line 234, in main
train_segmentor(
File "/home/a8/zy/Projects/Semantic Segmentation/SCTNet/mmseg/apis/train.py", line 194, in train_segmentor
runner.run(data_loaders, cfg.workflow)
File "/home/a8/anaconda3/envs/STCNet/lib/python3.8/site-packages/mmcv/runner/iter_based_runner.py", line 144, in run
iter_runner(iter_loaders[i], **kwargs)
File "/home/a8/anaconda3/envs/STCNet/lib/python3.8/site-packages/mmcv/runner/iter_based_runner.py", line 64, in train
outputs = self.model.train_step(data_batch, self.optimizer, **kwargs)
File "/home/a8/anaconda3/envs/STCNet/lib/python3.8/site-packages/mmcv/parallel/distributed.py", line 63, in train_step
output = self.module.train_step(*inputs[0], **kwargs[0])
File "/home/a8/zy/Projects/Semantic Segmentation/SCTNet/mmseg/models/segmentors/base.py", line 138, in train_step
losses = self(**data_batch)
File "/home/a8/anaconda3/envs/STCNet/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1110, in _call_impl
return forward_call(*input, **kwargs)
File "/home/a8/anaconda3/envs/STCNet/lib/python3.8/site-packages/mmcv/runner/fp16_utils.py", line 116, in new_func
return old_func(*args, **kwargs)
File "/home/a8/zy/Projects/Semantic Segmentation/SCTNet/mmseg/models/segmentors/base.py", line 108, in forward
return self.forward_train(img, img_metas, **kwargs)
File "/home/a8/zy/Projects/Semantic Segmentation/SCTNet/mmseg/models/segmentors/encoder_decoder_distill.py", line 143, in forward_train
loss_decode,decoder_feature,seg_logits = self._decode_head_forward_train(x, img_metas,
File "/home/a8/zy/Projects/Semantic Segmentation/SCTNet/mmseg/models/segmentors/encoder_decoder_distill.py", line 86, in _decode_head_forward_train
loss_decode,decoder_feature,seg_logits = self.decode_head.forward_train(x, img_metas,
File "/home/a8/zy/Projects/Semantic Segmentation/SCTNet/mmseg/models/decode_heads/sct_head.py", line 61, in forward_train
losses = self.losses(seg_logits, gt_semantic_seg)
File "/home/a8/anaconda3/envs/STCNet/lib/python3.8/site-packages/mmcv/runner/fp16_utils.py", line 205, in new_func
return old_func(*args, **kwargs)
File "/home/a8/zy/Projects/Semantic Segmentation/SCTNet/mmseg/models/decode_heads/decode_head.py", line 252, in losses
loss[loss_decode.loss_name] = loss_decode(
File "/home/a8/anaconda3/envs/STCNet/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1110, in _call_impl
return forward_call(*input, **kwargs)
File "/home/a8/zy/Projects/Semantic Segmentation/SCTNet/mmseg/models/losses/ohem_loss.py", line 81, in forward
pred, ind = pred.view(-1, )[mask].contiguous().sort()
RuntimeError: CUDA error: an illegal memory access was encountered
CUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.
For debugging consider passing CUDA_LAUNCH_BLOCKING=1.
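
One common cause of an illegal memory access at the pred.view(-1, )[mask] indexing step in ohem_loss.py (not confirmed by this trace alone) is a ground-truth label outside the range [0, num_classes) that is not the ignore index: such a value makes the loss kernels index out of bounds on the GPU. A minimal sanity check over the annotation files, assuming Cityscapes-style *_labelTrainIds.png labels with 255 as the ignore index (NUM_CLASSES and LABEL_DIR below are placeholders to adjust for your dataset):

# check_labels.py -- a minimal sketch, not part of the SCTNet repo.
# Assumption: Cityscapes-style *_labelTrainIds.png annotations with 255
# as the ignore index; NUM_CLASSES and LABEL_DIR are placeholders.
import glob
import os

import numpy as np
from PIL import Image

NUM_CLASSES = 19     # placeholder: must match num_classes in your config
IGNORE_INDEX = 255   # mmseg's default ignore index
LABEL_DIR = "data/cityscapes/gtFine/train"  # placeholder dataset path

bad_files = 0
pattern = os.path.join(LABEL_DIR, "**", "*_labelTrainIds.png")
for path in glob.glob(pattern, recursive=True):
    labels = np.asarray(Image.open(path))
    values = np.unique(labels)
    # Any value >= NUM_CLASSES that is not the ignore index will index
    # out of bounds inside CUDA loss kernels such as OHEM's sort/gather.
    invalid = values[(values >= NUM_CLASSES) & (values != IGNORE_INDEX)]
    if invalid.size:
        bad_files += 1
        print(f"{path}: out-of-range label values {invalid.tolist()}")
print(f"done, {bad_files} file(s) with invalid labels")

If the labels are clean, rerunning as CUDA_LAUNCH_BLOCKING=1 python tools/train.py <your config> forces synchronous kernel launches, so the traceback points at the kernel that actually faulted, as the error message above suggests.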