30 changes: 0 additions & 30 deletions report/big_tensor_gpu/error_config_paddleonly.txt
@@ -595,36 +595,6 @@ paddle.incubate.nn.functional.fused_rotary_position_embedding(Tensor([8, 2, 2, 1
paddle.incubate.nn.functional.fused_rotary_position_embedding(Tensor([8, 2, 2, 16],"float32"), Tensor([8, 2, 2, 16],"float32"), Tensor([67108864, 2, 2, 16],"float32"), Tensor([1, 8, 1, 16],"float32"), Tensor([1, 8, 1, 16],"float32"), position_ids=Tensor([2, 8],"int64"), use_neox_rotary_style=True, time_major=True, )
paddle.incubate.nn.functional.fused_rotary_position_embedding(Tensor([8, 2, 2, 16],"float32"), Tensor([8, 2, 2, 16],"float32"), Tensor([8, 16777216, 2, 16],"float32"), Tensor([1, 8, 1, 16],"float32"), Tensor([1, 8, 1, 16],"float32"), position_ids=Tensor([2, 8],"int64"), use_neox_rotary_style=True, time_major=True, )
paddle.incubate.nn.functional.fused_rotary_position_embedding(Tensor([8, 2, 2, 16],"float32"), Tensor([8, 2, 2, 16],"float32"), Tensor([8, 2, 2, 134217728],"float32"), Tensor([1, 8, 1, 16],"float32"), Tensor([1, 8, 1, 16],"float32"), position_ids=Tensor([2, 8],"int64"), use_neox_rotary_style=True, time_major=True, )
paddle.incubate.nn.functional.fused_rotary_position_embedding(Tensor([8, 8912897, 2, 16],"float32"), None, Tensor([8, 2, 2, 16],"float32"), Tensor([1, 8, 1, 16],"float32"), Tensor([1, 8, 1, 16],"float32"), position_ids=None, use_neox_rotary_style=False, time_major=True, )
paddle.incubate.nn.functional.fused_rotary_position_embedding(Tensor([8, 8912897, 2, 16],"float32"), None, Tensor([8, 2, 2, 16],"float32"), Tensor([1, 8, 1, 16],"float32"), Tensor([1, 8, 1, 16],"float32"), position_ids=None, use_neox_rotary_style=True, time_major=True, )
paddle.incubate.nn.functional.fused_rotary_position_embedding(Tensor([8, 8912897, 2, 16],"float32"), Tensor([8, 2, 2, 16],"float32"), None, Tensor([1, 8, 1, 16],"float32"), Tensor([1, 8, 1, 16],"float32"), position_ids=None, use_neox_rotary_style=False, time_major=True, )
paddle.incubate.nn.functional.fused_rotary_position_embedding(Tensor([8, 8912897, 2, 16],"float32"), Tensor([8, 2, 2, 16],"float32"), None, Tensor([1, 8, 1, 16],"float32"), Tensor([1, 8, 1, 16],"float32"), position_ids=None, use_neox_rotary_style=True, time_major=True, )
paddle.incubate.nn.functional.fused_rotary_position_embedding(Tensor([8, 8912897, 2, 16],"float32"), Tensor([8, 2, 2, 16],"float32"), Tensor([8, 2, 2, 16],"float32"), Tensor([1, 8, 1, 16],"float32"), Tensor([1, 8, 1, 16],"float32"), position_ids=None, use_neox_rotary_style=False, time_major=True, )
paddle.incubate.nn.functional.fused_rotary_position_embedding(Tensor([8, 8912897, 2, 16],"float32"), Tensor([8, 2, 2, 16],"float32"), Tensor([8, 2, 2, 16],"float32"), Tensor([1, 8, 1, 16],"float32"), Tensor([1, 8, 1, 16],"float32"), position_ids=None, use_neox_rotary_style=True, time_major=True, )
paddle.incubate.nn.functional.fused_rotary_position_embedding(Tensor([8, 8912897, 4, 8],"float32"), None, Tensor([8, 1, 2, 8],"float32"), Tensor([1, 8, 1, 8],"float32"), Tensor([1, 8, 1, 8],"float32"), position_ids=None, use_neox_rotary_style=False, time_major=True, )
paddle.incubate.nn.functional.fused_rotary_position_embedding(Tensor([8, 8912897, 4, 8],"float32"), None, Tensor([8, 1, 2, 8],"float32"), Tensor([1, 8, 1, 8],"float32"), Tensor([1, 8, 1, 8],"float32"), position_ids=None, use_neox_rotary_style=True, time_major=True, )
paddle.incubate.nn.functional.fused_rotary_position_embedding(Tensor([8, 8912897, 4, 8],"float32"), None, Tensor([8, 2, 1, 8],"float32"), Tensor([1, 8, 1, 8],"float32"), Tensor([1, 8, 1, 8],"float32"), position_ids=None, use_neox_rotary_style=False, time_major=True, )
paddle.incubate.nn.functional.fused_rotary_position_embedding(Tensor([8, 8912897, 4, 8],"float32"), None, Tensor([8, 2, 1, 8],"float32"), Tensor([1, 8, 1, 8],"float32"), Tensor([1, 8, 1, 8],"float32"), position_ids=None, use_neox_rotary_style=True, time_major=True, )
paddle.incubate.nn.functional.fused_rotary_position_embedding(Tensor([8, 8912897, 4, 8],"float32"), Tensor([8, 1, 2, 8],"float32"), None, Tensor([1, 8, 1, 8],"float32"), Tensor([1, 8, 1, 8],"float32"), position_ids=None, use_neox_rotary_style=False, time_major=True, )
paddle.incubate.nn.functional.fused_rotary_position_embedding(Tensor([8, 8912897, 4, 8],"float32"), Tensor([8, 1, 2, 8],"float32"), None, Tensor([1, 8, 1, 8],"float32"), Tensor([1, 8, 1, 8],"float32"), position_ids=None, use_neox_rotary_style=True, time_major=True, )
paddle.incubate.nn.functional.fused_rotary_position_embedding(Tensor([8, 8912897, 4, 8],"float32"), Tensor([8, 1, 2, 8],"float32"), Tensor([8, 1, 2, 8],"float32"), Tensor([1, 8, 1, 8],"float32"), Tensor([1, 8, 1, 8],"float32"), position_ids=None, use_neox_rotary_style=False, time_major=True, )
paddle.incubate.nn.functional.fused_rotary_position_embedding(Tensor([8, 8912897, 4, 8],"float32"), Tensor([8, 1, 2, 8],"float32"), Tensor([8, 1, 2, 8],"float32"), Tensor([1, 8, 1, 8],"float32"), Tensor([1, 8, 1, 8],"float32"), position_ids=None, use_neox_rotary_style=True, time_major=True, )
paddle.incubate.nn.functional.fused_rotary_position_embedding(Tensor([8, 8912897, 4, 8],"float32"), Tensor([8, 2, 1, 8],"float32"), None, Tensor([1, 8, 1, 8],"float32"), Tensor([1, 8, 1, 8],"float32"), position_ids=None, use_neox_rotary_style=False, time_major=True, )
paddle.incubate.nn.functional.fused_rotary_position_embedding(Tensor([8, 8912897, 4, 8],"float32"), Tensor([8, 2, 1, 8],"float32"), None, Tensor([1, 8, 1, 8],"float32"), Tensor([1, 8, 1, 8],"float32"), position_ids=None, use_neox_rotary_style=True, time_major=True, )
paddle.incubate.nn.functional.fused_rotary_position_embedding(Tensor([8, 8912897, 4, 8],"float32"), Tensor([8, 2, 1, 8],"float32"), Tensor([8, 2, 1, 8],"float32"), Tensor([1, 8, 1, 8],"float32"), Tensor([1, 8, 1, 8],"float32"), position_ids=None, use_neox_rotary_style=False, time_major=True, )
paddle.incubate.nn.functional.fused_rotary_position_embedding(Tensor([8, 8912897, 4, 8],"float32"), Tensor([8, 2, 1, 8],"float32"), Tensor([8, 2, 1, 8],"float32"), Tensor([1, 8, 1, 8],"float32"), Tensor([1, 8, 1, 8],"float32"), position_ids=None, use_neox_rotary_style=True, time_major=True, )
paddle.incubate.nn.functional.fused_rotary_position_embedding(Tensor([8912897, 8, 2, 16],"float32"), Tensor([2, 8, 2, 16],"float32"), None, Tensor([1, 8, 1, 16],"float32"), Tensor([1, 8, 1, 16],"float32"), position_ids=None, use_neox_rotary_style=False, time_major=False, )
paddle.incubate.nn.functional.fused_rotary_position_embedding(Tensor([8912897, 8, 2, 16],"float32"), Tensor([2, 8, 2, 16],"float32"), None, Tensor([1, 8, 1, 16],"float32"), Tensor([1, 8, 1, 16],"float32"), position_ids=None, use_neox_rotary_style=True, time_major=False, )
paddle.incubate.nn.functional.fused_rotary_position_embedding(Tensor([8912897, 8, 2, 16],"float32"), Tensor([2, 8, 2, 16],"float32"), Tensor([2, 8, 2, 16],"float32"), Tensor([1, 8, 1, 16],"float32"), Tensor([1, 8, 1, 16],"float32"), position_ids=None, use_neox_rotary_style=False, time_major=False, )
paddle.incubate.nn.functional.fused_rotary_position_embedding(Tensor([8912897, 8, 2, 16],"float32"), Tensor([2, 8, 2, 16],"float32"), Tensor([2, 8, 2, 16],"float32"), Tensor([1, 8, 1, 16],"float32"), Tensor([1, 8, 1, 16],"float32"), position_ids=None, use_neox_rotary_style=True, time_major=False, )
paddle.incubate.nn.functional.fused_rotary_position_embedding(Tensor([8912897, 8, 4, 8],"float32"), Tensor([1, 8, 2, 8],"float32"), None, Tensor([1, 8, 1, 8],"float32"), Tensor([1, 8, 1, 8],"float32"), position_ids=None, use_neox_rotary_style=False, time_major=False, )
paddle.incubate.nn.functional.fused_rotary_position_embedding(Tensor([8912897, 8, 4, 8],"float32"), Tensor([1, 8, 2, 8],"float32"), None, Tensor([1, 8, 1, 8],"float32"), Tensor([1, 8, 1, 8],"float32"), position_ids=None, use_neox_rotary_style=True, time_major=False, )
paddle.incubate.nn.functional.fused_rotary_position_embedding(Tensor([8912897, 8, 4, 8],"float32"), Tensor([1, 8, 2, 8],"float32"), Tensor([1, 8, 2, 8],"float32"), Tensor([1, 8, 1, 8],"float32"), Tensor([1, 8, 1, 8],"float32"), position_ids=None, use_neox_rotary_style=False, time_major=False, )
paddle.incubate.nn.functional.fused_rotary_position_embedding(Tensor([8912897, 8, 4, 8],"float32"), Tensor([1, 8, 2, 8],"float32"), Tensor([1, 8, 2, 8],"float32"), Tensor([1, 8, 1, 8],"float32"), Tensor([1, 8, 1, 8],"float32"), position_ids=None, use_neox_rotary_style=True, time_major=False, )
paddle.incubate.nn.functional.fused_rotary_position_embedding(Tensor([8912897, 8, 4, 8],"float32"), Tensor([2, 8, 1, 8],"float32"), None, Tensor([1, 8, 1, 8],"float32"), Tensor([1, 8, 1, 8],"float32"), position_ids=None, use_neox_rotary_style=False, time_major=False, )
paddle.incubate.nn.functional.fused_rotary_position_embedding(Tensor([8912897, 8, 4, 8],"float32"), Tensor([2, 8, 1, 8],"float32"), None, Tensor([1, 8, 1, 8],"float32"), Tensor([1, 8, 1, 8],"float32"), position_ids=None, use_neox_rotary_style=True, time_major=False, )
paddle.incubate.nn.functional.fused_rotary_position_embedding(Tensor([8912897, 8, 4, 8],"float32"), Tensor([2, 8, 1, 8],"float32"), Tensor([2, 8, 1, 8],"float32"), Tensor([1, 8, 1, 8],"float32"), Tensor([1, 8, 1, 8],"float32"), position_ids=None, use_neox_rotary_style=False, time_major=False, )
paddle.incubate.nn.functional.fused_rotary_position_embedding(Tensor([8912897, 8, 4, 8],"float32"), Tensor([2, 8, 1, 8],"float32"), Tensor([2, 8, 1, 8],"float32"), Tensor([1, 8, 1, 8],"float32"), Tensor([1, 8, 1, 8],"float32"), position_ids=None, use_neox_rotary_style=True, time_major=False, )
paddle.incubate.nn.functional.swiglu(Tensor([1, 4096, 1048576],"float32"), )
paddle.incubate.nn.functional.variable_length_memory_efficient_attention(Tensor([1, 1, 31, 64],"float16"), Tensor([1, 1, 31, 64],"float16"), Tensor([1, 1, 31, 138547333],"float16"), Tensor([1, 1],"int32"), Tensor([1, 1],"int32"), mask=Tensor([1, 1, 50, 50],"float16"), scale=0.125, )
paddle.incubate.nn.functional.variable_length_memory_efficient_attention(Tensor([1, 1, 67108865, 64],"float16"), Tensor([1, 1, 31, 64],"float16"), Tensor([1, 1, 31, 64],"float16"), Tensor([1, 1],"int32"), Tensor([1, 1],"int32"), mask=Tensor([1, 1, 50, 50],"float16"), scale=0.125, )
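For orientation, each line in error_config_paddleonly.txt records a single API invocation in a compact Tensor([shape],"dtype") notation. The sketch below is an illustration only, not the harness's own reproduction code: it shows how one such entry maps onto a runnable Paddle call, with the input shape deliberately reduced (an assumption made so the example fits in ordinary GPU/CPU memory), whereas the recorded configuration uses a 16 GiB float32 input.

# Illustrative sketch only: a shape-reduced form of the config line
#   paddle.incubate.nn.functional.swiglu(Tensor([1, 4096, 1048576],"float32"), )
# The original [1, 4096, 1048576] float32 tensor (16 GiB) is what makes the
# big-tensor run fail; the smaller shape here exists only so the example executes.
import paddle
import paddle.incubate.nn.functional as F

x = paddle.randn([1, 4096, 1024], dtype="float32")  # reduced from [1, 4096, 1048576]
out = F.swiglu(x)   # single-input form: the last dim is split in half and gated
print(out.shape)    # [1, 4096, 512]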