diff --git a/.github/scripts/apply_torch_pr.py b/.github/scripts/apply_torch_pr.py
index 89fa32fdf..bbe89ed7d 100644
--- a/.github/scripts/apply_torch_pr.py
+++ b/.github/scripts/apply_torch_pr.py
@@ -12,8 +12,7 @@
     # Fallback to CPU for XPU FP64
     "https://github.com/pytorch/pytorch/pull/126516",
     # Modify the tolerance level in TIMM benchmark
-    # "https://github.com/pytorch/pytorch/pull/129735",
-    "https://github.com/mengfei25/pytorch/pull/21",
+    "https://github.com/pytorch/pytorch/pull/143739",
 ]
 )
 parser.add_argument('--extra-pr-list', '-e', nargs='+',default=[])
diff --git a/src/ATen/native/transformers/SDPUtils.cpp b/src/ATen/native/transformers/SDPUtils.cpp
index db4409493..eca5f9829 100644
--- a/src/ATen/native/transformers/SDPUtils.cpp
+++ b/src/ATen/native/transformers/SDPUtils.cpp
@@ -4,6 +4,8 @@
 
 namespace sdp {
 
+using c10::array_of;
+
 bool check_all_tensors_on_device(sdp_params const& params, bool debug) {
   // Check that all tensors are on the GPU device
   // This should be handled by the stub dispatch, but whe call
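
Note on the SDPUtils.cpp hunk: the diff is truncated right after it brings c10::array_of into the sdp namespace, so the code that consumes it is not shown. As a hedged illustration only, the sketch below shows the usual role of array_of in PyTorch's SDP constraint checks: building a fixed-size table of check functions whose length is deduced from the argument list. The sdp_params struct and both check functions are hypothetical stand-ins, and the array_of definition mirrors the shape of the helper in c10/util/Array.h (an assumption, not a quote of this repository's code).

#include <array>
#include <cstdio>
#include <utility>

// Shape of the array_of helper, mirroring c10/util/Array.h (assumption):
// deduce the element count from the argument pack and return a std::array.
namespace c10 {
template <typename V, typename... T>
inline constexpr std::array<V, sizeof...(T)> array_of(T&&... t) {
  return {{std::forward<T>(t)...}};
}
} // namespace c10

// Hypothetical stand-ins for sdp::sdp_params and the constraint checks;
// the real definitions live in the (truncated) SDPUtils.cpp.
struct sdp_params {};
bool check_all_tensors_on_device(sdp_params const&, bool) { return true; }
bool check_dtype(sdp_params const&, bool) { return true; }

int main() {
  using check_fn = bool (*)(sdp_params const&, bool);
  // The table length (2) is deduced, so adding or removing a check never
  // requires updating a hand-written array size.
  constexpr auto constraints =
      c10::array_of<check_fn>(&check_all_tensors_on_device, &check_dtype);
  sdp_params params;
  bool ok = true;
  for (check_fn fn : constraints) {
    ok = ok && fn(params, /*debug=*/false);
  }
  std::printf("all constraints passed: %s\n", ok ? "yes" : "no");
  return ok ? 0 : 1;
}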