diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index bdef4b76028b1b..e1e067d3bb8734 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -8,6 +8,7 @@
 #include <linux/bitfield.h>
 #include <linux/circ_buf.h>
 
+#include <drm/drm_exec.h>
 #include <drm/drm_managed.h>
 #include <drm/ttm/ttm_execbuf_util.h>
 
@@ -84,11 +85,6 @@ static bool vma_matches(struct xe_vma *vma, u64 page_addr)
 	return true;
 }
 
-static bool only_needs_bo_lock(struct xe_bo *bo)
-{
-	return bo && bo->vm;
-}
-
 static struct xe_vma *lookup_vma(struct xe_vm *vm, u64 page_addr)
 {
 	struct xe_vma *vma = NULL;
@@ -103,17 +99,45 @@ static struct xe_vma *lookup_vma(struct xe_vm *vm, u64 page_addr)
 	return vma;
 }
 
+static int xe_pf_begin(struct drm_exec *exec, struct xe_vma *vma,
+		       bool atomic, unsigned int id)
+{
+	struct xe_bo *bo = xe_vma_bo(vma);
+	struct xe_vm *vm = xe_vma_vm(vma);
+	unsigned int num_shared = 2; /* slots for bind + move */
+	int err;
+
+	err = xe_vm_prepare_vma(exec, vma, num_shared);
+	if (err)
+		return err;
+
+	if (atomic) {
+		if (xe_vma_is_userptr(vma)) {
+			err = -EACCES;
+			return err;
+		}
+
+		/* Migrate to VRAM, move should invalidate the VMA first */
+		err = xe_bo_migrate(bo, XE_PL_VRAM0 + id);
+		if (err)
+			return err;
+	} else if (bo) {
+		/* Create backing store if needed */
+		err = xe_bo_validate(bo, vm, true);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
 static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
 {
 	struct xe_device *xe = gt_to_xe(gt);
 	struct xe_tile *tile = gt_to_tile(gt);
+	struct drm_exec exec;
 	struct xe_vm *vm;
 	struct xe_vma *vma = NULL;
-	struct xe_bo *bo;
-	LIST_HEAD(objs);
-	LIST_HEAD(dups);
-	struct ttm_validate_buffer tv_bo, tv_vm;
-	struct ww_acquire_ctx ww;
 	struct dma_fence *fence;
 	bool write_locked;
 	int ret = 0;
@@ -170,35 +194,10 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
 	}
 
 	/* Lock VM and BOs dma-resv */
-	bo = xe_vma_bo(vma);
-	if (!only_needs_bo_lock(bo)) {
-		tv_vm.num_shared = xe->info.tile_count;
-		tv_vm.bo = xe_vm_ttm_bo(vm);
-		list_add(&tv_vm.head, &objs);
-	}
-	if (bo) {
-		tv_bo.bo = &bo->ttm;
-		tv_bo.num_shared = xe->info.tile_count;
-		list_add(&tv_bo.head, &objs);
-	}
-
-	ret = ttm_eu_reserve_buffers(&ww, &objs, false, &dups);
-	if (ret)
-		goto unlock_vm;
-
-	if (atomic) {
-		if (xe_vma_is_userptr(vma)) {
-			ret = -EACCES;
-			goto unlock_dma_resv;
-		}
-
-		/* Migrate to VRAM, move should invalidate the VMA first */
-		ret = xe_bo_migrate(bo, XE_PL_VRAM0 + tile->id);
-		if (ret)
-			goto unlock_dma_resv;
-	} else if (bo) {
-		/* Create backing store if needed */
-		ret = xe_bo_validate(bo, vm, true);
+	drm_exec_init(&exec, 0);
+	drm_exec_until_all_locked(&exec) {
+		ret = xe_pf_begin(&exec, vma, atomic, tile->id);
+		drm_exec_retry_on_contention(&exec);
 		if (ret)
 			goto unlock_dma_resv;
 	}
@@ -225,7 +224,7 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
 	vma->usm.tile_invalidated &= ~BIT(tile->id);
 
 unlock_dma_resv:
-	ttm_eu_backoff_reservation(&ww, &objs);
+	drm_exec_fini(&exec);
unlock_vm:
 	if (!ret)
 		vm->usm.last_fault_vma = vma;
@@ -490,13 +489,9 @@ static int handle_acc(struct xe_gt *gt, struct acc *acc)
 {
 	struct xe_device *xe = gt_to_xe(gt);
 	struct xe_tile *tile = gt_to_tile(gt);
+	struct drm_exec exec;
 	struct xe_vm *vm;
 	struct xe_vma *vma;
-	struct xe_bo *bo;
-	LIST_HEAD(objs);
-	LIST_HEAD(dups);
-	struct ttm_validate_buffer tv_bo, tv_vm;
-	struct ww_acquire_ctx ww;
 	int ret = 0;
 
 	/* We only support ACC_TRIGGER at the moment */
@@ -528,23 +523,15 @@ static int handle_acc(struct xe_gt *gt, struct acc *acc)
 		goto unlock_vm;
 
 	/* Lock VM and BOs dma-resv */
-	bo = xe_vma_bo(vma);
-	if (!only_needs_bo_lock(bo)) {
-		tv_vm.num_shared = xe->info.tile_count;
-		tv_vm.bo = xe_vm_ttm_bo(vm);
-		list_add(&tv_vm.head, &objs);
+	drm_exec_init(&exec, 0);
+	drm_exec_until_all_locked(&exec) {
+		ret = xe_pf_begin(&exec, vma, true, tile->id);
+		drm_exec_retry_on_contention(&exec);
+		if (ret)
+			break;
 	}
-	tv_bo.bo = &bo->ttm;
-	tv_bo.num_shared = xe->info.tile_count;
-	list_add(&tv_bo.head, &objs);
 
-	ret = ttm_eu_reserve_buffers(&ww, &objs, false, &dups);
-	if (ret)
-		goto unlock_vm;
-
-	/* Migrate to VRAM, move should invalidate the VMA first */
-	ret = xe_bo_migrate(bo, XE_PL_VRAM0 + tile->id);
-	ttm_eu_backoff_reservation(&ww, &objs);
+	drm_exec_fini(&exec);
unlock_vm:
 	up_read(&vm->lock);
 	xe_vm_put(vm);
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 80b374b9cdd1e2..52c5235677c51e 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -35,6 +35,11 @@
 
 #define TEST_VM_ASYNC_OPS_ERROR
 
+static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
+{
+	return vm->gpuvm.r_obj;
+}
+
 /**
  * xe_vma_userptr_check_repin() - Advisory check for repin needed
  * @vma: The userptr vma
@@ -422,7 +427,7 @@ int xe_vm_lock_dma_resv(struct xe_vm *vm, struct drm_exec *exec,
 	lockdep_assert_held(&vm->lock);
 
 	if (lock_vm) {
-		err = drm_exec_prepare_obj(exec, &xe_vm_ttm_bo(vm)->base, num_shared);
+		err = drm_exec_prepare_obj(exec, xe_vm_obj(vm), num_shared);
 		if (err)
 			return err;
 	}
@@ -514,7 +519,7 @@ static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
 	struct xe_vma *vma;
 	int err;
 
-	err = drm_exec_prepare_obj(exec, &xe_vm_ttm_bo(vm)->base,
+	err = drm_exec_prepare_obj(exec, xe_vm_obj(vm),
 				   vm->preempt.num_exec_queues);
 	if (err)
 		return err;
@@ -1095,6 +1100,33 @@ static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
 	}
 }
 
+/**
+ * xe_vm_prepare_vma() - drm_exec utility to lock a vma
+ * @exec: The drm_exec object we're currently locking for.
+ * @vma: The vma for which we want to lock the vm resv and any attached
+ * object's resv.
+ * @num_shared: The number of dma-fence slots to pre-allocate in the
+ * objects' reservation objects.
+ *
+ * Return: 0 on success, negative error code on error. In particular
+ * may return -EDEADLK on WW transaction contention and -EINTR if
+ * an interruptible wait is terminated by a signal.
+ */
+int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma,
+		      unsigned int num_shared)
+{
+	struct xe_vm *vm = xe_vma_vm(vma);
+	struct xe_bo *bo = xe_vma_bo(vma);
+	int err;
+
+	XE_WARN_ON(!vm);
+	err = drm_exec_prepare_obj(exec, xe_vm_obj(vm), num_shared);
+	if (!err && bo && !bo->vm)
+		err = drm_exec_prepare_obj(exec, &bo->ttm.base, num_shared);
+
+	return err;
+}
+
 static void xe_vma_destroy_unlocked(struct xe_vma *vma)
 {
 	struct ttm_validate_buffer tv[2];
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index a26e84c742f114..ad9ff2b39a3068 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -219,6 +219,9 @@ void xe_vm_fence_all_extobjs(struct xe_vm *vm, struct dma_fence *fence,
 
 int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id);
 
+int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma,
+		      unsigned int num_shared);
+
 /**
  * xe_vm_resv() - Return's the vm's reservation object
  * @vm: The vm
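
For reference, the caller-side pattern this patch converts to looks roughly as below. This is a minimal sketch, not part of the patch: lock_vma_example() is a hypothetical helper name, while drm_exec_init(), drm_exec_until_all_locked(), drm_exec_retry_on_contention(), drm_exec_fini() and the new xe_vm_prepare_vma() are the interfaces actually used above. drm_exec_until_all_locked() re-runs its body whenever a ww-mutex acquisition hits -EDEADLK, after drm_exec has dropped every lock taken so far, so the body must be safe to restart.

#include <drm/drm_exec.h>

#include "xe_vm.h"

/* Hypothetical caller, mirroring how xe_pf_begin() is driven above. */
static int lock_vma_example(struct xe_vma *vma, unsigned int num_fences)
{
	struct drm_exec exec;
	int err = 0;

	drm_exec_init(&exec, 0);
	drm_exec_until_all_locked(&exec) {
		/* Lock the VM resv plus, for external BOs, the BO resv. */
		err = xe_vm_prepare_vma(&exec, vma, num_fences);
		/* On contention all locks were dropped; restart the body. */
		drm_exec_retry_on_contention(&exec);
		if (err)
			break;
	}
	/* On success every resv stays locked until drm_exec_fini(). */
	drm_exec_fini(&exec);

	return err;
}

Compared with the removed ttm_eu_reserve_buffers()/ttm_eu_backoff_reservation() pair, the backoff-and-retry bookkeeping now lives inside drm_exec, which is why xe_pf_begin() can simply return on error and why the tv_bo/tv_vm validate-buffer lists disappear from handle_pagefault() and handle_acc().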