@@ -296,30 +296,18 @@ EXPORT_SYMBOL(ib_umem_odp_release);
 static int ib_umem_odp_map_dma_single_page(
                 struct ib_umem_odp *umem_odp,
                 unsigned int dma_index,
-                struct page *page,
-                u64 access_mask)
+                struct page *page)
 {
         struct ib_device *dev = umem_odp->umem.ibdev;
         dma_addr_t *dma_addr = &umem_odp->dma_list[dma_index];
 
-        if (*dma_addr) {
-                /*
-                 * If the page is already dma mapped it means it went through
-                 * a non-invalidating trasition, like read-only to writable.
-                 * Resync the flags.
-                 */
-                *dma_addr = (*dma_addr & ODP_DMA_ADDR_MASK) | access_mask;
-                return 0;
-        }
-
         *dma_addr = ib_dma_map_page(dev, page, 0, 1 << umem_odp->page_shift,
                                     DMA_BIDIRECTIONAL);
         if (ib_dma_mapping_error(dev, *dma_addr)) {
                 *dma_addr = 0;
                 return -EFAULT;
         }
         umem_odp->npages++;
-        *dma_addr |= access_mask;
         return 0;
 }
 
@@ -355,9 +343,6 @@ int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 user_virt,
         struct hmm_range range = {};
         unsigned long timeout;
 
-        if (access_mask == 0)
-                return -EINVAL;
-
         if (user_virt < ib_umem_start(umem_odp) ||
             user_virt + bcnt > ib_umem_end(umem_odp))
                 return -EFAULT;
@@ -383,7 +368,7 @@ int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 user_virt,
         if (fault) {
                 range.default_flags = HMM_PFN_REQ_FAULT;
 
-                if (access_mask & ODP_WRITE_ALLOWED_BIT)
+                if (access_mask & HMM_PFN_WRITE)
                         range.default_flags |= HMM_PFN_REQ_WRITE;
         }
 
@@ -415,22 +400,17 @@ int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 user_virt,
         for (pfn_index = 0; pfn_index < num_pfns;
                 pfn_index += 1 << (page_shift - PAGE_SHIFT), dma_index++) {
 
-                if (fault) {
-                        /*
-                         * Since we asked for hmm_range_fault() to populate
-                         * pages it shouldn't return an error entry on success.
-                         */
-                        WARN_ON(range.hmm_pfns[pfn_index] & HMM_PFN_ERROR);
-                        WARN_ON(!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID));
-                } else {
-                        if (!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID)) {
-                                WARN_ON(umem_odp->dma_list[dma_index]);
-                                continue;
-                        }
-                        access_mask = ODP_READ_ALLOWED_BIT;
-                        if (range.hmm_pfns[pfn_index] & HMM_PFN_WRITE)
-                                access_mask |= ODP_WRITE_ALLOWED_BIT;
-                }
+                /*
+                 * Since we asked for hmm_range_fault() to populate
+                 * pages it shouldn't return an error entry on success.
+                 */
+                WARN_ON(fault && range.hmm_pfns[pfn_index] & HMM_PFN_ERROR);
+                WARN_ON(fault && !(range.hmm_pfns[pfn_index] & HMM_PFN_VALID));
+                if (!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID))
+                        continue;
+
+                if (range.hmm_pfns[pfn_index] & HMM_PFN_DMA_MAPPED)
+                        continue;
 
                 hmm_order = hmm_pfn_to_map_order(range.hmm_pfns[pfn_index]);
                 /* If a hugepage was detected and ODP wasn't set for, the umem
@@ -445,13 +425,14 @@ int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 user_virt,
                 }
 
                 ret = ib_umem_odp_map_dma_single_page(
-                                umem_odp, dma_index, hmm_pfn_to_page(range.hmm_pfns[pfn_index]),
-                                access_mask);
+                                umem_odp, dma_index,
+                                hmm_pfn_to_page(range.hmm_pfns[pfn_index]));
                 if (ret < 0) {
                         ibdev_dbg(umem_odp->umem.ibdev,
                                   "ib_umem_odp_map_dma_single_page failed with error %d\n", ret);
                         break;
                 }
+                range.hmm_pfns[pfn_index] |= HMM_PFN_DMA_MAPPED;
         }
         /* upon success lock should stay on hold for the callee */
         if (!ret)
@@ -471,7 +452,6 @@ EXPORT_SYMBOL(ib_umem_odp_map_dma_and_lock);
 void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
                                  u64 bound)
 {
-        dma_addr_t dma_addr;
         dma_addr_t dma;
         int idx;
         u64 addr;
@@ -482,34 +462,37 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
         virt = max_t(u64, virt, ib_umem_start(umem_odp));
         bound = min_t(u64, bound, ib_umem_end(umem_odp));
         for (addr = virt; addr < bound; addr += BIT(umem_odp->page_shift)) {
+                unsigned long pfn_idx = (addr - ib_umem_start(umem_odp)) >>
+                                        PAGE_SHIFT;
+                struct page *page =
+                        hmm_pfn_to_page(umem_odp->pfn_list[pfn_idx]);
+
                 idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
                 dma = umem_odp->dma_list[idx];
 
-                /* The access flags guaranteed a valid DMA address in case was NULL */
-                if (dma) {
-                        unsigned long pfn_idx = (addr - ib_umem_start(umem_odp)) >> PAGE_SHIFT;
-                        struct page *page = hmm_pfn_to_page(umem_odp->pfn_list[pfn_idx]);
-
-                        dma_addr = dma & ODP_DMA_ADDR_MASK;
-                        ib_dma_unmap_page(dev, dma_addr,
-                                          BIT(umem_odp->page_shift),
-                                          DMA_BIDIRECTIONAL);
-                        if (dma & ODP_WRITE_ALLOWED_BIT) {
-                                struct page *head_page = compound_head(page);
-                                /*
-                                 * set_page_dirty prefers being called with
-                                 * the page lock. However, MMU notifiers are
-                                 * called sometimes with and sometimes without
-                                 * the lock. We rely on the umem_mutex instead
-                                 * to prevent other mmu notifiers from
-                                 * continuing and allowing the page mapping to
-                                 * be removed.
-                                 */
-                                set_page_dirty(head_page);
-                        }
-                        umem_odp->dma_list[idx] = 0;
-                        umem_odp->npages--;
+                if (!(umem_odp->pfn_list[pfn_idx] & HMM_PFN_VALID))
+                        goto clear;
+                if (!(umem_odp->pfn_list[pfn_idx] & HMM_PFN_DMA_MAPPED))
+                        goto clear;
+
+                ib_dma_unmap_page(dev, dma, BIT(umem_odp->page_shift),
+                                  DMA_BIDIRECTIONAL);
+                if (umem_odp->pfn_list[pfn_idx] & HMM_PFN_WRITE) {
+                        struct page *head_page = compound_head(page);
+                        /*
+                         * set_page_dirty prefers being called with
+                         * the page lock. However, MMU notifiers are
+                         * called sometimes with and sometimes without
+                         * the lock. We rely on the umem_mutex instead
+                         * to prevent other mmu notifiers from
+                         * continuing and allowing the page mapping to
+                         * be removed.
+                         */
+                        set_page_dirty(head_page);
                 }
+                umem_odp->npages--;
+clear:
+                umem_odp->pfn_list[pfn_idx] &= ~HMM_PFN_FLAGS;
         }
 }
 EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
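
Note for readers outside the RDMA/HMM code: the hunks above stop packing ODP_READ_ALLOWED_BIT/ODP_WRITE_ALLOWED_BIT into the low bits of each dma_list entry and instead read per-page state (HMM_PFN_VALID, HMM_PFN_WRITE, and the DMA-mapped marker HMM_PFN_DMA_MAPPED) from the pfn_list entries filled in by hmm_range_fault(). Below is a minimal, illustrative-only user-space sketch of that before/after flag handling; it is not kernel code, and the bit positions are assumptions made up for the example, with the real definitions living in include/linux/hmm.h and drivers/infiniband/core/umem_odp.c.

/*
 * Illustrative-only sketch (plain user-space C, not kernel code): it
 * contrasts the old dma_list bit-packing with the new pfn_list flag
 * checks. Bit positions below are made up for the example.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Old scheme: access bits lived in the low bits of each dma_list entry. */
#define ODP_READ_ALLOWED_BIT    (1ULL << 0)
#define ODP_WRITE_ALLOWED_BIT   (1ULL << 1)
#define ODP_DMA_ADDR_MASK       (~(ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT))

/* New scheme: state is read from the HMM pfn entry (illustrative bits). */
#define HMM_PFN_VALID           (1ULL << 63)
#define HMM_PFN_WRITE           (1ULL << 62)
#define HMM_PFN_DMA_MAPPED      (1ULL << 61)

/* Old: write permission was recovered from the dma_list entry itself. */
static bool old_page_writable(uint64_t dma_entry)
{
        return dma_entry & ODP_WRITE_ALLOWED_BIT;
}

/*
 * New: permission and "already DMA mapped" state come from the pfn_list
 * flags, so the dma_list entry holds nothing but the plain DMA address.
 */
static bool new_page_writable(uint64_t pfn_entry)
{
        return (pfn_entry & HMM_PFN_VALID) && (pfn_entry & HMM_PFN_WRITE);
}

int main(void)
{
        uint64_t dma_entry = 0x7f0000001000ULL | ODP_READ_ALLOWED_BIT |
                             ODP_WRITE_ALLOWED_BIT;
        uint64_t pfn_entry = 0x1234ULL | HMM_PFN_VALID | HMM_PFN_WRITE |
                             HMM_PFN_DMA_MAPPED;

        printf("old: dma=%#llx writable=%d\n",
               (unsigned long long)(dma_entry & ODP_DMA_ADDR_MASK),
               old_page_writable(dma_entry));
        printf("new: writable=%d already_mapped=%d\n",
               new_page_writable(pfn_entry),
               !!(pfn_entry & HMM_PFN_DMA_MAPPED));
        return 0;
}

On the teardown side the patch additionally clears the transient flags (pfn_list[pfn_idx] &= ~HMM_PFN_FLAGS) so a later fault starts from a clean entry; the sketch only models the lookup side.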