@@ -1471,11 +1471,14 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
 	spin_unlock_irqrestore(&device_domain_lock, flags);
 }
 
-static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
-				  unsigned long pfn, unsigned int pages, int ih, int map)
+static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
+				  struct dmar_domain *domain,
+				  unsigned long pfn, unsigned int pages,
+				  int ih, int map)
 {
 	unsigned int mask = ilog2(__roundup_pow_of_two(pages));
 	uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
+	u16 did = domain->iommu_did[iommu->seq_id];
 
 	BUG_ON(pages == 0);
 
@@ -3422,7 +3425,9 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
 
 	/* it's a non-present to present mapping. Only flush if caching mode */
 	if (cap_caching_mode(iommu->cap))
-		iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1);
+		iommu_flush_iotlb_psi(iommu, domain,
+				      mm_to_dma_pfn(iova->pfn_lo),
+				      size, 0, 1);
 	else
 		iommu_flush_write_buffer(iommu);
 
@@ -3473,7 +3478,7 @@ static void flush_unmaps(void)
 
 			/* On real hardware multiple invalidations are expensive */
 			if (cap_caching_mode(iommu->cap))
-				iommu_flush_iotlb_psi(iommu, domain->id,
+				iommu_flush_iotlb_psi(iommu, domain,
 					iova->pfn_lo, iova_size(iova),
 					!deferred_flush[i].freelist[j], 0);
 			else {
@@ -3557,7 +3562,7 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
 	freelist = domain_unmap(domain, start_pfn, last_pfn);
 
 	if (intel_iommu_strict) {
-		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
+		iommu_flush_iotlb_psi(iommu, domain, start_pfn,
 				      last_pfn - start_pfn + 1, !freelist, 0);
 		/* free iova */
 		__free_iova(&domain->iovad, iova);
@@ -3715,7 +3720,7 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
 
 	/* it's a non-present to present mapping. Only flush if caching mode */
 	if (cap_caching_mode(iommu->cap))
-		iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1);
+		iommu_flush_iotlb_psi(iommu, domain, start_vpfn, size, 0, 1);
 	else
 		iommu_flush_write_buffer(iommu);
 
@@ -4421,7 +4426,7 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb,
 
 			rcu_read_lock();
 			for_each_active_iommu(iommu, drhd)
-				iommu_flush_iotlb_psi(iommu, si_domain->id,
+				iommu_flush_iotlb_psi(iommu, si_domain,
 					iova->pfn_lo, iova_size(iova),
 					!freelist, 0);
 			rcu_read_unlock();
@@ -4872,17 +4877,18 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
 	npages = last_pfn - start_pfn + 1;
 
 	for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
-		iommu = g_iommus[iommu_id];
-
-		/*
-		 * find bit position of dmar_domain
-		 */
-		ndomains = cap_ndoms(iommu->cap);
-		for_each_set_bit(num, iommu->domain_ids, ndomains) {
-			if (get_iommu_domain(iommu, num) == dmar_domain)
-				iommu_flush_iotlb_psi(iommu, num, start_pfn,
-						      npages, !freelist, 0);
-		}
+		iommu = g_iommus[iommu_id];
+
+		/*
+		 * find bit position of dmar_domain
+		 */
+		ndomains = cap_ndoms(iommu->cap);
+		for_each_set_bit(num, iommu->domain_ids, ndomains) {
+			if (get_iommu_domain(iommu, num) == dmar_domain)
+				iommu_flush_iotlb_psi(iommu, dmar_domain,
+						      start_pfn, npages,
+						      !freelist, 0);
+		}
 
 	}
 
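For context, the net effect of this patch is that callers of iommu_flush_iotlb_psi() now hand over the dmar_domain pointer and the function derives the per-IOMMU domain id itself, via domain->iommu_did[iommu->seq_id], instead of every call site passing a u16 did. Below is a minimal, self-contained user-space sketch of that pattern; flush_psi, MAX_IOMMUS and the simplified struct layouts are illustrative stand-ins and not the kernel definitions, while iommu_did and seq_id mirror the fields the diff itself references.

#include <stdio.h>
#include <stdint.h>

#define MAX_IOMMUS 4	/* illustrative bound, not a kernel constant */

/* Simplified stand-ins for the kernel structures touched by the patch. */
struct intel_iommu {
	int seq_id;			/* index of this IOMMU unit */
};

struct dmar_domain {
	uint16_t iommu_did[MAX_IOMMUS];	/* per-IOMMU domain id, as in the patch */
};

/* After the change: the callee looks up the domain id itself. */
static void flush_psi(struct intel_iommu *iommu, struct dmar_domain *domain,
		      unsigned long pfn, unsigned int pages)
{
	uint16_t did = domain->iommu_did[iommu->seq_id];

	printf("flush did=%u pfn=%#lx pages=%u\n", did, pfn, pages);
}

int main(void)
{
	struct intel_iommu iommu = { .seq_id = 1 };
	struct dmar_domain domain = { .iommu_did = { 0, 7, 0, 0 } };

	/* Callers no longer compute the id; they just pass the domain. */
	flush_psi(&iommu, &domain, 0x1000, 8);
	return 0;
}

This keeps the domain-id lookup in one place, so call sites such as intel_unmap() or intel_map_sg() cannot pass a stale or mismatched id for the IOMMU they are flushing.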