--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -17,6 +17,7 @@
 #include <linux/gfp.h>
 #include <linux/huge_mm.h>
 #include <linux/iommu.h>
+#include <linux/iommu-dma.h>
 #include <linux/iova.h>
 #include <linux/irq.h>
 #include <linux/list_sort.h>
@@ -1037,9 +1038,8 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
 	return NULL;
 }
 
-static struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev,
-		size_t size, enum dma_data_direction dir, gfp_t gfp,
-		unsigned long attrs)
+struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev, size_t size,
+		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs)
 {
 	struct dma_sgt_handle *sh;
 
@@ -1055,7 +1055,7 @@ static struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev,
 	return &sh->sgt;
 }
 
-static void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
+void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
 		struct sg_table *sgt, enum dma_data_direction dir)
 {
 	struct dma_sgt_handle *sh = sgt_handle(sgt);
@@ -1066,8 +1066,8 @@ static void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
 	kfree(sh);
 }
 
-static void iommu_dma_sync_single_for_cpu(struct device *dev,
-		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
+void iommu_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
+		size_t size, enum dma_data_direction dir)
 {
 	phys_addr_t phys;
 
@@ -1081,8 +1081,8 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev,
 	swiotlb_sync_single_for_cpu(dev, phys, size, dir);
 }
 
-static void iommu_dma_sync_single_for_device(struct device *dev,
-		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
+void iommu_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+		size_t size, enum dma_data_direction dir)
 {
 	phys_addr_t phys;
 
@@ -1096,9 +1096,8 @@ static void iommu_dma_sync_single_for_device(struct device *dev,
 	arch_sync_dma_for_device(phys, size, dir);
 }
 
-static void iommu_dma_sync_sg_for_cpu(struct device *dev,
-		struct scatterlist *sgl, int nelems,
-		enum dma_data_direction dir)
+void iommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
+		int nelems, enum dma_data_direction dir)
 {
 	struct scatterlist *sg;
 	int i;
@@ -1112,9 +1111,8 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev,
 	arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
 }
 
-static void iommu_dma_sync_sg_for_device(struct device *dev,
-		struct scatterlist *sgl, int nelems,
-		enum dma_data_direction dir)
+void iommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
+		int nelems, enum dma_data_direction dir)
 {
 	struct scatterlist *sg;
 	int i;
@@ -1129,9 +1127,9 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
 	arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
 }
 
-static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size, enum dma_data_direction dir,
-		unsigned long attrs)
+dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t size, enum dma_data_direction dir,
+		unsigned long attrs)
 {
 	phys_addr_t phys = page_to_phys(page) + offset;
 	bool coherent = dev_is_dma_coherent(dev);
@@ -1189,7 +1187,7 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 	return iova;
 }
 
-static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
+void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
@@ -1342,8 +1340,8 @@ static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg,
  * impedance-matching, to be able to hand off a suitably-aligned list,
  * but still preserve the original offsets and sizes for the caller.
  */
-static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
-		int nents, enum dma_data_direction dir, unsigned long attrs)
+int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+		enum dma_data_direction dir, unsigned long attrs)
 {
 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
@@ -1462,8 +1460,8 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	return ret;
 }
 
-static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-		int nents, enum dma_data_direction dir, unsigned long attrs)
+void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+		enum dma_data_direction dir, unsigned long attrs)
 {
 	dma_addr_t end = 0, start;
 	struct scatterlist *tmp;
@@ -1512,15 +1510,15 @@ static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
 	__iommu_dma_unmap(dev, start, end - start);
 }
 
-static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
+dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 	return __iommu_dma_map(dev, phys, size,
 			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
 			dma_get_mask(dev));
 }
 
-static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
+void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 	__iommu_dma_unmap(dev, handle, size);
@@ -1557,7 +1555,7 @@ static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
 	dma_free_contiguous(dev, page, alloc_size);
 }
 
-static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
+void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
 		dma_addr_t handle, unsigned long attrs)
 {
 	__iommu_dma_unmap(dev, handle, size);
@@ -1601,8 +1599,8 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
 	return NULL;
 }
 
-static void *iommu_dma_alloc(struct device *dev, size_t size,
-		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
+void *iommu_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+		gfp_t gfp, unsigned long attrs)
 {
 	bool coherent = dev_is_dma_coherent(dev);
 	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
@@ -1635,7 +1633,7 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 	return cpu_addr;
 }
 
-static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		unsigned long attrs)
 {
@@ -1666,7 +1664,7 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 			vma->vm_page_prot);
 }
 
-static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		unsigned long attrs)
 {
@@ -1693,72 +1691,42 @@ static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
 	return ret;
 }
 
-static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
+unsigned long iommu_dma_get_merge_boundary(struct device *dev)
 {
 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
 
 	return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
 }
 
-static size_t iommu_dma_opt_mapping_size(void)
+size_t iommu_dma_opt_mapping_size(void)
 {
 	return iova_rcache_range();
 }
 
-static size_t iommu_dma_max_mapping_size(struct device *dev)
+size_t iommu_dma_max_mapping_size(struct device *dev)
 {
 	if (dev_is_untrusted(dev))
 		return swiotlb_max_mapping_size(dev);
 
 	return SIZE_MAX;
 }
 
-static const struct dma_map_ops iommu_dma_ops = {
-	.flags			= DMA_F_PCI_P2PDMA_SUPPORTED |
-				  DMA_F_CAN_SKIP_SYNC,
-	.alloc			= iommu_dma_alloc,
-	.free			= iommu_dma_free,
-	.alloc_pages_op		= dma_common_alloc_pages,
-	.free_pages		= dma_common_free_pages,
-	.alloc_noncontiguous	= iommu_dma_alloc_noncontiguous,
-	.free_noncontiguous	= iommu_dma_free_noncontiguous,
-	.mmap			= iommu_dma_mmap,
-	.get_sgtable		= iommu_dma_get_sgtable,
-	.map_page		= iommu_dma_map_page,
-	.unmap_page		= iommu_dma_unmap_page,
-	.map_sg			= iommu_dma_map_sg,
-	.unmap_sg		= iommu_dma_unmap_sg,
-	.sync_single_for_cpu	= iommu_dma_sync_single_for_cpu,
-	.sync_single_for_device	= iommu_dma_sync_single_for_device,
-	.sync_sg_for_cpu	= iommu_dma_sync_sg_for_cpu,
-	.sync_sg_for_device	= iommu_dma_sync_sg_for_device,
-	.map_resource		= iommu_dma_map_resource,
-	.unmap_resource		= iommu_dma_unmap_resource,
-	.get_merge_boundary	= iommu_dma_get_merge_boundary,
-	.opt_mapping_size	= iommu_dma_opt_mapping_size,
-	.max_mapping_size	= iommu_dma_max_mapping_size,
-};
-
 void iommu_setup_dma_ops(struct device *dev)
 {
 	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
 
 	if (dev_is_pci(dev))
 		dev->iommu->pci_32bit_workaround = !iommu_dma_forcedac;
 
-	if (iommu_is_dma_domain(domain)) {
-		if (iommu_dma_init_domain(domain, dev))
-			goto out_err;
-		dev->dma_ops = &iommu_dma_ops;
-	} else if (dev->dma_ops == &iommu_dma_ops) {
-		/* Clean up if we've switched *from* a DMA domain */
-		dev->dma_ops = NULL;
-	}
+	dev->dma_iommu = iommu_is_dma_domain(domain);
+	if (dev->dma_iommu && iommu_dma_init_domain(domain, dev))
+		goto out_err;
 
 	return;
 out_err:
-	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
-		 dev_name(dev));
+	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
+		dev_name(dev));
+	dev->dma_iommu = false;
 }
 
 static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
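With iommu_dma_ops gone, nothing reaches these functions through a dma_map_ops
table any more: the dma-mapping core is expected to branch to them directly,
keyed off the new dev->dma_iommu flag set above. What follows is a minimal
sketch of that dispatch, assuming the use_dma_iommu() helper and the
kernel/dma/mapping.c changes from the companion patches in this series; treat
it as illustrative, not as part of this commit.

/*
 * Sketch only: approximates the companion dma-mapping core changes.
 * use_dma_iommu() is assumed to live in <linux/dma-map-ops.h> and to be
 * compiled away to false when CONFIG_IOMMU_DMA is not set.
 */
static inline bool use_dma_iommu(struct device *dev)
{
	return dev->dma_iommu;	/* flag set by iommu_setup_dma_ops() */
}

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_map_direct(dev, ops))
		return dma_direct_map_page(dev, page, offset, size, dir, attrs);
	if (use_dma_iommu(dev))
		/* direct call replaces the old ops->map_page indirection */
		return iommu_dma_map_page(dev, page, offset, size, dir, attrs);
	return ops->map_page(dev, page, offset, size, dir, attrs);
}

The same pattern would apply to the other former iommu_dma_ops entry points
(the unmap, sync, scatterlist, resource and allocation paths), which is why
each of them loses its static qualifier here and gains a declaration in the
new <linux/iommu-dma.h> header.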