@@ -150,19 +150,17 @@ static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
 						unsigned long offset,
 						enum dma_data_direction dir)
 {
-	ssb_dma_sync_single_range_for_device(sdev, dma_base,
-					     offset & dma_desc_align_mask,
-					     dma_desc_sync_size, dir);
+	dma_sync_single_for_device(sdev->dma_dev, dma_base + offset,
+				   dma_desc_sync_size, dir);
 }
 
 static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
 					     dma_addr_t dma_base,
 					     unsigned long offset,
 					     enum dma_data_direction dir)
 {
-	ssb_dma_sync_single_range_for_cpu(sdev, dma_base,
-					  offset & dma_desc_align_mask,
-					  dma_desc_sync_size, dir);
+	dma_sync_single_for_cpu(sdev->dma_dev, dma_base + offset,
+				dma_desc_sync_size, dir);
 }
 
 static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
@@ -608,10 +606,10 @@ static void b44_tx(struct b44 *bp)
 
 		BUG_ON(skb == NULL);
 
-		ssb_dma_unmap_single(bp->sdev,
-				     rp->mapping,
-				     skb->len,
-				     DMA_TO_DEVICE);
+		dma_unmap_single(bp->sdev->dma_dev,
+				 rp->mapping,
+				 skb->len,
+				 DMA_TO_DEVICE);
 		rp->skb = NULL;
 		dev_kfree_skb_irq(skb);
 	}
@@ -648,29 +646,29 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 	if (skb == NULL)
 		return -ENOMEM;
 
-	mapping = ssb_dma_map_single(bp->sdev, skb->data,
-				     RX_PKT_BUF_SZ,
-				     DMA_FROM_DEVICE);
+	mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
+				 RX_PKT_BUF_SZ,
+				 DMA_FROM_DEVICE);
 
 	/* Hardware bug work-around, the chip is unable to do PCI DMA
 	   to/from anything above 1GB :-( */
-	if (ssb_dma_mapping_error(bp->sdev, mapping) ||
+	if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
 		mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
 		/* Sigh... */
-		if (!ssb_dma_mapping_error(bp->sdev, mapping))
-			ssb_dma_unmap_single(bp->sdev, mapping,
+		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
+			dma_unmap_single(bp->sdev->dma_dev, mapping,
 					     RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
 		dev_kfree_skb_any(skb);
 		skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
 		if (skb == NULL)
 			return -ENOMEM;
-		mapping = ssb_dma_map_single(bp->sdev, skb->data,
-					     RX_PKT_BUF_SZ,
-					     DMA_FROM_DEVICE);
-		if (ssb_dma_mapping_error(bp->sdev, mapping) ||
-		    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
-			if (!ssb_dma_mapping_error(bp->sdev, mapping))
-				ssb_dma_unmap_single(bp->sdev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
+		mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
+					 RX_PKT_BUF_SZ,
+					 DMA_FROM_DEVICE);
+		if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
+		    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
+			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
+				dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
 			dev_kfree_skb_any(skb);
 			return -ENOMEM;
 		}
@@ -745,9 +743,9 @@ static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 					     dest_idx * sizeof(*dest_desc),
 					     DMA_BIDIRECTIONAL);
 
-	ssb_dma_sync_single_for_device(bp->sdev, dest_map->mapping,
-				       RX_PKT_BUF_SZ,
-				       DMA_FROM_DEVICE);
+	dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping,
+				   RX_PKT_BUF_SZ,
+				   DMA_FROM_DEVICE);
 }
 
 static int b44_rx(struct b44 *bp, int budget)
@@ -767,9 +765,9 @@ static int b44_rx(struct b44 *bp, int budget)
 		struct rx_header *rh;
 		u16 len;
 
-		ssb_dma_sync_single_for_cpu(bp->sdev, map,
-					    RX_PKT_BUF_SZ,
-					    DMA_FROM_DEVICE);
+		dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
+					RX_PKT_BUF_SZ,
+					DMA_FROM_DEVICE);
 		rh = (struct rx_header *) skb->data;
 		len = le16_to_cpu(rh->len);
 		if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
@@ -801,8 +799,8 @@ static int b44_rx(struct b44 *bp, int budget)
 			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
 			if (skb_size < 0)
 				goto drop_it;
-			ssb_dma_unmap_single(bp->sdev, map,
-					     skb_size, DMA_FROM_DEVICE);
+			dma_unmap_single(bp->sdev->dma_dev, map,
+					 skb_size, DMA_FROM_DEVICE);
 			/* Leave out rx_header */
 			skb_put(skb, len + RX_PKT_OFFSET);
 			skb_pull(skb, RX_PKT_OFFSET);
@@ -954,24 +952,24 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		goto err_out;
 	}
 
-	mapping = ssb_dma_map_single(bp->sdev, skb->data, len, DMA_TO_DEVICE);
-	if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
+	mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
 		struct sk_buff *bounce_skb;
 
 		/* Chip can't handle DMA to/from >1GB, use bounce buffer */
-		if (!ssb_dma_mapping_error(bp->sdev, mapping))
-			ssb_dma_unmap_single(bp->sdev, mapping, len,
+		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
+			dma_unmap_single(bp->sdev->dma_dev, mapping, len,
 					     DMA_TO_DEVICE);
 
 		bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
 		if (!bounce_skb)
 			goto err_out;
 
-		mapping = ssb_dma_map_single(bp->sdev, bounce_skb->data,
-					     len, DMA_TO_DEVICE);
-		if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
-			if (!ssb_dma_mapping_error(bp->sdev, mapping))
-				ssb_dma_unmap_single(bp->sdev, mapping,
+		mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
+					 len, DMA_TO_DEVICE);
+		if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
+			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
+				dma_unmap_single(bp->sdev->dma_dev, mapping,
 						     len, DMA_TO_DEVICE);
 			dev_kfree_skb_any(bounce_skb);
 			goto err_out;
@@ -1068,8 +1066,8 @@ static void b44_free_rings(struct b44 *bp)
 
 		if (rp->skb == NULL)
 			continue;
-		ssb_dma_unmap_single(bp->sdev, rp->mapping, RX_PKT_BUF_SZ,
-				     DMA_FROM_DEVICE);
+		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
+				 DMA_FROM_DEVICE);
 		dev_kfree_skb_any(rp->skb);
 		rp->skb = NULL;
 	}
@@ -1080,8 +1078,8 @@ static void b44_free_rings(struct b44 *bp)
 
 		if (rp->skb == NULL)
 			continue;
-		ssb_dma_unmap_single(bp->sdev, rp->mapping, rp->skb->len,
-				     DMA_TO_DEVICE);
+		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
+				 DMA_TO_DEVICE);
 		dev_kfree_skb_any(rp->skb);
 		rp->skb = NULL;
 	}
@@ -1103,14 +1101,12 @@ static void b44_init_rings(struct b44 *bp)
 	memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
 
 	if (bp->flags & B44_FLAG_RX_RING_HACK)
-		ssb_dma_sync_single_for_device(bp->sdev, bp->rx_ring_dma,
-					       DMA_TABLE_BYTES,
-					       DMA_BIDIRECTIONAL);
+		dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
+					   DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
 
 	if (bp->flags & B44_FLAG_TX_RING_HACK)
-		ssb_dma_sync_single_for_device(bp->sdev, bp->tx_ring_dma,
-					       DMA_TABLE_BYTES,
-					       DMA_TO_DEVICE);
+		dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
+					   DMA_TABLE_BYTES, DMA_TO_DEVICE);
 
 	for (i = 0; i < bp->rx_pending; i++) {
 		if (b44_alloc_rx_skb(bp, -1, i) < 0)
@@ -1130,27 +1126,23 @@ static void b44_free_consistent(struct b44 *bp)
 	bp->tx_buffers = NULL;
 	if (bp->rx_ring) {
 		if (bp->flags & B44_FLAG_RX_RING_HACK) {
-			ssb_dma_unmap_single(bp->sdev, bp->rx_ring_dma,
-					     DMA_TABLE_BYTES,
-					     DMA_BIDIRECTIONAL);
+			dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
+					 DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
 			kfree(bp->rx_ring);
 		} else
-			ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES,
-						bp->rx_ring, bp->rx_ring_dma,
-						GFP_KERNEL);
+			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
+					  bp->rx_ring, bp->rx_ring_dma);
 		bp->rx_ring = NULL;
 		bp->flags &= ~B44_FLAG_RX_RING_HACK;
 	}
 	if (bp->tx_ring) {
 		if (bp->flags & B44_FLAG_TX_RING_HACK) {
-			ssb_dma_unmap_single(bp->sdev, bp->tx_ring_dma,
-					     DMA_TABLE_BYTES,
-					     DMA_TO_DEVICE);
+			dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
+					 DMA_TABLE_BYTES, DMA_TO_DEVICE);
 			kfree(bp->tx_ring);
 		} else
-			ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES,
-						bp->tx_ring, bp->tx_ring_dma,
-						GFP_KERNEL);
+			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
+					  bp->tx_ring, bp->tx_ring_dma);
 		bp->tx_ring = NULL;
 		bp->flags &= ~B44_FLAG_TX_RING_HACK;
 	}
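
Note on the coherent-memory calls in the hunk above: ssb_dma_free_consistent() took a gfp argument, but the generic dma_free_coherent() does not, which is why GFP_KERNEL disappears from the free path while dma_alloc_coherent() still accepts one. A minimal sketch of the alloc/free pairing under the generic API (illustrative only; "ring" and "ring_dma" are not names from the patch):

	dma_addr_t ring_dma;
	void *ring;

	/* gfp flags are only supplied at allocation time */
	ring = dma_alloc_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
				  &ring_dma, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* ... descriptor ring in use ... */

	/* freeing takes only the size, CPU address, and bus address */
	dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES, ring, ring_dma);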
@@ -1175,7 +1167,8 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
 		goto out_err;
 
 	size = DMA_TABLE_BYTES;
-	bp->rx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->rx_ring_dma, gfp);
+	bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
+					 &bp->rx_ring_dma, gfp);
 	if (!bp->rx_ring) {
 		/* Allocation may have failed due to pci_alloc_consistent
 		   insisting on use of GFP_DMA, which is more restrictive
@@ -1187,11 +1180,11 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
 		if (!rx_ring)
 			goto out_err;
 
-		rx_ring_dma = ssb_dma_map_single(bp->sdev, rx_ring,
-						 DMA_TABLE_BYTES,
-						 DMA_BIDIRECTIONAL);
+		rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
+					     DMA_TABLE_BYTES,
+					     DMA_BIDIRECTIONAL);
 
-		if (ssb_dma_mapping_error(bp->sdev, rx_ring_dma) ||
+		if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) ||
 			rx_ring_dma + size > DMA_BIT_MASK(30)) {
 			kfree(rx_ring);
 			goto out_err;
@@ -1202,7 +1195,8 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
 		bp->flags |= B44_FLAG_RX_RING_HACK;
 	}
 
-	bp->tx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->tx_ring_dma, gfp);
+	bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
+					 &bp->tx_ring_dma, gfp);
 	if (!bp->tx_ring) {
 		/* Allocation may have failed due to ssb_dma_alloc_consistent
 		   insisting on use of GFP_DMA, which is more restrictive
@@ -1214,11 +1208,11 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
 		if (!tx_ring)
 			goto out_err;
 
-		tx_ring_dma = ssb_dma_map_single(bp->sdev, tx_ring,
-						 DMA_TABLE_BYTES,
-						 DMA_TO_DEVICE);
+		tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
+					     DMA_TABLE_BYTES,
+					     DMA_TO_DEVICE);
 
-		if (ssb_dma_mapping_error(bp->sdev, tx_ring_dma) ||
+		if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) ||
 			tx_ring_dma + size > DMA_BIT_MASK(30)) {
 			kfree(tx_ring);
 			goto out_err;
@@ -2176,12 +2170,14 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
 			"Failed to powerup the bus\n");
 		goto err_out_free_dev;
 	}
-	err = ssb_dma_set_mask(sdev, DMA_BIT_MASK(30));
-	if (err) {
+
+	if (dma_set_mask(sdev->dma_dev, DMA_BIT_MASK(30)) ||
+	    dma_set_coherent_mask(sdev->dma_dev, DMA_BIT_MASK(30))) {
 		dev_err(sdev->dev,
 			"Required 30BIT DMA mask unsupported by the system\n");
 		goto err_out_powerdown;
 	}
+
 	err = b44_get_invariants(bp);
 	if (err) {
 		dev_err(sdev->dev,
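
For reference, the streaming-DMA pattern that recurs throughout this conversion is: map through bp->sdev->dma_dev, then reject mappings that either failed or land above the chip's 1GB (30-bit) DMA limit. A sketch under those assumptions follows; the helper name is illustrative and not part of the patch:

	static int b44_map_rx_buf_sketch(struct b44 *bp, struct sk_buff *skb,
					 dma_addr_t *mapping)
	{
		*mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
					  RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
		if (dma_mapping_error(bp->sdev->dma_dev, *mapping))
			return -ENOMEM;

		/* Hardware bug work-around: the chip cannot DMA above 1GB */
		if (*mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
			dma_unmap_single(bp->sdev->dma_dev, *mapping,
					 RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
			return -ENOMEM;
		}
		return 0;
	}

The same shift to the generic API explains the probe-time hunk above: the single ssb_dma_set_mask() call becomes a pair of dma_set_mask() and dma_set_coherent_mask() calls, both with DMA_BIT_MASK(30).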