@@ -789,24 +789,6 @@ static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
 					      adapter->num_io_queues);
 }
 
-static int validate_rx_req_id(struct ena_ring *rx_ring, u16 req_id)
-{
-	if (likely(req_id < rx_ring->ring_size))
-		return 0;
-
-	netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
-		  "Invalid rx req_id: %hu\n", req_id);
-
-	u64_stats_update_begin(&rx_ring->syncp);
-	rx_ring->rx_stats.bad_req_id++;
-	u64_stats_update_end(&rx_ring->syncp);
-
-	/* Trigger device reset */
-	rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
-	set_bit(ENA_FLAG_TRIGGER_RESET, &rx_ring->adapter->flags);
-	return -EFAULT;
-}
-
 /* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
  * @adapter: network interface device structure
  * @qid: queue index
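
With validate_rx_req_id() removed, the Rx fast path no longer bounds-checks req_id itself. The failure is instead expected to surface from the descriptor-fetch call in ena_clean_rx_irq() (ena_com_rx_pkt() in this driver), and the reworked error path later in this diff tells the two failure modes apart by return code. A minimal sketch of the resulting caller-side pattern, assuming -ENOSPC means "too many descriptors" and any other negative errno means an invalid req_id:

	/* Sketch only, not part of the patch: parse errors from the
	 * completion queue now funnel into a single error label instead
	 * of per-buffer validate_rx_req_id() calls.
	 */
	rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq, rx_ring->ena_com_io_sq,
			    &ena_rx_ctx);
	if (unlikely(rc))
		goto error;	/* demultiplexed at the "error:" label below */
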
@@ -926,10 +908,14 @@ static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
 static int ena_alloc_rx_page(struct ena_ring *rx_ring,
 			     struct ena_rx_buffer *rx_info, gfp_t gfp)
 {
+	int headroom = rx_ring->rx_headroom;
 	struct ena_com_buf *ena_buf;
 	struct page *page;
 	dma_addr_t dma;
 
+	/* restore page offset value in case it has been changed by device */
+	rx_info->page_offset = headroom;
+
 	/* if previous allocated page is not used */
 	if (unlikely(rx_info->page))
 		return 0;
@@ -959,10 +945,9 @@ static int ena_alloc_rx_page(struct ena_ring *rx_ring,
959945 "Allocate page %p, rx_info %p\n" , page , rx_info );
960946
961947 rx_info -> page = page ;
962- rx_info -> page_offset = 0 ;
963948 ena_buf = & rx_info -> ena_buf ;
964- ena_buf -> paddr = dma + rx_ring -> rx_headroom ;
965- ena_buf -> len = ENA_PAGE_SIZE - rx_ring -> rx_headroom ;
949+ ena_buf -> paddr = dma + headroom ;
950+ ena_buf -> len = ENA_PAGE_SIZE - headroom ;
966951
967952 return 0 ;
968953}
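
Together with the ena_clean_rx_irq() hunk below, this pins down where a packet starts inside an Rx page. An illustrative sketch of the layout (the diagram is editorial; the names come from the patch):

	/*
	 * page start (dma)        ena_buf->paddr = dma + headroom
	 * |<---- rx_headroom ---->|<-- pkt_offset -->| packet data ...
	 *                         ^                  ^
	 *           page_offset at alloc      page_offset after the
	 *           time (= headroom)         device's pkt_offset is
	 *                                     added in ena_clean_rx_irq()
	 *
	 * ena_buf->len = ENA_PAGE_SIZE - headroom keeps device writes
	 * inside the page.
	 */

This is also why the XDP hunk below can drop the explicit "+ rx_ring->rx_headroom": page_offset now already contains the headroom, so adding it again would double-count it.
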
@@ -1356,15 +1341,10 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
 	struct ena_rx_buffer *rx_info;
 	u16 len, req_id, buf = 0;
 	void *va;
-	int rc;
 
 	len = ena_bufs[buf].len;
 	req_id = ena_bufs[buf].req_id;
 
-	rc = validate_rx_req_id(rx_ring, req_id);
-	if (unlikely(rc < 0))
-		return NULL;
-
 	rx_info = &rx_ring->rx_buffer_info[req_id];
 
 	if (unlikely(!rx_info->page)) {
@@ -1379,7 +1359,8 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
 
 	/* save virt address of first buffer */
 	va = page_address(rx_info->page) + rx_info->page_offset;
-	prefetch(va + NET_IP_ALIGN);
+
+	prefetch(va);
 
 	if (len <= rx_ring->rx_copybreak) {
 		skb = ena_alloc_skb(rx_ring, false);
@@ -1420,8 +1401,6 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
 
 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
 				rx_info->page_offset, len, ENA_PAGE_SIZE);
-		/* The offset is non zero only for the first buffer */
-		rx_info->page_offset = 0;
 
 		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
 			  "RX skb updated. len %d. data_len %d\n",
@@ -1440,10 +1419,6 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
 		len = ena_bufs[buf].len;
 		req_id = ena_bufs[buf].req_id;
 
-		rc = validate_rx_req_id(rx_ring, req_id);
-		if (unlikely(rc < 0))
-			return NULL;
-
 		rx_info = &rx_ring->rx_buffer_info[req_id];
 	} while (1);
 
@@ -1544,8 +1519,7 @@ static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
 	int ret;
 
 	rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
-	xdp->data = page_address(rx_info->page) +
-		rx_info->page_offset + rx_ring->rx_headroom;
+	xdp->data = page_address(rx_info->page) + rx_info->page_offset;
 	xdp_set_data_meta_invalid(xdp);
 	xdp->data_hard_start = page_address(rx_info->page);
 	xdp->data_end = xdp->data + rx_ring->ena_bufs[0].len;
@@ -1612,8 +1586,9 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
 		if (unlikely(ena_rx_ctx.descs == 0))
 			break;
 
+		/* First descriptor might have an offset set by the device */
 		rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
-		rx_info->page_offset = ena_rx_ctx.pkt_offset;
+		rx_info->page_offset += ena_rx_ctx.pkt_offset;
 
 		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
 			  "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
@@ -1697,12 +1672,18 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
 error:
 	adapter = netdev_priv(rx_ring->netdev);
 
-	u64_stats_update_begin(&rx_ring->syncp);
-	rx_ring->rx_stats.bad_desc_num++;
-	u64_stats_update_end(&rx_ring->syncp);
+	if (rc == -ENOSPC) {
+		u64_stats_update_begin(&rx_ring->syncp);
+		rx_ring->rx_stats.bad_desc_num++;
+		u64_stats_update_end(&rx_ring->syncp);
+		adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
+	} else {
+		u64_stats_update_begin(&rx_ring->syncp);
+		rx_ring->rx_stats.bad_req_id++;
+		u64_stats_update_end(&rx_ring->syncp);
+		adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
+	}
 
-	/* Too many desc from the device. Trigger reset */
-	adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
 	set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
 
 	return 0;
@@ -3388,16 +3369,9 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
 		goto err_mmio_read_less;
 	}
 
-	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_width));
+	rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(dma_width));
 	if (rc) {
-		dev_err(dev, "pci_set_dma_mask failed 0x%x\n", rc);
-		goto err_mmio_read_less;
-	}
-
-	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_width));
-	if (rc) {
-		dev_err(dev, "err_pci_set_consistent_dma_mask failed 0x%x\n",
-			rc);
+		dev_err(dev, "dma_set_mask_and_coherent failed %d\n", rc);
 		goto err_mmio_read_less;
 	}
 
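
dma_set_mask_and_coherent() folds the two legacy PCI wrappers into one call on the generic struct device. Upstream it is defined roughly as follows, which is why a single error check suffices:

	/* Roughly the upstream helper from <linux/dma-mapping.h>: the
	 * coherent mask is guaranteed to accept anything the streaming
	 * mask accepted, so its return value can be ignored.
	 */
	static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
	{
		int rc = dma_set_mask(dev, mask);

		if (rc == 0)
			dma_set_coherent_mask(dev, mask);
		return rc;
	}
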
@@ -4167,6 +4141,12 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		return rc;
 	}
 
+	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(ENA_MAX_PHYS_ADDR_SIZE_BITS));
+	if (rc) {
+		dev_err(&pdev->dev, "dma_set_mask_and_coherent failed %d\n", rc);
+		goto err_disable_device;
+	}
+
 	pci_set_master(pdev);
 
 	ena_dev = vzalloc(sizeof(*ena_dev));