@@ -13,6 +13,90 @@
 #include "reg.h"
 #include "api.h"
 
+static irqreturn_t cn20k_afvf_mbox_intr_handler(int irq, void *rvu_irq)
+{
+	struct rvu_irq_data *rvu_irq_data = rvu_irq;
+	struct rvu *rvu = rvu_irq_data->rvu;
+	u64 intr;
+
+	/* Sync with mbox memory region */
+	rmb();
+
+	/* Clear interrupts */
+	intr = rvupf_read64(rvu, rvu_irq_data->intr_status);
+	rvupf_write64(rvu, rvu_irq_data->intr_status, intr);
+
+	if (intr)
+		trace_otx2_msg_interrupt(rvu->pdev, "VF(s) to AF", intr);
+
+	rvu_irq_data->afvf_queue_work_hdlr(&rvu->afvf_wq_info, rvu_irq_data->start,
+					   rvu_irq_data->mdevs, intr);
+
+	return IRQ_HANDLED;
+}
+
+int cn20k_register_afvf_mbox_intr(struct rvu *rvu, int pf_vec_start)
+{
+	struct rvu_irq_data *irq_data;
+	int intr_vec, offset, vec = 0;
+	int err;
+
+	/* irq data for 4 VFPF intr vectors */
+	irq_data = devm_kcalloc(rvu->dev, 4,
+				sizeof(struct rvu_irq_data), GFP_KERNEL);
+	if (!irq_data)
+		return -ENOMEM;
+
+	for (intr_vec = RVU_MBOX_PF_INT_VEC_VFPF_MBOX0; intr_vec <=
+	     RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1;
+	     intr_vec++, vec++) {
+		switch (intr_vec) {
+		case RVU_MBOX_PF_INT_VEC_VFPF_MBOX0:
+			irq_data[vec].intr_status =
+				RVU_MBOX_PF_VFPF_INTX(0);
+			irq_data[vec].start = 0;
+			irq_data[vec].mdevs = 64;
+			break;
+		case RVU_MBOX_PF_INT_VEC_VFPF_MBOX1:
+			irq_data[vec].intr_status =
+				RVU_MBOX_PF_VFPF_INTX(1);
+			irq_data[vec].start = 64;
+			irq_data[vec].mdevs = 64;
+			break;
+		case RVU_MBOX_PF_INT_VEC_VFPF1_MBOX0:
+			irq_data[vec].intr_status =
+				RVU_MBOX_PF_VFPF1_INTX(0);
+			irq_data[vec].start = 0;
+			irq_data[vec].mdevs = 64;
+			break;
+		case RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1:
+			irq_data[vec].intr_status = RVU_MBOX_PF_VFPF1_INTX(1);
+			irq_data[vec].start = 64;
+			irq_data[vec].mdevs = 64;
+			break;
+		}
+		irq_data[vec].afvf_queue_work_hdlr = rvu_queue_work;
+		offset = pf_vec_start + intr_vec;
+		irq_data[vec].vec_num = offset;
+		irq_data[vec].rvu = rvu;
+
+		sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAF VFAF%d Mbox%d",
+			vec / 2, vec % 2);
+		err = request_irq(pci_irq_vector(rvu->pdev, offset),
+				  rvu->ng_rvu->rvu_mbox_ops->afvf_intr_handler, 0,
+				  &rvu->irq_name[offset * NAME_SIZE],
+				  &irq_data[vec]);
+		if (err) {
+			dev_err(rvu->dev,
+				"RVUAF: IRQ registration failed for AFVF mbox irq\n");
+			return err;
+		}
+		rvu->irq_allocated[offset] = true;
+	}
+
+	return 0;
+}
+
 /* CN20K mbox PFx => AF irq handler */
 static irqreturn_t cn20k_mbox_pf_common_intr_handler(int irq, void *rvu_irq)
 {
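The handler above latches one 64-bit VFPF status word and hands it to the work-queue dispatcher along with a `start` offset, so bit N of a MBOX1 status word maps to VF 64+N. A minimal userspace sketch of that dispatch pattern; `queue_vf_work()` and the sample mask are illustrative assumptions, not driver code:

```c
#include <stdio.h>
#include <stdint.h>

/* Stand-in for the driver's work-queue kick; purely illustrative. */
static void queue_vf_work(int vf)
{
	printf("queue mbox work for VF%d\n", vf);
}

/* Each set bit in the latched interrupt word names one VF with a pending
 * mailbox message; 'start' is 0 or 64 depending on which status word fired.
 */
static void dispatch(uint64_t intr, int start, int mdevs)
{
	for (int bit = 0; bit < mdevs; bit++)
		if (intr & (1ULL << bit))
			queue_vf_work(start + bit);
}

int main(void)
{
	dispatch(0x5, 64, 64); /* e.g. VF64 and VF66 raised a MBOX1 interrupt */
	return 0;
}
```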
@@ -150,6 +235,21 @@ int cn20k_rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
 	int region;
 	u64 bar;
 
+	if (type == TYPE_AFVF) {
+		for (region = 0; region < num; region++) {
+			if (!test_bit(region, pf_bmap))
+				continue;
+
+			bar = (u64)phys_to_virt((u64)rvu->ng_rvu->vf_mbox_addr->base);
+			bar += region * MBOX_SIZE;
+			mbox_addr[region] = (void *)bar;
+
+			if (!mbox_addr[region])
+				return -ENOMEM;
+		}
+		return 0;
+	}
+
 	for (region = 0; region < num; region++) {
 		if (!test_bit(region, pf_bmap))
 			continue;
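For the AF-VF case, the new branch in `cn20k_rvu_get_mbox_regions()` derives each VF's mailbox by offsetting into one contiguous allocation at a fixed `MBOX_SIZE` stride. A sketch of that address arithmetic; the 64 KB value matches the driver's usual `MBOX_SIZE` but is an assumption here:

```c
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define MBOX_SIZE 0x10000 /* assumed 64 KB per mailbox, as on earlier parts */

/* The region'th mailbox inside one contiguous backing buffer, mirroring
 * the "bar += region * MBOX_SIZE" arithmetic in the hunk above.
 */
static void *vf_mbox_region(void *base, int region)
{
	return (uint8_t *)base + (size_t)region * MBOX_SIZE;
}

int main(void)
{
	static uint8_t backing[4 * MBOX_SIZE]; /* stand-in for the qmem block */
	int vf;

	for (vf = 0; vf < 4; vf++)
		printf("VF%d mbox at offset %#lx\n", vf,
		       (unsigned long)((uint8_t *)vf_mbox_region(backing, vf) - backing));
	return 0;
}
```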
@@ -180,6 +280,9 @@ static int rvu_alloc_mbox_memory(struct rvu *rvu, int type,
 	 *
 	 * AF will access mbox memory using direct physical addresses
 	 * and PFs will access the same shared memory from BAR2.
+	 *
+	 * PF <=> VF mbox memory also works in the same fashion.
+	 * AFPF and PFVF mailboxes require the IOVA to maintain the mailbox msgs.
 	 */
 
 	err = qmem_alloc(rvu->dev, &mbox_addr, ndevs, mbox_size);
@@ -196,6 +299,10 @@ static int rvu_alloc_mbox_memory(struct rvu *rvu, int type,
 			iova += mbox_size;
 		}
 		break;
+	case TYPE_AFVF:
+		rvu->ng_rvu->vf_mbox_addr = mbox_addr;
+		rvupf_write64(rvu, RVU_PF_VF_MBOX_ADDR, (u64)mbox_addr->iova);
+		break;
 	default:
 		return 0;
 	}
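The comment in this hunk carries the key design note: the same allocation is reached by the AF through a CPU virtual address and by PF/VF hardware through its IOVA, which the `TYPE_AFVF` case programs into `RVU_PF_VF_MBOX_ADDR`. A simplified model of that dual-view buffer; upstream's `struct qmem` carries more fields, and this layout with its sample values is an assumption for illustration:

```c
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Simplified model of the dual-view mailbox buffer: one allocation the AF
 * reaches through a CPU virtual address while devices reach it through a
 * DMA address.
 */
struct mbox_mem {
	void *base;    /* CPU view, cf. the phys_to_virt() lookup above */
	uint64_t iova; /* device view, written to RVU_PF_VF_MBOX_ADDR   */
	size_t size;
};

int main(void)
{
	struct mbox_mem m = {
		.base = (void *)(uintptr_t)0xffff000010000000ULL, /* made up */
		.iova = 0x10000000,                               /* made up */
		.size = 128 * 0x10000, /* ndevs * assumed 64 KB MBOX_SIZE */
	};

	printf("CPU view %p, device view %#llx, %zu bytes\n",
	       m.base, (unsigned long long)m.iova, m.size);
	return 0;
}
```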
@@ -205,6 +312,7 @@ static int rvu_alloc_mbox_memory(struct rvu *rvu, int type,
 
 static struct mbox_ops cn20k_mbox_ops = {
 	.pf_intr_handler = cn20k_mbox_pf_common_intr_handler,
+	.afvf_intr_handler = cn20k_afvf_mbox_intr_handler,
 };
 
 int cn20k_rvu_mbox_init(struct rvu *rvu, int type, int ndevs)
@@ -216,9 +324,13 @@ int cn20k_rvu_mbox_init(struct rvu *rvu, int type, int ndevs)
 
 	rvu->ng_rvu->rvu_mbox_ops = &cn20k_mbox_ops;
 
-	for (dev = 0; dev < ndevs; dev++)
-		rvu_write64(rvu, BLKADDR_RVUM,
-			    RVU_MBOX_AF_PFX_CFG(dev), ilog2(MBOX_SIZE));
+	if (type == TYPE_AFVF) {
+		rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_PF_VF_CFG, ilog2(MBOX_SIZE));
+	} else {
+		for (dev = 0; dev < ndevs; dev++)
+			rvu_write64(rvu, BLKADDR_RVUM,
+				    RVU_MBOX_AF_PFX_CFG(dev), ilog2(MBOX_SIZE));
+	}
 
 	return rvu_alloc_mbox_memory(rvu, type, ndevs, MBOX_SIZE);
 }
@@ -229,6 +341,51 @@ void cn20k_free_mbox_memory(struct rvu *rvu)
 		return;
 
 	qmem_free(rvu->dev, rvu->ng_rvu->pf_mbox_addr);
+	qmem_free(rvu->dev, rvu->ng_rvu->vf_mbox_addr);
+}
+
+void cn20k_rvu_disable_afvf_intr(struct rvu *rvu, int vfs)
+{
+	rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INT_ENA_W1CX(0), INTR_MASK(vfs));
+	rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(0), INTR_MASK(vfs));
+	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
+	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
+
+	if (vfs <= 64)
+		return;
+
+	rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
+	rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
+	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
+	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
+}
+
+void cn20k_rvu_enable_afvf_intr(struct rvu *rvu, int vfs)
+{
+	/* Clear any pending interrupts and enable AF VF interrupts for
+	 * the first 64 VFs.
+	 */
+	rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INTX(0), INTR_MASK(vfs));
+	rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INT_ENA_W1SX(0), INTR_MASK(vfs));
+	rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INTX(0), INTR_MASK(vfs));
+	rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(0), INTR_MASK(vfs));
+
+	/* FLR */
+	rvupf_write64(rvu, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs));
+	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs));
+
+	/* Same for remaining VFs, if any. */
+	if (vfs <= 64)
+		return;
+
+	rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INTX(1), INTR_MASK(vfs - 64));
+	rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
+	rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INTX(1), INTR_MASK(vfs - 64));
+	rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
+
+	rvupf_write64(rvu, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs - 64));
+	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
+	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
 }
 
 int rvu_alloc_cint_qint_mem(struct rvu *rvu, struct rvu_pfvf *pfvf,
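The enable/disable pair above relies on two conventions worth spelling out: `INTR_MASK()` builds a mask covering the low n bits (saturating to all 64 for n >= 64, with the second status word covering VFs 64..127), and each interrupt-enable register comes as a W1S/W1C pair, where writing ones sets or clears exactly those bits without touching the rest. A compilable model of both; `INTR_MASK` mirrors the helper in the driver's rvu.h, while the register model is a plain-C assumption:

```c
#include <stdio.h>
#include <stdint.h>

#define BIT_ULL(n)	 (1ULL << (n))
/* Mirrors the INTR_MASK() helper in the driver's rvu.h: a mask over the
 * low 'pfvfs' bits, saturating to all-ones once 64 or more are requested.
 */
#define INTR_MASK(pfvfs) (((pfvfs) < 64) ? (BIT_ULL(pfvfs) - 1) : (~0ULL))

static uint64_t ena; /* models the state behind one INT_ENA W1S/W1C pair */

static void write_w1s(uint64_t v) { ena |= v;  } /* set written 1-bits   */
static void write_w1c(uint64_t v) { ena &= ~v; } /* clear written 1-bits */

int main(void)
{
	write_w1s(INTR_MASK(8)); /* enable interrupts for 8 VFs -> 0xff */
	write_w1c(INTR_MASK(4)); /* later disable the first 4   -> 0xf0 */
	printf("ena = %#llx\n", (unsigned long long)ena);
	return 0;
}
```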