 static struct dev_hw_ops cn20k_hw_ops = {
 	.pfaf_mbox_intr_handler = cn20k_pfaf_mbox_intr_handler,
 	.vfaf_mbox_intr_handler = cn20k_vfaf_mbox_intr_handler,
+	.pfvf_mbox_intr_handler = cn20k_pfvf_mbox_intr_handler,
 };
 
 void cn20k_init(struct otx2_nic *pfvf)
@@ -108,3 +109,144 @@ irqreturn_t cn20k_vfaf_mbox_intr_handler(int irq, void *vf_irq)
 
 	return IRQ_HANDLED;
 }
+
+void cn20k_enable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
+{
+	/* Clear PF <=> VF mailbox IRQ */
+	otx2_write64(pf, RVU_MBOX_PF_VFPF_INTX(0), ~0ull);
+	otx2_write64(pf, RVU_MBOX_PF_VFPF_INTX(1), ~0ull);
+	otx2_write64(pf, RVU_MBOX_PF_VFPF1_INTX(0), ~0ull);
+	otx2_write64(pf, RVU_MBOX_PF_VFPF1_INTX(1), ~0ull);
+
+	/* Enable PF <=> VF mailbox IRQ */
+	otx2_write64(pf, RVU_MBOX_PF_VFPF_INT_ENA_W1SX(0), INTR_MASK(numvfs));
+	otx2_write64(pf, RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(0), INTR_MASK(numvfs));
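+	/* Each 64-bit enable register covers 64 VFs; spill the remainder
+	 * into register instance 1 when more than 64 VFs are enabled.
+	 */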
+	if (numvfs > 64) {
+		numvfs -= 64;
+		otx2_write64(pf, RVU_MBOX_PF_VFPF_INT_ENA_W1SX(1),
+			     INTR_MASK(numvfs));
+		otx2_write64(pf, RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(1),
+			     INTR_MASK(numvfs));
+	}
+}
+
+void cn20k_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
+{
+	int vector, intr_vec, vec = 0;
+
+	/* Disable PF <=> VF mailbox IRQ */
+	otx2_write64(pf, RVU_MBOX_PF_VFPF_INT_ENA_W1CX(0), ~0ull);
+	otx2_write64(pf, RVU_MBOX_PF_VFPF_INT_ENA_W1CX(1), ~0ull);
+	otx2_write64(pf, RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(0), ~0ull);
+	otx2_write64(pf, RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(1), ~0ull);
+
+	otx2_write64(pf, RVU_MBOX_PF_VFPF_INTX(0), ~0ull);
+	otx2_write64(pf, RVU_MBOX_PF_VFPF1_INTX(0), ~0ull);
+
+	if (numvfs > 64) {
+		otx2_write64(pf, RVU_MBOX_PF_VFPF_INTX(1), ~0ull);
+		otx2_write64(pf, RVU_MBOX_PF_VFPF1_INTX(1), ~0ull);
+	}
+
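+	/* Free the IRQs registered for the four PF <=> VF mailbox vectors */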
+	for (intr_vec = RVU_MBOX_PF_INT_VEC_VFPF_MBOX0; intr_vec <=
+	     RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1; intr_vec++, vec++) {
+		vector = pci_irq_vector(pf->pdev, intr_vec);
+		free_irq(vector, pf->hw.pfvf_irq_devid[vec]);
+	}
+}
+
+irqreturn_t cn20k_pfvf_mbox_intr_handler(int irq, void *pf_irq)
+{
+	struct pf_irq_data *irq_data = pf_irq;
+	struct otx2_nic *pf = irq_data->pf;
+	struct mbox *mbox;
+	u64 intr;
+
+	/* Sync with mbox memory region */
+	rmb();
+
+	/* Clear interrupts */
+	intr = otx2_read64(pf, irq_data->intr_status);
+	otx2_write64(pf, irq_data->intr_status, intr);
+	mbox = pf->mbox_pfvf;
+
+	if (intr)
+		trace_otx2_msg_interrupt(pf->pdev, "VF(s) to PF", intr);
+
+	irq_data->pf_queue_work_hdlr(mbox, pf->mbox_pfvf_wq, irq_data->start,
+				     irq_data->mdevs, intr);
+
+	return IRQ_HANDLED;
+}
+
+int cn20k_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
+{
+	struct otx2_hw *hw = &pf->hw;
+	struct pf_irq_data *irq_data;
+	int intr_vec, ret, vec = 0;
+	char *irq_name;
+
+	/* irq data for 4 PF intr vectors */
+	irq_data = devm_kcalloc(pf->dev, 4,
+				sizeof(struct pf_irq_data), GFP_KERNEL);
+	if (!irq_data)
+		return -ENOMEM;
+
+	for (intr_vec = RVU_MBOX_PF_INT_VEC_VFPF_MBOX0; intr_vec <=
+	     RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1; intr_vec++, vec++) {
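+		/* Map this vector to its interrupt status register and the
+		 * range of VF mailbox regions it serves.
+		 */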
+		switch (intr_vec) {
+		case RVU_MBOX_PF_INT_VEC_VFPF_MBOX0:
+			irq_data[vec].intr_status = RVU_MBOX_PF_VFPF_INTX(0);
+			irq_data[vec].start = 0;
+			irq_data[vec].mdevs = 64;
+			break;
+		case RVU_MBOX_PF_INT_VEC_VFPF_MBOX1:
+			irq_data[vec].intr_status = RVU_MBOX_PF_VFPF_INTX(1);
+			irq_data[vec].start = 64;
+			irq_data[vec].mdevs = 96;
+			break;
+		case RVU_MBOX_PF_INT_VEC_VFPF1_MBOX0:
+			irq_data[vec].intr_status = RVU_MBOX_PF_VFPF1_INTX(0);
+			irq_data[vec].start = 0;
+			irq_data[vec].mdevs = 64;
+			break;
+		case RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1:
+			irq_data[vec].intr_status = RVU_MBOX_PF_VFPF1_INTX(1);
+			irq_data[vec].start = 64;
+			irq_data[vec].mdevs = 96;
+			break;
+		}
+		irq_data[vec].pf_queue_work_hdlr = otx2_queue_vf_work;
+		irq_data[vec].vec_num = intr_vec;
+		irq_data[vec].pf = pf;
+
+		/* Register mailbox interrupt handler */
+		irq_name = &hw->irq_name[intr_vec * NAME_SIZE];
+		if (pf->pcifunc)
+			snprintf(irq_name, NAME_SIZE, "RVUPF%d_VF%d Mbox%d",
+				 rvu_get_pf(pf->pdev, pf->pcifunc),
+				 vec / 2, vec % 2);
+		else
+			snprintf(irq_name, NAME_SIZE, "RVUPF_VF%d Mbox%d",
+				 vec / 2, vec % 2);
+
+		hw->pfvf_irq_devid[vec] = &irq_data[vec];
+		ret = request_irq(pci_irq_vector(pf->pdev, intr_vec),
+				  pf->hw_ops->pfvf_mbox_intr_handler, 0,
+				  irq_name, &irq_data[vec]);
+		if (ret) {
+			dev_err(pf->dev,
+				"RVUPF: IRQ registration failed for PFVF mbox irq\n");
+			return ret;
+		}
+	}
+
+	cn20k_enable_pfvf_mbox_intr(pf, numvfs);
+
+	return 0;
+}
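
For context, a minimal sketch of the per-vector state this patch relies on, reconstructed from its usage above; the authoritative definition lives in the driver's headers and may differ:

	/* Reconstructed from usage in this diff -- not the authoritative
	 * definition. One instance is allocated per PF <=> VF mailbox vector.
	 */
	struct pf_irq_data {
		u64 intr_status;	/* mbox interrupt status reg offset */
		void (*pf_queue_work_hdlr)(struct mbox *mb,
					   struct workqueue_struct *wq,
					   int first, int mdevs, u64 intr);
		struct otx2_nic *pf;	/* owning PF instance */
		int vec_num;		/* MSI-X vector number */
		int start;		/* first VF mbox region served */
		int mdevs;		/* number of mbox regions scanned */
	};

INTR_MASK(n) is assumed to expand to a mask with the low n bits set (all ones once n reaches 64), i.e. one enable bit per VF, which is why the enable path splits numvfs across the (0) and (1) register instances at the 64-VF boundary.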