@@ -214,7 +214,8 @@ struct xilinx_dpdma_tx_desc {
  * @running: true if the channel is running
  * @first_frame: flag for the first frame of stream
  * @video_group: flag if multi-channel operation is needed for video channels
- * @lock: lock to access struct xilinx_dpdma_chan
+ * @lock: lock to access struct xilinx_dpdma_chan. Must be taken before
+ *        @vchan.lock, if both are to be held.
  * @desc_pool: descriptor allocation pool
  * @err_task: error IRQ bottom half handler
  * @desc: References to descriptors being processed
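
The ordering rule added to the kernel-doc above is what makes the rest of the patch safe: chan->lock is the outer lock, and the virt-dma channel's vchan.lock nests inside it. Documenting an order matters because two paths that nest the same pair of locks in opposite directions can deadlock. A minimal illustration of that ABBA hazard, not driver code (both function names are hypothetical):

#include <linux/spinlock.h>

/*
 * Illustration only: if these two paths run concurrently, each CPU can
 * acquire its first lock and then spin forever waiting for the other's.
 */
static void bad_path(struct xilinx_dpdma_chan *chan)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->vchan.lock, flags);	/* A first */
	spin_lock(&chan->lock);				/* then B: violates the rule */
	spin_unlock(&chan->lock);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

static void good_path(struct xilinx_dpdma_chan *chan)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);		/* B first */
	spin_lock(&chan->vchan.lock);			/* then A: documented order */
	spin_unlock(&chan->vchan.lock);
	spin_unlock_irqrestore(&chan->lock, flags);
}
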
@@ -1097,12 +1098,14 @@ static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan)
	 * Complete the active descriptor, if any, promote the pending
	 * descriptor to active, and queue the next transfer, if any.
	 */
+	spin_lock(&chan->vchan.lock);
	if (chan->desc.active)
		vchan_cookie_complete(&chan->desc.active->vdesc);
	chan->desc.active = pending;
	chan->desc.pending = NULL;

	xilinx_dpdma_chan_queue_transfer(chan);
+	spin_unlock(&chan->vchan.lock);

out:
	spin_unlock_irqrestore(&chan->lock, flags);
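
This handler already runs with chan->lock held via spin_lock_irqsave(), so the added inner lock can be a plain spin_lock(): interrupts are already disabled on this CPU, and the virt-dma helpers used here (vchan_cookie_complete() in particular) expect vc.lock to be held by the caller. The generic shape of the nesting, as a sketch with a hypothetical do_work() standing in for the critical section:

#include <linux/spinlock.h>

/* Sketch of the nesting used throughout this patch. */
static void locked_work(struct xilinx_dpdma_chan *chan)
{
	unsigned long flags;

	/* Outer lock: disables local interrupts and saves their state. */
	spin_lock_irqsave(&chan->lock, flags);
	/* Inner lock: plain spin_lock() is enough, since the outer
	 * irqsave already disabled interrupts on this CPU.
	 */
	spin_lock(&chan->vchan.lock);

	do_work(chan);		/* hypothetical critical section */

	/* Release in reverse order of acquisition. */
	spin_unlock(&chan->vchan.lock);
	spin_unlock_irqrestore(&chan->lock, flags);
}
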
@@ -1264,10 +1267,12 @@ static void xilinx_dpdma_issue_pending(struct dma_chan *dchan)
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
	unsigned long flags;

-	spin_lock_irqsave(&chan->vchan.lock, flags);
+	spin_lock_irqsave(&chan->lock, flags);
+	spin_lock(&chan->vchan.lock);
	if (vchan_issue_pending(&chan->vchan))
		xilinx_dpdma_chan_queue_transfer(chan);
-	spin_unlock_irqrestore(&chan->vchan.lock, flags);
+	spin_unlock(&chan->vchan.lock);
+	spin_unlock_irqrestore(&chan->lock, flags);
}

static int xilinx_dpdma_config(struct dma_chan *dchan,
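
xilinx_dpdma_issue_pending() previously took only vchan.lock; it now takes chan->lock as the outer irqsave lock and nests vchan.lock inside it, so every call site of xilinx_dpdma_chan_queue_transfer() in this patch enters it with both locks held. One way that invariant could be made explicit is with lockdep asserts; this is a sketch of an assumption, not something the patch itself adds:

#include <linux/lockdep.h>

static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
{
	/* Document the locking contract established by the call sites:
	 * both locks must be held, chan->lock taken first.
	 */
	lockdep_assert_held(&chan->lock);
	lockdep_assert_held(&chan->vchan.lock);

	/* ... fetch the next virt-dma descriptor and start the hardware,
	 * as in the driver ...
	 */
}
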
@@ -1495,7 +1500,9 @@ static void xilinx_dpdma_chan_err_task(struct tasklet_struct *t)
			    XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id);

	spin_lock_irqsave(&chan->lock, flags);
+	spin_lock(&chan->vchan.lock);
	xilinx_dpdma_chan_queue_transfer(chan);
+	spin_unlock(&chan->vchan.lock);
	spin_unlock_irqrestore(&chan->lock, flags);
}
