@@ -219,9 +219,12 @@ struct pispbe_hw_enables {
219219
220220/* Records a job configuration and memory addresses. */
221221struct pispbe_job_descriptor {
222+ struct pispbe_buffer * buf [PISPBE_NUM_NODES ];
223+ struct pispbe_node_group * node_group ;
222224 dma_addr_t hw_dma_addrs [N_HW_ADDRESSES ];
223225 struct pisp_be_tiles_config * config ;
224226 struct pispbe_hw_enables hw_enables ;
227+ struct list_head queue ;
225228 dma_addr_t tiles ;
226229};
227230
@@ -235,8 +238,10 @@ struct pispbe_dev {
235238 struct clk * clk ;
236239 struct pispbe_node_group node_group [PISPBE_NUM_NODE_GROUPS ];
237240 struct pispbe_job queued_job , running_job ;
238- spinlock_t hw_lock ; /* protects "hw_busy" flag and streaming_map */
241+ /* protects "hw_busy" flag, streaming_map and job queue */
242+ spinlock_t hw_lock ;
239243 bool hw_busy ; /* non-zero if a job is queued or is being started */
244+ struct list_head job_queue ;
240245 int irq ;
241246 u32 hw_version ;
242247 u8 done , started ;
@@ -463,42 +468,49 @@ static void pispbe_xlate_addrs(struct pispbe_job_descriptor *job,
463468 * For Output0, Output1, Tdn and Stitch, a buffer only needs to be
464469 * available if the blocks are enabled in the config.
465470 *
466- * Needs to be called with hw_lock held.
471+ * If all the buffers required to form a job are available, append the
472+ * job descriptor to the job queue to be later queued to the HW.
467473 *
468474 * Returns 0 if a job has been successfully prepared, < 0 otherwise.
469475 */
470- static int pispbe_prepare_job (struct pispbe_node_group * node_group ,
471- struct pispbe_job_descriptor * job )
476+ static int pispbe_prepare_job (struct pispbe_dev * pispbe ,
477+ struct pispbe_node_group * node_group )
472478{
473479 struct pispbe_buffer * buf [PISPBE_NUM_NODES ] = {};
474- struct pispbe_dev * pispbe = node_group -> pispbe ;
480+ struct pispbe_job_descriptor * job ;
481+ unsigned int streaming_map ;
475482 unsigned int config_index ;
476483 struct pispbe_node * node ;
477484 unsigned long flags ;
478485
479- lockdep_assert_held (& pispbe -> hw_lock );
480-
481- memset ( job , 0 , sizeof ( struct pispbe_job_descriptor ) );
486+ spin_lock_irqsave (& pispbe -> hw_lock , flags );
487+ streaming_map = node_group -> streaming_map ;
488+ spin_unlock_irqrestore ( & pispbe -> hw_lock , flags );
482489
483- if (((BIT (CONFIG_NODE ) | BIT (MAIN_INPUT_NODE )) &
484- node_group -> streaming_map ) !=
485- (BIT (CONFIG_NODE ) | BIT (MAIN_INPUT_NODE )))
490+ if (((BIT (CONFIG_NODE ) | BIT (MAIN_INPUT_NODE )) & streaming_map ) !=
491+ (BIT (CONFIG_NODE ) | BIT (MAIN_INPUT_NODE )))
486492 return - ENODEV ;
487493
494+ job = kzalloc (sizeof (* job ), GFP_KERNEL );
495+ if (!job )
496+ return - ENOMEM ;
497+
488498 node = & node_group -> node [CONFIG_NODE ];
489499 spin_lock_irqsave (& node -> ready_lock , flags );
490500 buf [CONFIG_NODE ] = list_first_entry_or_null (& node -> ready_queue ,
491501 struct pispbe_buffer ,
492502 ready_list );
493503 if (buf [CONFIG_NODE ]) {
494504 list_del (& buf [CONFIG_NODE ]-> ready_list );
495- pispbe -> queued_job . buf [CONFIG_NODE ] = buf [CONFIG_NODE ];
505+ job -> buf [CONFIG_NODE ] = buf [CONFIG_NODE ];
496506 }
497507 spin_unlock_irqrestore (& node -> ready_lock , flags );
498508
499509 /* Exit early if no config buffer has been queued. */
500- if (!buf [CONFIG_NODE ])
510+ if (!buf [CONFIG_NODE ]) {
511+ kfree (job );
501512 return - ENODEV ;
513+ }
502514
503515 config_index = buf [CONFIG_NODE ]-> vb .vb2_buf .index ;
504516 job -> config = & node_group -> config [config_index ];
@@ -519,7 +531,7 @@ static int pispbe_prepare_job(struct pispbe_node_group *node_group,
519531 continue ;
520532
521533 buf [i ] = NULL ;
522- if (!(node_group -> streaming_map & BIT (i )))
534+ if (!(streaming_map & BIT (i )))
523535 continue ;
524536
525537 if ((!(rgb_en & PISP_BE_RGB_ENABLE_OUTPUT0 ) &&
@@ -552,19 +564,23 @@ static int pispbe_prepare_job(struct pispbe_node_group *node_group,
552564 ready_list );
553565 if (buf [i ]) {
554566 list_del (& buf [i ]-> ready_list );
555- pispbe -> queued_job . buf [i ] = buf [i ];
567+ job -> buf [i ] = buf [i ];
556568 }
557569 spin_unlock_irqrestore (& node -> ready_lock , flags );
558570
559571 if (!buf [i ] && !ignore_buffers )
560572 goto err_return_buffers ;
561573 }
562574
563- pispbe -> queued_job . node_group = node_group ;
575+ job -> node_group = node_group ;
564576
565577 /* Convert buffers to DMA addresses for the hardware */
566578 pispbe_xlate_addrs (job , buf , node_group );
567579
580+ spin_lock_irqsave (& pispbe -> hw_lock , flags );
581+ list_add_tail (& job -> queue , & pispbe -> job_queue );
582+ spin_unlock_irqrestore (& pispbe -> hw_lock , flags );
583+
568584 return 0 ;
569585
570586err_return_buffers :
@@ -580,16 +596,15 @@ static int pispbe_prepare_job(struct pispbe_node_group *node_group,
580596 spin_unlock_irqrestore (& n -> ready_lock , flags );
581597 }
582598
583- memset ( & pispbe -> queued_job , 0 , sizeof ( pispbe -> queued_job ) );
599+ kfree ( job );
584600
585601 return - ENODEV ;
586602}
587603
588604static void pispbe_schedule (struct pispbe_dev * pispbe ,
589- struct pispbe_node_group * node_group ,
590605 bool clear_hw_busy )
591606{
592- struct pispbe_job_descriptor job ;
607+ struct pispbe_job_descriptor * job ;
593608 unsigned long flags ;
594609
595610 spin_lock_irqsave (& pispbe -> hw_lock , flags );
@@ -600,53 +615,51 @@ static void pispbe_schedule(struct pispbe_dev *pispbe,
600615 if (pispbe -> hw_busy )
601616 goto unlock_and_return ;
602617
603- for (unsigned int i = 0 ; i < PISPBE_NUM_NODE_GROUPS ; i ++ ) {
604- int ret ;
618+ job = list_first_entry_or_null (& pispbe -> job_queue ,
619+ struct pispbe_job_descriptor ,
620+ queue );
621+ if (!job )
622+ goto unlock_and_return ;
605623
606- /* Schedule jobs only for a specific group. */
607- if (node_group && & pispbe -> node_group [i ] != node_group )
608- continue ;
624+ list_del (& job -> queue );
609625
610- /*
611- * Prepare a job for this group, if the group is not ready
612- * continue and try with the next one.
613- */
614- ret = pispbe_prepare_job (& pispbe -> node_group [i ], & job );
615- if (ret )
616- continue ;
626+ for (unsigned int i = 0 ; i < PISPBE_NUM_NODES ; i ++ )
627+ pispbe -> queued_job .buf [i ] = job -> buf [i ];
628+ pispbe -> queued_job .node_group = job -> node_group ;
629+
630+ pispbe -> hw_busy = true;
617631
632+ /*
633+ * We can kick the job off without the hw_lock, as this can
634+ * never run again until hw_busy is cleared, which will happen
635+ * only when the following job has been queued and an interrupt
636+ * is rised.
637+ */
638+ spin_unlock_irqrestore (& pispbe -> hw_lock , flags );
639+
640+ if (job -> config -> num_tiles <= 0 ||
641+ job -> config -> num_tiles > PISP_BACK_END_NUM_TILES ||
642+ !((job -> hw_enables .bayer_enables |
643+ job -> hw_enables .rgb_enables ) &
644+ PISP_BE_BAYER_ENABLE_INPUT )) {
618645 /*
619- * We can kick the job off without the hw_lock, as this can
620- * never run again until hw_busy is cleared, which will happen
621- * only when the following job has been queued and an interrupt
622- * is rised.
646+ * Bad job. We can't let it proceed as it could lock up
647+ * the hardware, or worse!
648+ *
649+ * For now, just force num_tiles to 0, which causes the
650+ * H/W to do something bizarre but survivable. It
651+ * increments (started,done) counters by more than 1,
652+ * but we seem to survive...
623653 */
624- pispbe -> hw_busy = true;
625- spin_unlock_irqrestore (& pispbe -> hw_lock , flags );
626-
627- if (job .config -> num_tiles <= 0 ||
628- job .config -> num_tiles > PISP_BACK_END_NUM_TILES ||
629- !((job .hw_enables .bayer_enables |
630- job .hw_enables .rgb_enables ) &
631- PISP_BE_BAYER_ENABLE_INPUT )) {
632- /*
633- * Bad job. We can't let it proceed as it could lock up
634- * the hardware, or worse!
635- *
636- * For now, just force num_tiles to 0, which causes the
637- * H/W to do something bizarre but survivable. It
638- * increments (started,done) counters by more than 1,
639- * but we seem to survive...
640- */
641- dev_dbg (pispbe -> dev , "Bad job: invalid number of tiles: %u\n" ,
642- job .config -> num_tiles );
643- job .config -> num_tiles = 0 ;
644- }
654+ dev_dbg (pispbe -> dev , "Bad job: invalid number of tiles: %u\n" ,
655+ job -> config -> num_tiles );
656+ job -> config -> num_tiles = 0 ;
657+ }
645658
646- pispbe_queue_job (pispbe , & job );
659+ pispbe_queue_job (pispbe , job );
660+ kfree (job );
647661
648- return ;
649- }
662+ return ;
650663
651664unlock_and_return :
652665 /* No job has been queued, just release the lock and return. */
@@ -721,7 +734,7 @@ static irqreturn_t pispbe_isr(int irq, void *dev)
721734 }
722735
723736 /* check if there's more to do before going to sleep */
724- pispbe_schedule (pispbe , NULL , can_queue_another );
737+ pispbe_schedule (pispbe , can_queue_another );
725738
726739 return IRQ_HANDLED ;
727740}
@@ -907,7 +920,8 @@ static void pispbe_node_buffer_queue(struct vb2_buffer *buf)
907920 * Every time we add a buffer, check if there's now some work for the hw
908921 * to do, but only for this client.
909922 */
910- pispbe_schedule (node_group -> pispbe , node_group , false);
923+ if (!pispbe_prepare_job (pispbe , node_group ))
924+ pispbe_schedule (pispbe , false);
911925}
912926
913927static int pispbe_node_start_streaming (struct vb2_queue * q , unsigned int count )
@@ -934,7 +948,8 @@ static int pispbe_node_start_streaming(struct vb2_queue *q, unsigned int count)
934948 node -> node_group -> streaming_map );
935949
936950 /* Maybe we're ready to run. */
937- pispbe_schedule (node_group -> pispbe , node_group , false);
951+ if (!pispbe_prepare_job (pispbe , node_group ))
952+ pispbe_schedule (pispbe , false);
938953
939954 return 0 ;
940955
@@ -987,6 +1002,21 @@ static void pispbe_node_stop_streaming(struct vb2_queue *q)
9871002
9881003 spin_lock_irqsave (& pispbe -> hw_lock , flags );
9891004 node_group -> streaming_map &= ~BIT (node -> id );
1005+
1006+ /* Release all jobs once all nodes have stopped streaming. */
1007+ if (node_group -> streaming_map == 0 ) {
1008+ struct pispbe_job_descriptor * job ;
1009+
1010+ do {
1011+ job = list_first_entry_or_null (& pispbe -> job_queue ,
1012+ struct pispbe_job_descriptor ,
1013+ queue );
1014+ if (job ) {
1015+ list_del (& job -> queue );
1016+ kfree (job );
1017+ }
1018+ } while (!list_empty (& pispbe -> job_queue ));
1019+ }
9901020 spin_unlock_irqrestore (& pispbe -> hw_lock , flags );
9911021
9921022 pm_runtime_mark_last_busy (pispbe -> dev );
@@ -1746,6 +1776,8 @@ static int pispbe_probe(struct platform_device *pdev)
17461776 if (!pispbe )
17471777 return - ENOMEM ;
17481778
1779+ INIT_LIST_HEAD (& pispbe -> job_queue );
1780+
17491781 dev_set_drvdata (& pdev -> dev , pispbe );
17501782 pispbe -> dev = & pdev -> dev ;
17511783 platform_set_drvdata (pdev , pispbe );
0 commit comments