@@ -54,6 +54,20 @@ struct cache_req {
 	struct list_head list;
 };
 
+/**
+ * struct batch_cache_req - An entry in our batch cache
+ *
+ * @list: linked list object
+ * @count: number of messages
+ * @rpm_msgs: the messages
+ */
+
+struct batch_cache_req {
+	struct list_head list;
+	int count;
+	struct rpmh_request rpm_msgs[];
+};
+
 static struct rpmh_ctrlr *get_rpmh_ctrlr(const struct device *dev)
 {
 	struct rsc_drv *drv = dev_get_drvdata(dev->parent);
@@ -73,10 +87,13 @@ void rpmh_tx_done(const struct tcs_request *msg, int r)
 		dev_err(rpm_msg->dev, "RPMH TX fail in msg addr=%#x, err=%d\n",
 			rpm_msg->msg.cmds[0].addr, r);
 
+	if (!compl)
+		goto exit;
+
 	/* Signal the blocking thread we are done */
-	if (compl)
-		complete(compl);
+	complete(compl);
 
+exit:
 	if (rpm_msg->needs_free)
 		kfree(rpm_msg);
 }
@@ -264,6 +281,138 @@ int rpmh_write(const struct device *dev, enum rpmh_state state,
 }
 EXPORT_SYMBOL(rpmh_write);
 
+static void cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctrlr->cache_lock, flags);
+	list_add_tail(&req->list, &ctrlr->batch_cache);
+	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+}
+
+static int flush_batch(struct rpmh_ctrlr *ctrlr)
+{
+	struct batch_cache_req *req;
+	const struct rpmh_request *rpm_msg;
+	unsigned long flags;
+	int ret = 0;
+	int i;
+
+	/* Send Sleep/Wake requests to the controller, expect no response */
+	spin_lock_irqsave(&ctrlr->cache_lock, flags);
+	list_for_each_entry(req, &ctrlr->batch_cache, list) {
+		for (i = 0; i < req->count; i++) {
+			rpm_msg = req->rpm_msgs + i;
+			ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr),
+						       &rpm_msg->msg);
+			if (ret)
+				break;
+		}
+	}
+	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+
+	return ret;
+}
+
+static void invalidate_batch(struct rpmh_ctrlr *ctrlr)
+{
+	struct batch_cache_req *req, *tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctrlr->cache_lock, flags);
+	list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list)
+		kfree(req);
+	INIT_LIST_HEAD(&ctrlr->batch_cache);
+	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+}
+
+/**
+ * rpmh_write_batch: Write multiple sets of RPMH commands and wait for the
+ * batch to finish.
+ *
+ * @dev: the device making the request
+ * @state: Active/sleep set
+ * @cmd: The payload data
+ * @n: Array of counts of commands in each batch, zero-terminated.
+ *
+ * Write a request to the RSC controller without caching. If the request
+ * state is ACTIVE, then the requests are treated as completion requests
+ * and sent to the controller immediately. The function waits until all the
+ * commands are complete. If the request was to SLEEP or WAKE_ONLY, then the
+ * request is sent as fire-n-forget and no ack is expected.
+ *
+ * May sleep. Do not call from atomic contexts for ACTIVE_ONLY requests.
+ */
+int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
+		     const struct tcs_cmd *cmd, u32 *n)
+{
+	struct batch_cache_req *req;
+	struct rpmh_request *rpm_msgs;
+	DECLARE_COMPLETION_ONSTACK(compl);
+	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
+	unsigned long time_left;
+	int count = 0;
+	int ret, i, j;
+
+	if (!cmd || !n)
+		return -EINVAL;
+
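+	/* @n is a zero-terminated array of per-batch command counts */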
+	while (n[count] > 0)
+		count++;
+	if (!count)
+		return -EINVAL;
+
+	req = kzalloc(sizeof(*req) + count * sizeof(req->rpm_msgs[0]),
+		      GFP_ATOMIC);
+	if (!req)
+		return -ENOMEM;
+	req->count = count;
+	rpm_msgs = req->rpm_msgs;
+
+	for (i = 0; i < count; i++) {
+		__fill_rpmh_msg(rpm_msgs + i, state, cmd, n[i]);
+		cmd += n[i];
+	}
+
+	if (state != RPMH_ACTIVE_ONLY_STATE) {
+		cache_batch(ctrlr, req);
+		return 0;
+	}
+
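+	/* ACTIVE_ONLY: send every batch now; each ack signals @compl once */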
+	for (i = 0; i < count; i++) {
+		rpm_msgs[i].completion = &compl;
+		ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msgs[i].msg);
+		if (ret) {
+			pr_err("Error(%d) sending RPMH message addr=%#x\n",
+			       ret, rpm_msgs[i].msg.cmds[0].addr);
+			for (j = i; j < count; j++)
+				rpmh_tx_done(&rpm_msgs[j].msg, ret);
+			break;
+		}
+	}
+
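+	/*
+	 * The single on-stack completion is signalled once per message, so
+	 * wait count times, sharing one overall timeout budget.
+	 */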
+	time_left = RPMH_TIMEOUT_MS;
+	for (i = 0; i < count; i++) {
+		time_left = wait_for_completion_timeout(&compl, time_left);
+		if (!time_left) {
+			/*
+			 * Better hope they never finish because they'll signal
+			 * the completion on our stack and that's bad once
+			 * we've returned from the function.
+			 */
+			WARN_ON(1);
+			ret = -ETIMEDOUT;
+			goto exit;
+		}
+	}
+
+exit:
+	kfree(req);
+
+	return ret;
+}
+EXPORT_SYMBOL(rpmh_write_batch);
+
 static int is_req_valid(struct cache_req *req)
 {
 	return (req->sleep_val != UINT_MAX &&
@@ -309,6 +458,11 @@ int rpmh_flush(const struct device *dev)
 		return 0;
 	}
 
+	/* First flush the cached batch requests */
+	ret = flush_batch(ctrlr);
+	if (ret)
+		return ret;
+
 	/*
 	 * Nobody else should be calling this function other than system PM,
 	 * hence we can run without locks.
@@ -347,6 +501,7 @@ int rpmh_invalidate(const struct device *dev)
 	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
 	int ret;
 
+	invalidate_batch(ctrlr);
 	ctrlr->dirty = true;
 
 	do {
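
For reference, a minimal caller-side sketch of the batch API added above. The device pointer, resource addresses, and data values are hypothetical placeholders supplied by the calling driver, not real RPMH resources:

/*
 * Hypothetical example: two batches sent as ACTIVE_ONLY requests.
 * Addresses and data values are placeholders.
 */
#include <soc/qcom/rpmh.h>

static int example_send_batches(const struct device *dev)
{
	struct tcs_cmd cmds[] = {
		{ .addr = 0x30000, .data = 0x1 },	/* batch 0, cmd 0 */
		{ .addr = 0x30004, .data = 0x2 },	/* batch 0, cmd 1 */
		{ .addr = 0x30010, .data = 0x3 },	/* batch 1, cmd 0 */
	};
	u32 n[] = { 2, 1, 0 };	/* per-batch counts, zero-terminated */

	/* Blocks until every command in every batch is acked */
	return rpmh_write_batch(dev, RPMH_ACTIVE_ONLY_STATE, cmds, n);
}

A SLEEP or WAKE_ONLY state would instead cache the batch for rpmh_flush() and return immediately.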