 #include <linux/interrupt.h>
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
+#include <linux/list.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
+#include <linux/spinlock.h>
 #include <linux/types.h>
 #include <linux/wait.h>
 
 
 #define ctrlr_to_drv(ctrlr) container_of(ctrlr, struct rsc_drv, client)
 
+/**
+ * struct cache_req: the request object for caching
+ *
+ * @addr: the address of the resource
+ * @sleep_val: the sleep vote
+ * @wake_val: the wake vote
+ * @list: entry in the controller's cache list
+ */
+struct cache_req {
+        u32 addr;
+        u32 sleep_val;
+        u32 wake_val;
+        struct list_head list;
+};
+
 static struct rpmh_ctrlr *get_rpmh_ctrlr(const struct device *dev)
 {
         struct rsc_drv *drv = dev_get_drvdata(dev->parent);
@@ -60,26 +77,107 @@ void rpmh_tx_done(const struct tcs_request *msg, int r)
         complete(compl);
 }
 
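+/* Must be called with ctrlr->cache_lock held */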
+static struct cache_req *__find_req(struct rpmh_ctrlr *ctrlr, u32 addr)
+{
+        struct cache_req *p, *req = NULL;
+
+        list_for_each_entry(p, &ctrlr->cache, list) {
+                if (p->addr == addr) {
+                        req = p;
+                        break;
+                }
+        }
+
+        return req;
+}
+
+static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr,
+                                           enum rpmh_state state,
+                                           struct tcs_cmd *cmd)
+{
+        struct cache_req *req;
+        unsigned long flags;
+
+        spin_lock_irqsave(&ctrlr->cache_lock, flags);
+        req = __find_req(ctrlr, cmd->addr);
+        if (req)
+                goto existing;
+
+        req = kzalloc(sizeof(*req), GFP_ATOMIC);
+        if (!req) {
+                req = ERR_PTR(-ENOMEM);
+                goto unlock;
+        }
+
+        req->addr = cmd->addr;
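+        /* UINT_MAX marks a vote that has not been set for that state */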
+        req->sleep_val = req->wake_val = UINT_MAX;
+        INIT_LIST_HEAD(&req->list);
+        list_add_tail(&req->list, &ctrlr->cache);
+
+existing:
+        switch (state) {
+        case RPMH_ACTIVE_ONLY_STATE:
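+                /*
+                 * Active votes also take effect at wakeup, so once a sleep
+                 * vote exists, mirror the active request into the wake vote
+                 * to restore the active value on resume.
+                 */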
+                if (req->sleep_val != UINT_MAX)
+                        req->wake_val = cmd->data;
+                break;
+        case RPMH_WAKE_ONLY_STATE:
+                req->wake_val = cmd->data;
+                break;
+        case RPMH_SLEEP_STATE:
+                req->sleep_val = cmd->data;
+                break;
+        default:
+                break;
+        }
+
+        ctrlr->dirty = true;
+unlock:
+        spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+
+        return req;
+}
+
 /**
- * __rpmh_write: send the RPMH request
+ * __rpmh_write: Cache and send the RPMH request
  *
  * @dev: The device making the request
  * @state: Active/Sleep request type
  * @rpm_msg: The data that needs to be sent (cmds).
+ *
+ * Cache the RPMH request and send it if the state is ACTIVE_ONLY.
+ * SLEEP/WAKE_ONLY requests are not sent to the controller at
+ * this time. Use rpmh_flush() to send them to the controller.
  */
 static int __rpmh_write(const struct device *dev, enum rpmh_state state,
                         struct rpmh_request *rpm_msg)
 {
         struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
+        int ret = -EINVAL;
+        struct cache_req *req;
+        int i;
 
         rpm_msg->msg.state = state;
 
-        if (state != RPMH_ACTIVE_ONLY_STATE)
-                return -EINVAL;
+        /* Cache the request in our store and link the payload */
+        for (i = 0; i < rpm_msg->msg.num_cmds; i++) {
+                req = cache_rpm_request(ctrlr, state, &rpm_msg->msg.cmds[i]);
+                if (IS_ERR(req))
+                        return PTR_ERR(req);
+        }
 
-        WARN_ON(irqs_disabled());
+        if (state == RPMH_ACTIVE_ONLY_STATE) {
+                WARN_ON(irqs_disabled());
+                ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg);
+        } else {
+                ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr),
+                                               &rpm_msg->msg);
+                /* Clean up our call by spoofing tx_done */
+                rpmh_tx_done(&rpm_msg->msg, ret);
+        }
 
-        return rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg);
+        return ret;
 }
 
 /**
@@ -114,3 +212,96 @@ int rpmh_write(const struct device *dev, enum rpmh_state state,
         return (ret > 0) ? 0 : -ETIMEDOUT;
 }
 EXPORT_SYMBOL(rpmh_write);
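
A hedged usage sketch, not part of the patch: how a client might drive the
interface above. The resource address and data values are invented, the
function name demo_vote is hypothetical, and the trailing (command, count)
arguments are assumed from rpmh_write()'s signature, of which the hunk header
above shows only the first two parameters.

        static int demo_vote(const struct device *dev)
        {
                struct tcs_cmd cmd = {
                        .addr = 0x30010,        /* hypothetical resource address */
                        .data = 0x1,
                };
                int ret;

                /* ACTIVE_ONLY: cached, then sent to the hardware synchronously */
                ret = rpmh_write(dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
                if (ret)
                        return ret;

                /* SLEEP/WAKE_ONLY: cached only; rpmh_flush() writes it out later */
                cmd.data = 0x0;
                return rpmh_write(dev, RPMH_SLEEP_STATE, &cmd, 1);
        }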
+
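+/* Flush only requests where both votes are set and actually differ */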
+static int is_req_valid(struct cache_req *req)
+{
+        return (req->sleep_val != UINT_MAX &&
+                req->wake_val != UINT_MAX &&
+                req->sleep_val != req->wake_val);
+}
+
+static int send_single(const struct device *dev, enum rpmh_state state,
+                       u32 addr, u32 data)
+{
+        DEFINE_RPMH_MSG_ONSTACK(dev, state, NULL, rpm_msg);
+        struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
+
+        /* Wake sets are always complete and sleep sets are not */
+        rpm_msg.msg.wait_for_compl = (state == RPMH_WAKE_ONLY_STATE);
+        rpm_msg.cmd[0].addr = addr;
+        rpm_msg.cmd[0].data = data;
+        rpm_msg.msg.num_cmds = 1;
+
+        return rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr), &rpm_msg.msg);
+}
+
+/**
+ * rpmh_flush: Flushes the buffered sleep and wake sets to TCS
+ *
+ * @dev: The device making the request
+ *
+ * Return: -EBUSY if the controller is busy, probably waiting on a response
+ * to an RPMH request sent earlier.
+ *
+ * This function is always called from the sleep code on the last CPU
+ * that is powering down the entire system. Since no other RPMH API would be
+ * executing at this time, it is safe to run lockless.
+ */
+int rpmh_flush(const struct device *dev)
+{
+        struct cache_req *p;
+        struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
+        int ret;
+
+        if (!ctrlr->dirty) {
+                pr_debug("Skipping flush, TCS has latest data.\n");
+                return 0;
+        }
+
+        /*
+         * Nobody else should be calling this function other than system PM,
+         * hence we can run without locks.
+         */
+        list_for_each_entry(p, &ctrlr->cache, list) {
+                if (!is_req_valid(p)) {
+                        pr_debug("%s: skipping RPMH req: a:%#x s:%#x w:%#x",
+                                 __func__, p->addr, p->sleep_val, p->wake_val);
+                        continue;
+                }
+                ret = send_single(dev, RPMH_SLEEP_STATE, p->addr, p->sleep_val);
+                if (ret)
+                        return ret;
+                ret = send_single(dev, RPMH_WAKE_ONLY_STATE,
+                                  p->addr, p->wake_val);
+                if (ret)
+                        return ret;
+        }
+
+        ctrlr->dirty = false;
+
+        return 0;
+}
+EXPORT_SYMBOL(rpmh_flush);
+
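A minimal call-site sketch, consistent with the comment above that only system
PM code on the last CPU should call this. The enclosing function name
plat_pm_enter_sleep is hypothetical.

        static int plat_pm_enter_sleep(const struct device *dev)
        {
                int ret;

                /* Write all valid cached sleep/wake votes into the TCS */
                ret = rpmh_flush(dev);
                if (ret)
                        return ret;     /* e.g. -EBUSY: a request is in flight */

                return 0;
        }
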
+/**
+ * rpmh_invalidate: Invalidate all sleep and active sets
+ *
+ * @dev: The device making the request
+ *
+ * Invalidate the sleep and active values in the TCS blocks.
+ */
+int rpmh_invalidate(const struct device *dev)
+{
+        struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
+        int ret;
+
+        ctrlr->dirty = true;
+
+        do {
+                ret = rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));
+        } while (ret == -EAGAIN);
+
+        return ret;
+}
+EXPORT_SYMBOL(rpmh_invalidate);
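
A short sketch of the intended pattern (illustrative only; demo_revote is a
hypothetical helper, and cmd is a caller-supplied struct tcs_cmd as in the
earlier example):

        static int demo_revote(const struct device *dev, struct tcs_cmd *cmd)
        {
                int ret;

                /* Drop all cached sleep/wake votes; -EAGAIN is retried internally */
                ret = rpmh_invalidate(dev);
                if (ret)
                        return ret;

                /* Re-cache a fresh vote; nothing reaches hardware until rpmh_flush() */
                return rpmh_write(dev, RPMH_SLEEP_STATE, cmd, 1);
        }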