@@ -12,6 +12,7 @@
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
+#include <linux/psci.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
@@ -113,13 +114,14 @@ static struct cache_req *__find_req(struct rpmh_ctrlr *ctrlr, u32 addr)
 	return req;
 }
 
-static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr,
+static int cache_rpm_request(struct rpmh_ctrlr *ctrlr,
 					   enum rpmh_state state,
 					   struct tcs_cmd *cmd)
 {
 	struct cache_req *req;
 	unsigned long flags;
 	u32 old_sleep_val, old_wake_val;
+	int ret = 0;
 
 	spin_lock_irqsave(&ctrlr->cache_lock, flags);
 	req = __find_req(ctrlr, cmd->addr);
@@ -155,10 +157,14 @@ static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr,
 			req->sleep_val != UINT_MAX &&
 			req->wake_val != UINT_MAX;
 
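+	/* No OSI: sleep code won't flush caches, so flush dirty data now */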
+	if (ctrlr->dirty && !psci_has_osi_support())
+		ret = rpmh_flush(ctrlr);
+
 unlock:
 	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
 
-	return req;
+	return ret;
 }
 
 /**
@@ -176,17 +182,16 @@ static int __rpmh_write(const struct device *dev, enum rpmh_state state,
 			struct rpmh_request *rpm_msg)
 {
 	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
-	int ret = -EINVAL;
-	struct cache_req *req;
+	int ret;
 	int i;
 
 	rpm_msg->msg.state = state;
 
 	/* Cache the request in our store and link the payload */
 	for (i = 0; i < rpm_msg->msg.num_cmds; i++) {
-		req = cache_rpm_request(ctrlr, state, &rpm_msg->msg.cmds[i]);
-		if (IS_ERR(req))
-			return PTR_ERR(req);
+		ret = cache_rpm_request(ctrlr, state, &rpm_msg->msg.cmds[i]);
+		if (ret)
+			return ret;
 	}
 
 	rpm_msg->msg.state = state;
@@ -283,26 +288,33 @@ int rpmh_write(const struct device *dev, enum rpmh_state state,
 }
 EXPORT_SYMBOL(rpmh_write);
 
-static void cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req)
+static int cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req)
 {
 	unsigned long flags;
+	int ret = 0;
 
 	spin_lock_irqsave(&ctrlr->cache_lock, flags);
+
 	list_add_tail(&req->list, &ctrlr->batch_cache);
 	ctrlr->dirty = true;
+
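+	/* No OSI: flush now, the last-CPU sleep path won't do it for us */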
+	if (!psci_has_osi_support())
+		ret = rpmh_flush(ctrlr);
+
 	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+
+	return ret;
 }
 
 static int flush_batch(struct rpmh_ctrlr *ctrlr)
 {
 	struct batch_cache_req *req;
 	const struct rpmh_request *rpm_msg;
-	unsigned long flags;
 	int ret = 0;
 	int i;
 
 	/* Send Sleep/Wake requests to the controller, expect no response */
-	spin_lock_irqsave(&ctrlr->cache_lock, flags);
 	list_for_each_entry(req, &ctrlr->batch_cache, list) {
 		for (i = 0; i < req->count; i++) {
 			rpm_msg = req->rpm_msgs + i;
@@ -312,7 +324,6 @@ static int flush_batch(struct rpmh_ctrlr *ctrlr)
 				break;
 		}
 	}
-	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
 
 	return ret;
 }
@@ -384,10 +395,8 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
 		cmd += n[i];
 	}
 
-	if (state != RPMH_ACTIVE_ONLY_STATE) {
-		cache_batch(ctrlr, req);
-		return 0;
-	}
+	if (state != RPMH_ACTIVE_ONLY_STATE)
+		return cache_batch(ctrlr, req);
 
 	for (i = 0; i < count; i++) {
 		struct completion *compl = &compls[i];
@@ -450,12 +459,10 @@ static int send_single(struct rpmh_ctrlr *ctrlr, enum rpmh_state state,
  *
  * @ctrlr: controller making request to flush cached data
  *
- * Return: -EBUSY if the controller is busy, probably waiting on a response
- * to a RPMH request sent earlier.
+ * Return: 0 on success, error number otherwise.
  *
- * This function is always called from the sleep code from the last CPU
- * that is powering down the entire system. Since no other RPMH API would be
- * executing at this time, it is safe to run lockless.
+ * This function can either be called from sleep code on the last CPU
+ * (thus no spinlock needed) or with the ctrlr->cache_lock already held.
  */
 int rpmh_flush(struct rpmh_ctrlr *ctrlr)
 {
@@ -472,10 +479,6 @@ int rpmh_flush(struct rpmh_ctrlr *ctrlr)
 	if (ret)
 		return ret;
 
-	/*
-	 * Nobody else should be calling this function other than system PM,
-	 * hence we can run without locks.
-	 */
 	list_for_each_entry(p, &ctrlr->cache, list) {
 		if (!is_req_valid(p)) {
 			pr_debug("%s: skipping RPMH req: a:%#x s:%#x w:%#x",