Commit a4207a1c authored by Olof Johansson's avatar Olof Johansson
Browse files

Merge tag 'scmi-fixes-5.4' of...

Merge tag 'scmi-fixes-5.4' of git://git.kernel.org/pub/scm/linux/kernel/git/sudeep.holla/linux into arm/fixes

ARM SCMI fixes for v5.4

Couple of fixes: one in the scmi reset driver adding the missing scmi handle
initialisation and another in the scmi reset API implementation fixing the
assignment of reset state

* tag 'scmi-fixes-5.4' of git://git.kernel.org/pub/scm/linux/kernel/git/sudeep.holla/linux:
  reset: reset-scmi: add missing handle initialisation
  firmware: arm_scmi: reset: fix reset_state assignment in scmi_domain_reset

Link: https://lore.kernel.org/r/20190918142139.GA4370@bogus

Signed-off-by: default avatarOlof Johansson <olof@lixom.net>
parents b74d957f 61423712
...@@ -73,6 +73,16 @@ Required properties: ...@@ -73,6 +73,16 @@ Required properties:
as used by the firmware. Refer to platform details as used by the firmware. Refer to platform details
for your implementation for the IDs to use. for your implementation for the IDs to use.
Reset signal bindings for the reset domains based on SCMI Message Protocol
------------------------------------------------------------
This binding for the SCMI reset domain providers uses the generic reset
signal binding[5].
Required properties:
- #reset-cells : Should be 1. Contains the reset domain ID value used
by SCMI commands.
SRAM and Shared Memory for SCMI SRAM and Shared Memory for SCMI
------------------------------- -------------------------------
...@@ -93,6 +103,7 @@ Required sub-node properties: ...@@ -93,6 +103,7 @@ Required sub-node properties:
[2] Documentation/devicetree/bindings/power/power_domain.txt [2] Documentation/devicetree/bindings/power/power_domain.txt
[3] Documentation/devicetree/bindings/thermal/thermal.txt [3] Documentation/devicetree/bindings/thermal/thermal.txt
[4] Documentation/devicetree/bindings/sram/sram.txt [4] Documentation/devicetree/bindings/sram/sram.txt
[5] Documentation/devicetree/bindings/reset/reset.txt
Example: Example:
...@@ -152,6 +163,11 @@ firmware { ...@@ -152,6 +163,11 @@ firmware {
reg = <0x15>; reg = <0x15>;
#thermal-sensor-cells = <1>; #thermal-sensor-cells = <1>;
}; };
scmi_reset: protocol@16 {
reg = <0x16>;
#reset-cells = <1>;
};
}; };
}; };
...@@ -166,6 +182,7 @@ hdlcd@7ff60000 { ...@@ -166,6 +182,7 @@ hdlcd@7ff60000 {
reg = <0 0x7ff60000 0 0x1000>; reg = <0 0x7ff60000 0 0x1000>;
clocks = <&scmi_clk 4>; clocks = <&scmi_clk 4>;
power-domains = <&scmi_devpd 1>; power-domains = <&scmi_devpd 1>;
resets = <&scmi_reset 10>;
}; };
thermal-zones { thermal-zones {
......
...@@ -15575,6 +15575,7 @@ F: drivers/clk/clk-sc[mp]i.c ...@@ -15575,6 +15575,7 @@ F: drivers/clk/clk-sc[mp]i.c
F: drivers/cpufreq/sc[mp]i-cpufreq.c F: drivers/cpufreq/sc[mp]i-cpufreq.c
F: drivers/firmware/arm_scpi.c F: drivers/firmware/arm_scpi.c
F: drivers/firmware/arm_scmi/ F: drivers/firmware/arm_scmi/
F: drivers/reset/reset-scmi.c
F: include/linux/sc[mp]i_protocol.h F: include/linux/sc[mp]i_protocol.h
SYSTEM RESET/SHUTDOWN DRIVERS SYSTEM RESET/SHUTDOWN DRIVERS
......
...@@ -69,7 +69,7 @@ static int scmi_clk_set_rate(struct clk_hw *hw, unsigned long rate, ...@@ -69,7 +69,7 @@ static int scmi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
{ {
struct scmi_clk *clk = to_scmi_clk(hw); struct scmi_clk *clk = to_scmi_clk(hw);
return clk->handle->clk_ops->rate_set(clk->handle, clk->id, 0, rate); return clk->handle->clk_ops->rate_set(clk->handle, clk->id, rate);
} }
static int scmi_clk_enable(struct clk_hw *hw) static int scmi_clk_enable(struct clk_hw *hw)
......
...@@ -2,5 +2,5 @@ ...@@ -2,5 +2,5 @@
obj-y = scmi-bus.o scmi-driver.o scmi-protocols.o obj-y = scmi-bus.o scmi-driver.o scmi-protocols.o
scmi-bus-y = bus.o scmi-bus-y = bus.o
scmi-driver-y = driver.o scmi-driver-y = driver.o
scmi-protocols-y = base.o clock.o perf.o power.o sensors.o scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o
obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o
...@@ -204,7 +204,7 @@ static int scmi_base_discover_agent_get(const struct scmi_handle *handle, ...@@ -204,7 +204,7 @@ static int scmi_base_discover_agent_get(const struct scmi_handle *handle,
if (ret) if (ret)
return ret; return ret;
*(__le32 *)t->tx.buf = cpu_to_le32(id); put_unaligned_le32(id, t->tx.buf);
ret = scmi_do_xfer(handle, t); ret = scmi_do_xfer(handle, t);
if (!ret) if (!ret)
......
...@@ -56,7 +56,7 @@ struct scmi_msg_resp_clock_describe_rates { ...@@ -56,7 +56,7 @@ struct scmi_msg_resp_clock_describe_rates {
struct scmi_clock_set_rate { struct scmi_clock_set_rate {
__le32 flags; __le32 flags;
#define CLOCK_SET_ASYNC BIT(0) #define CLOCK_SET_ASYNC BIT(0)
#define CLOCK_SET_DELAYED BIT(1) #define CLOCK_SET_IGNORE_RESP BIT(1)
#define CLOCK_SET_ROUND_UP BIT(2) #define CLOCK_SET_ROUND_UP BIT(2)
#define CLOCK_SET_ROUND_AUTO BIT(3) #define CLOCK_SET_ROUND_AUTO BIT(3)
__le32 id; __le32 id;
...@@ -67,6 +67,7 @@ struct scmi_clock_set_rate { ...@@ -67,6 +67,7 @@ struct scmi_clock_set_rate {
struct clock_info { struct clock_info {
int num_clocks; int num_clocks;
int max_async_req; int max_async_req;
atomic_t cur_async_req;
struct scmi_clock_info *clk; struct scmi_clock_info *clk;
}; };
...@@ -106,7 +107,7 @@ static int scmi_clock_attributes_get(const struct scmi_handle *handle, ...@@ -106,7 +107,7 @@ static int scmi_clock_attributes_get(const struct scmi_handle *handle,
if (ret) if (ret)
return ret; return ret;
*(__le32 *)t->tx.buf = cpu_to_le32(clk_id); put_unaligned_le32(clk_id, t->tx.buf);
attr = t->rx.buf; attr = t->rx.buf;
ret = scmi_do_xfer(handle, t); ret = scmi_do_xfer(handle, t);
...@@ -203,39 +204,47 @@ scmi_clock_rate_get(const struct scmi_handle *handle, u32 clk_id, u64 *value) ...@@ -203,39 +204,47 @@ scmi_clock_rate_get(const struct scmi_handle *handle, u32 clk_id, u64 *value)
if (ret) if (ret)
return ret; return ret;
*(__le32 *)t->tx.buf = cpu_to_le32(clk_id); put_unaligned_le32(clk_id, t->tx.buf);
ret = scmi_do_xfer(handle, t); ret = scmi_do_xfer(handle, t);
if (!ret) { if (!ret)
__le32 *pval = t->rx.buf; *value = get_unaligned_le64(t->rx.buf);
*value = le32_to_cpu(*pval);
*value |= (u64)le32_to_cpu(*(pval + 1)) << 32;
}
scmi_xfer_put(handle, t); scmi_xfer_put(handle, t);
return ret; return ret;
} }
static int scmi_clock_rate_set(const struct scmi_handle *handle, u32 clk_id, static int scmi_clock_rate_set(const struct scmi_handle *handle, u32 clk_id,
u32 config, u64 rate) u64 rate)
{ {
int ret; int ret;
u32 flags = 0;
struct scmi_xfer *t; struct scmi_xfer *t;
struct scmi_clock_set_rate *cfg; struct scmi_clock_set_rate *cfg;
struct clock_info *ci = handle->clk_priv;
ret = scmi_xfer_get_init(handle, CLOCK_RATE_SET, SCMI_PROTOCOL_CLOCK, ret = scmi_xfer_get_init(handle, CLOCK_RATE_SET, SCMI_PROTOCOL_CLOCK,
sizeof(*cfg), 0, &t); sizeof(*cfg), 0, &t);
if (ret) if (ret)
return ret; return ret;
if (ci->max_async_req &&
atomic_inc_return(&ci->cur_async_req) < ci->max_async_req)
flags |= CLOCK_SET_ASYNC;
cfg = t->tx.buf; cfg = t->tx.buf;
cfg->flags = cpu_to_le32(config); cfg->flags = cpu_to_le32(flags);
cfg->id = cpu_to_le32(clk_id); cfg->id = cpu_to_le32(clk_id);
cfg->value_low = cpu_to_le32(rate & 0xffffffff); cfg->value_low = cpu_to_le32(rate & 0xffffffff);
cfg->value_high = cpu_to_le32(rate >> 32); cfg->value_high = cpu_to_le32(rate >> 32);
ret = scmi_do_xfer(handle, t); if (flags & CLOCK_SET_ASYNC)
ret = scmi_do_xfer_with_response(handle, t);
else
ret = scmi_do_xfer(handle, t);
if (ci->max_async_req)
atomic_dec(&ci->cur_async_req);
scmi_xfer_put(handle, t); scmi_xfer_put(handle, t);
return ret; return ret;
......
...@@ -15,6 +15,8 @@ ...@@ -15,6 +15,8 @@
#include <linux/scmi_protocol.h> #include <linux/scmi_protocol.h>
#include <linux/types.h> #include <linux/types.h>
#include <asm/unaligned.h>
#define PROTOCOL_REV_MINOR_MASK GENMASK(15, 0) #define PROTOCOL_REV_MINOR_MASK GENMASK(15, 0)
#define PROTOCOL_REV_MAJOR_MASK GENMASK(31, 16) #define PROTOCOL_REV_MAJOR_MASK GENMASK(31, 16)
#define PROTOCOL_REV_MAJOR(x) (u16)(FIELD_GET(PROTOCOL_REV_MAJOR_MASK, (x))) #define PROTOCOL_REV_MAJOR(x) (u16)(FIELD_GET(PROTOCOL_REV_MAJOR_MASK, (x)))
...@@ -48,11 +50,11 @@ struct scmi_msg_resp_prot_version { ...@@ -48,11 +50,11 @@ struct scmi_msg_resp_prot_version {
/** /**
* struct scmi_msg_hdr - Message(Tx/Rx) header * struct scmi_msg_hdr - Message(Tx/Rx) header
* *
* @id: The identifier of the command being sent * @id: The identifier of the message being sent
* @protocol_id: The identifier of the protocol used to send @id command * @protocol_id: The identifier of the protocol used to send @id message
* @seq: The token to identify the message. when a message/command returns, * @seq: The token to identify the message. When a message returns, the
* the platform returns the whole message header unmodified including * platform returns the whole message header unmodified including the
* the token * token
* @status: Status of the transfer once it's complete * @status: Status of the transfer once it's complete
* @poll_completion: Indicate if the transfer needs to be polled for * @poll_completion: Indicate if the transfer needs to be polled for
* completion or interrupt mode is used * completion or interrupt mode is used
...@@ -84,17 +86,21 @@ struct scmi_msg { ...@@ -84,17 +86,21 @@ struct scmi_msg {
* @rx: Receive message, the buffer should be pre-allocated to store * @rx: Receive message, the buffer should be pre-allocated to store
* message. If request-ACK protocol is used, we can reuse the same * message. If request-ACK protocol is used, we can reuse the same
* buffer for the rx path as we use for the tx path. * buffer for the rx path as we use for the tx path.
* @done: completion event * @done: command message transmit completion event
* @async: pointer to delayed response message received event completion
*/ */
struct scmi_xfer { struct scmi_xfer {
struct scmi_msg_hdr hdr; struct scmi_msg_hdr hdr;
struct scmi_msg tx; struct scmi_msg tx;
struct scmi_msg rx; struct scmi_msg rx;
struct completion done; struct completion done;
struct completion *async_done;
}; };
void scmi_xfer_put(const struct scmi_handle *h, struct scmi_xfer *xfer); void scmi_xfer_put(const struct scmi_handle *h, struct scmi_xfer *xfer);
int scmi_do_xfer(const struct scmi_handle *h, struct scmi_xfer *xfer); int scmi_do_xfer(const struct scmi_handle *h, struct scmi_xfer *xfer);
int scmi_do_xfer_with_response(const struct scmi_handle *h,
struct scmi_xfer *xfer);
int scmi_xfer_get_init(const struct scmi_handle *h, u8 msg_id, u8 prot_id, int scmi_xfer_get_init(const struct scmi_handle *h, u8 msg_id, u8 prot_id,
size_t tx_size, size_t rx_size, struct scmi_xfer **p); size_t tx_size, size_t rx_size, struct scmi_xfer **p);
int scmi_handle_put(const struct scmi_handle *handle); int scmi_handle_put(const struct scmi_handle *handle);
......
...@@ -30,8 +30,14 @@ ...@@ -30,8 +30,14 @@
#include "common.h" #include "common.h"
#define MSG_ID_MASK GENMASK(7, 0) #define MSG_ID_MASK GENMASK(7, 0)
#define MSG_XTRACT_ID(hdr) FIELD_GET(MSG_ID_MASK, (hdr))
#define MSG_TYPE_MASK GENMASK(9, 8) #define MSG_TYPE_MASK GENMASK(9, 8)
#define MSG_XTRACT_TYPE(hdr) FIELD_GET(MSG_TYPE_MASK, (hdr))
#define MSG_TYPE_COMMAND 0
#define MSG_TYPE_DELAYED_RESP 2
#define MSG_TYPE_NOTIFICATION 3
#define MSG_PROTOCOL_ID_MASK GENMASK(17, 10) #define MSG_PROTOCOL_ID_MASK GENMASK(17, 10)
#define MSG_XTRACT_PROT_ID(hdr) FIELD_GET(MSG_PROTOCOL_ID_MASK, (hdr))
#define MSG_TOKEN_ID_MASK GENMASK(27, 18) #define MSG_TOKEN_ID_MASK GENMASK(27, 18)
#define MSG_XTRACT_TOKEN(hdr) FIELD_GET(MSG_TOKEN_ID_MASK, (hdr)) #define MSG_XTRACT_TOKEN(hdr) FIELD_GET(MSG_TOKEN_ID_MASK, (hdr))
#define MSG_TOKEN_MAX (MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1) #define MSG_TOKEN_MAX (MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1)
...@@ -86,7 +92,7 @@ struct scmi_desc { ...@@ -86,7 +92,7 @@ struct scmi_desc {
}; };
/** /**
* struct scmi_chan_info - Structure representing a SCMI channel informfation * struct scmi_chan_info - Structure representing a SCMI channel information
* *
* @cl: Mailbox Client * @cl: Mailbox Client
* @chan: Transmit/Receive mailbox channel * @chan: Transmit/Receive mailbox channel
...@@ -111,8 +117,9 @@ struct scmi_chan_info { ...@@ -111,8 +117,9 @@ struct scmi_chan_info {
* @handle: Instance of SCMI handle to send to clients * @handle: Instance of SCMI handle to send to clients
* @version: SCMI revision information containing protocol version, * @version: SCMI revision information containing protocol version,
* implementation version and (sub-)vendor identification. * implementation version and (sub-)vendor identification.
* @minfo: Message info * @tx_minfo: Universal Transmit Message management info
* @tx_idr: IDR object to map protocol id to channel info pointer * @tx_idr: IDR object to map protocol id to Tx channel info pointer
* @rx_idr: IDR object to map protocol id to Rx channel info pointer
* @protocols_imp: List of protocols implemented, currently maximum of * @protocols_imp: List of protocols implemented, currently maximum of
* MAX_PROTOCOLS_IMP elements allocated by the base protocol * MAX_PROTOCOLS_IMP elements allocated by the base protocol
* @node: List head * @node: List head
...@@ -123,8 +130,9 @@ struct scmi_info { ...@@ -123,8 +130,9 @@ struct scmi_info {
const struct scmi_desc *desc; const struct scmi_desc *desc;
struct scmi_revision_info version; struct scmi_revision_info version;
struct scmi_handle handle; struct scmi_handle handle;
struct scmi_xfers_info minfo; struct scmi_xfers_info tx_minfo;
struct idr tx_idr; struct idr tx_idr;
struct idr rx_idr;
u8 *protocols_imp; u8 *protocols_imp;
struct list_head node; struct list_head node;
int users; int users;
...@@ -182,7 +190,7 @@ static inline int scmi_to_linux_errno(int errno) ...@@ -182,7 +190,7 @@ static inline int scmi_to_linux_errno(int errno)
static inline void scmi_dump_header_dbg(struct device *dev, static inline void scmi_dump_header_dbg(struct device *dev,
struct scmi_msg_hdr *hdr) struct scmi_msg_hdr *hdr)
{ {
dev_dbg(dev, "Command ID: %x Sequence ID: %x Protocol: %x\n", dev_dbg(dev, "Message ID: %x Sequence ID: %x Protocol: %x\n",
hdr->id, hdr->seq, hdr->protocol_id); hdr->id, hdr->seq, hdr->protocol_id);
} }
...@@ -190,64 +198,20 @@ static void scmi_fetch_response(struct scmi_xfer *xfer, ...@@ -190,64 +198,20 @@ static void scmi_fetch_response(struct scmi_xfer *xfer,
struct scmi_shared_mem __iomem *mem) struct scmi_shared_mem __iomem *mem)
{ {
xfer->hdr.status = ioread32(mem->msg_payload); xfer->hdr.status = ioread32(mem->msg_payload);
/* Skip the length of header and statues in payload area i.e 8 bytes*/ /* Skip the length of header and status in payload area i.e 8 bytes */
xfer->rx.len = min_t(size_t, xfer->rx.len, ioread32(&mem->length) - 8); xfer->rx.len = min_t(size_t, xfer->rx.len, ioread32(&mem->length) - 8);
/* Take a copy to the rx buffer.. */ /* Take a copy to the rx buffer.. */
memcpy_fromio(xfer->rx.buf, mem->msg_payload + 4, xfer->rx.len); memcpy_fromio(xfer->rx.buf, mem->msg_payload + 4, xfer->rx.len);
} }
/**
* scmi_rx_callback() - mailbox client callback for receive messages
*
* @cl: client pointer
* @m: mailbox message
*
* Processes one received message to appropriate transfer information and
* signals completion of the transfer.
*
* NOTE: This function will be invoked in IRQ context, hence should be
* as optimal as possible.
*/
static void scmi_rx_callback(struct mbox_client *cl, void *m)
{
u16 xfer_id;
struct scmi_xfer *xfer;
struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
struct device *dev = cinfo->dev;
struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
struct scmi_xfers_info *minfo = &info->minfo;
struct scmi_shared_mem __iomem *mem = cinfo->payload;
xfer_id = MSG_XTRACT_TOKEN(ioread32(&mem->msg_header));
/* Are we even expecting this? */
if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
dev_err(dev, "message for %d is not expected!\n", xfer_id);
return;
}
xfer = &minfo->xfer_block[xfer_id];
scmi_dump_header_dbg(dev, &xfer->hdr);
/* Is the message of valid length? */
if (xfer->rx.len > info->desc->max_msg_size) {
dev_err(dev, "unable to handle %zu xfer(max %d)\n",
xfer->rx.len, info->desc->max_msg_size);
return;
}
scmi_fetch_response(xfer, mem);
complete(&xfer->done);
}
/** /**
* pack_scmi_header() - packs and returns 32-bit header * pack_scmi_header() - packs and returns 32-bit header
* *
* @hdr: pointer to header containing all the information on message id, * @hdr: pointer to header containing all the information on message id,
* protocol id and sequence id. * protocol id and sequence id.
* *
* Return: 32-bit packed command header to be sent to the platform. * Return: 32-bit packed message header to be sent to the platform.
*/ */
static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr) static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
{ {
...@@ -256,6 +220,18 @@ static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr) ...@@ -256,6 +220,18 @@ static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
FIELD_PREP(MSG_PROTOCOL_ID_MASK, hdr->protocol_id); FIELD_PREP(MSG_PROTOCOL_ID_MASK, hdr->protocol_id);
} }
/**
* unpack_scmi_header() - unpacks and records message and protocol id
*
* @msg_hdr: 32-bit packed message header sent from the platform
* @hdr: pointer to header to fetch message and protocol id.
*/
static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr)
{
hdr->id = MSG_XTRACT_ID(msg_hdr);
hdr->protocol_id = MSG_XTRACT_PROT_ID(msg_hdr);
}
/** /**
* scmi_tx_prepare() - mailbox client callback to prepare for the transfer * scmi_tx_prepare() - mailbox client callback to prepare for the transfer
* *
...@@ -271,6 +247,14 @@ static void scmi_tx_prepare(struct mbox_client *cl, void *m) ...@@ -271,6 +247,14 @@ static void scmi_tx_prepare(struct mbox_client *cl, void *m)
struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl); struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
struct scmi_shared_mem __iomem *mem = cinfo->payload; struct scmi_shared_mem __iomem *mem = cinfo->payload;
/*
* Ideally channel must be free by now unless OS timeout last
* request and platform continued to process the same, wait
* until it releases the shared memory, otherwise we may endup
* overwriting its response with new message payload or vice-versa
*/
spin_until_cond(ioread32(&mem->channel_status) &
SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
/* Mark channel busy + clear error */ /* Mark channel busy + clear error */
iowrite32(0x0, &mem->channel_status); iowrite32(0x0, &mem->channel_status);
iowrite32(t->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED, iowrite32(t->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
...@@ -285,8 +269,9 @@ static void scmi_tx_prepare(struct mbox_client *cl, void *m) ...@@ -285,8 +269,9 @@ static void scmi_tx_prepare(struct mbox_client *cl, void *m)
* scmi_xfer_get() - Allocate one message * scmi_xfer_get() - Allocate one message
* *
* @handle: Pointer to SCMI entity handle * @handle: Pointer to SCMI entity handle
* @minfo: Pointer to Tx/Rx Message management info based on channel type
* *
* Helper function which is used by various command functions that are * Helper function which is used by various message functions that are
* exposed to clients of this driver for allocating a message traffic event. * exposed to clients of this driver for allocating a message traffic event.
* *
* This function can sleep depending on pending requests already in the system * This function can sleep depending on pending requests already in the system
...@@ -295,13 +280,13 @@ static void scmi_tx_prepare(struct mbox_client *cl, void *m) ...@@ -295,13 +280,13 @@ static void scmi_tx_prepare(struct mbox_client *cl, void *m)
* *
* Return: 0 if all went fine, else corresponding error. * Return: 0 if all went fine, else corresponding error.
*/ */
static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle) static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
struct scmi_xfers_info *minfo)
{ {
u16 xfer_id; u16 xfer_id;
struct scmi_xfer *xfer; struct scmi_xfer *xfer;
unsigned long flags, bit_pos; unsigned long flags, bit_pos;
struct scmi_info *info = handle_to_scmi_info(handle); struct scmi_info *info = handle_to_scmi_info(handle);
struct scmi_xfers_info *minfo = &info->minfo;
/* Keep the locked section as small as possible */ /* Keep the locked section as small as possible */
spin_lock_irqsave(&minfo->xfer_lock, flags); spin_lock_irqsave(&minfo->xfer_lock, flags);
...@@ -324,18 +309,17 @@ static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle) ...@@ -324,18 +309,17 @@ static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle)
} }
/** /**
* scmi_xfer_put() - Release a message * __scmi_xfer_put() - Release a message
* *
* @handle: Pointer to SCMI entity handle * @minfo: Pointer to Tx/Rx Message management info based on channel type
* @xfer: message that was reserved by scmi_xfer_get * @xfer: message that was reserved by scmi_xfer_get
* *
* This holds a spinlock to maintain integrity of internal data structures. * This holds a spinlock to maintain integrity of internal data structures.
*/ */
void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer) static void
__scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
{ {
unsigned long flags; unsigned long flags;
struct scmi_info *info = handle_to_scmi_info(handle);
struct scmi_xfers_info *minfo = &info->minfo;
/* /*
* Keep the locked section as small as possible * Keep the locked section as small as possible
...@@ -347,6 +331,68 @@ void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer) ...@@ -347,6 +331,68 @@ void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
spin_unlock_irqrestore(&minfo->xfer_lock, flags); spin_unlock_irqrestore(&minfo->xfer_lock, flags);
} }
/**
* scmi_rx_callback() - mailbox client callback for receive messages
*
* @cl: client pointer
* @m: mailbox message
*
* Processes one received message to appropriate transfer information and
* signals completion of the transfer.
*
* NOTE: This function will be invoked in IRQ context, hence should be
* as optimal as possible.
*/
static void scmi_rx_callback(struct mbox_client *cl, void *m)
{
u8 msg_type;
u32 msg_hdr;
u16 xfer_id;
struct scmi_xfer *xfer;
struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
struct device *dev = cinfo->dev;
struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
struct scmi_xfers_info *minfo = &info->tx_minfo;
struct scmi_shared_mem __iomem *mem = cinfo->payload;
msg_hdr = ioread32(&mem->msg_header);
msg_type = MSG_XTRACT_TYPE(msg_hdr