From cf5c5456363bd342b99ea79998e856b906ebb5ae Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Fri, 2 May 2014 13:38:31 -0700 Subject: arm64: topology: Tell the scheduler about the relative power of cores In heterogeneous systems like big.LITTLE systems the scheduler will be able to make better use of the available cores if we provide power numbers to it indicating their relative performance. Do this by parsing the CPU nodes in the DT. This code currently has no effect as no information on the relative performance of the cores is provided. Signed-off-by: Mark Brown Signed-off-by: Jon Medhurst --- arch/arm64/kernel/topology.c | 153 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 153 insertions(+) diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c index b6ee26b0939a..7a6eabec3c06 100644 --- a/arch/arm64/kernel/topology.c +++ b/arch/arm64/kernel/topology.c @@ -19,10 +19,34 @@ #include #include #include +#include #include #include +/* + * cpu power table + * This per cpu data structure describes the relative capacity of each core. + * On a heteregenous system, cores don't have the same computation capacity + * and we reflect that difference in the cpu_power field so the scheduler can + * take this difference into account during load balance. A per cpu structure + * is preferred because each CPU updates its own cpu_power field during the + * load balance except for idle cores. One idle core is selected to run the + * rebalance_domains for all idle cores and the cpu_power can be updated + * during this sequence. + */ +static DEFINE_PER_CPU(unsigned long, cpu_scale); + +unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu) +{ + return per_cpu(cpu_scale, cpu); +} + +static void set_power_scale(unsigned int cpu, unsigned long power) +{ + per_cpu(cpu_scale, cpu) = power; +} + static int __init get_cpu_for_node(struct device_node *node) { struct device_node *cpu_node; @@ -161,6 +185,38 @@ static int __init parse_cluster(struct device_node *cluster, int depth) return 0; } +struct cpu_efficiency { + const char *compatible; + unsigned long efficiency; +}; + +/* + * Table of relative efficiency of each processors + * The efficiency value must fit in 20bit and the final + * cpu_scale value must be in the range + * 0 < cpu_scale < 3*SCHED_POWER_SCALE/2 + * in order to return at most 1 when DIV_ROUND_CLOSEST + * is used to compute the capacity of a CPU. + * Processors that are not defined in the table, + * use the default SCHED_POWER_SCALE value for cpu_scale. + */ +static const struct cpu_efficiency table_efficiency[] = { + { NULL, }, +}; + +static unsigned long *__cpu_capacity; +#define cpu_capacity(cpu) __cpu_capacity[cpu] + +static unsigned long middle_capacity = 1; + +/* + * Iterate all CPUs' descriptor in DT and compute the efficiency + * (as per table_efficiency). Also calculate a middle efficiency + * as close as possible to (max{eff_i} - min{eff_i}) / 2 + * This is later used to scale the cpu_power field such that an + * 'average' CPU is of middle power. Also see the comments near + * table_efficiency[] and update_cpu_power(). 
+ */ static int __init parse_dt_topology(void) { struct device_node *cn, *map; @@ -200,6 +256,91 @@ out: return ret; } +static void __init parse_dt_cpu_power(void) +{ + const struct cpu_efficiency *cpu_eff; + struct device_node *cn; + unsigned long min_capacity = ULONG_MAX; + unsigned long max_capacity = 0; + unsigned long capacity = 0; + int cpu; + + __cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity), + GFP_NOWAIT); + + for_each_possible_cpu(cpu) { + const u32 *rate; + int len; + + /* Too early to use cpu->of_node */ + cn = of_get_cpu_node(cpu, NULL); + if (!cn) { + pr_err("Missing device node for CPU %d\n", cpu); + continue; + } + + for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++) + if (of_device_is_compatible(cn, cpu_eff->compatible)) + break; + + if (cpu_eff->compatible == NULL) { + pr_warn("%s: Unknown CPU type\n", cn->full_name); + continue; + } + + rate = of_get_property(cn, "clock-frequency", &len); + if (!rate || len != 4) { + pr_err("%s: Missing clock-frequency property\n", + cn->full_name); + continue; + } + + capacity = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency; + + /* Save min capacity of the system */ + if (capacity < min_capacity) + min_capacity = capacity; + + /* Save max capacity of the system */ + if (capacity > max_capacity) + max_capacity = capacity; + + cpu_capacity(cpu) = capacity; + } + + /* If min and max capacities are equal we bypass the update of the + * cpu_scale because all CPUs have the same capacity. Otherwise, we + * compute a middle_capacity factor that will ensure that the capacity + * of an 'average' CPU of the system will be as close as possible to + * SCHED_POWER_SCALE, which is the default value, but with the + * constraint explained near table_efficiency[]. + */ + if (min_capacity == max_capacity) + return; + else if (4 * max_capacity < (3 * (max_capacity + min_capacity))) + middle_capacity = (min_capacity + max_capacity) + >> (SCHED_POWER_SHIFT+1); + else + middle_capacity = ((max_capacity / 3) + >> (SCHED_POWER_SHIFT-1)) + 1; +} + +/* + * Look for a customed capacity of a CPU in the cpu_topo_data table during the + * boot. The update of all CPUs is in O(n^2) for heteregeneous system but the + * function returns directly for SMP system. + */ +static void update_cpu_power(unsigned int cpu) +{ + if (!cpu_capacity(cpu)) + return; + + set_power_scale(cpu, cpu_capacity(cpu) / middle_capacity); + + pr_info("CPU%u: update cpu_power %lu\n", + cpu, arch_scale_freq_power(NULL, cpu)); +} + /* * cpu topology table */ @@ -269,6 +410,7 @@ void store_cpu_topology(unsigned int cpuid) topology_populated: update_siblings_masks(cpuid); + update_cpu_power(cpuid); } static void __init reset_cpu_topology(void) @@ -289,6 +431,14 @@ static void __init reset_cpu_topology(void) } } +static void __init reset_cpu_power(void) +{ + unsigned int cpu; + + for_each_possible_cpu(cpu) + set_power_scale(cpu, SCHED_POWER_SCALE); +} + void __init init_cpu_topology(void) { reset_cpu_topology(); @@ -299,4 +449,7 @@ void __init init_cpu_topology(void) */ if (parse_dt_topology()) reset_cpu_topology(); + + reset_cpu_power(); + parse_dt_cpu_power(); } -- cgit v1.2.3 From 8af7eb2659d217be0746e701007c30a5db31d235 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Fri, 2 May 2014 13:38:32 -0700 Subject: arm64: topology: Provide relative power numbers for cores Provide performance numbers to the scheduler to help it fill the cores in the system on big.LITTLE systems. 
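To make the scaling concrete, the standalone sketch below reproduces the arithmetic of parse_dt_cpu_power() and update_cpu_power() from the previous patch. It is only an illustration: the clock rates are invented assumptions, while the efficiency values are the Cortex-A57/A53 numbers this patch adds.

/*
 * Standalone sketch (user space) of the capacity arithmetic in
 * parse_dt_cpu_power() and update_cpu_power().  The clock rates are
 * assumptions chosen only to illustrate the maths.
 */
#include <stdio.h>

#define SCHED_POWER_SHIFT       10
#define SCHED_POWER_SCALE       (1UL << SCHED_POWER_SHIFT)

int main(void)
{
        unsigned long rate[2] = { 1800000000UL, 1300000000UL }; /* assumed Hz */
        unsigned long eff[2]  = { 3891, 2048 };  /* A57, A53 efficiencies */
        unsigned long cap[2], min = ~0UL, max = 0, middle;
        int i;

        for (i = 0; i < 2; i++) {
                /* capacity = (clock-frequency >> 20) * efficiency */
                cap[i] = (rate[i] >> 20) * eff[i];
                if (cap[i] < min)
                        min = cap[i];
                if (cap[i] > max)
                        max = cap[i];
        }

        /* Same middle_capacity selection as parse_dt_cpu_power() */
        if (4 * max < 3 * (max + min))
                middle = (min + max) >> (SCHED_POWER_SHIFT + 1);
        else
                middle = ((max / 3) >> (SCHED_POWER_SHIFT - 1)) + 1;

        for (i = 0; i < 2; i++)
                printf("cpu%d: capacity %lu -> cpu_power %lu\n",
                       i, cap[i], cap[i] / middle);

        return 0;
}

With these example inputs the big core lands just below the 3*SCHED_POWER_SCALE/2 ceiling described next to table_efficiency[], and the little core at roughly half of SCHED_POWER_SCALE.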
With the current scheduler this may perform poorly for applications that try to do OpenMP style work over all cores but should help for more common workloads. The current 32 bit ARM implementation provides a similar estimate so this helps ensure that work to improve big.LITTLE systems on ARMv7 systems performs similarly on ARMv8 systems. The power numbers are the same as for ARMv7 since it seems that the expected differential between the big and little cores is very similar on both ARMv7 and ARMv8. In both ARMv7 and ARMv8 cases the numbers were based on the published DMIPS numbers. These numbers are just an initial and basic approximation for use with the current scheduler, it is likely that both experience with silicon and ongoing work on improving the scheduler will lead to further tuning or will tune automatically at runtime and so make the specific choice of numbers here less critical. Signed-off-by: Mark Brown Signed-off-by: Jon Medhurst --- arch/arm64/kernel/topology.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c index 7a6eabec3c06..7924ecb6faf7 100644 --- a/arch/arm64/kernel/topology.c +++ b/arch/arm64/kernel/topology.c @@ -201,6 +201,8 @@ struct cpu_efficiency { * use the default SCHED_POWER_SCALE value for cpu_scale. */ static const struct cpu_efficiency table_efficiency[] = { + { "arm,cortex-a57", 3891 }, + { "arm,cortex-a53", 2048 }, { NULL, }, }; -- cgit v1.2.3 From d9b20074c6b8c4ff46834f06aae0ef6a3d3fd950 Mon Sep 17 00:00:00 2001 From: Suman Anna Date: Thu, 12 Jun 2014 22:30:34 +0530 Subject: mailbox: rename pl320-ipc specific mailbox.h The patch 30058677 "ARM / highbank: add support for pl320 IPC" added a pl320 IPC specific header file as a generic mailbox.h. This file has been renamed appropriately to allow the introduction of the generic mailbox API framework. Acked-by: Mark Langsdorf Cc: Rafael J. 
Wysocki Signed-off-by: Suman Anna Signed-off-by: Jon Medhurst --- arch/arm/mach-highbank/highbank.c | 2 +- drivers/cpufreq/highbank-cpufreq.c | 2 +- drivers/mailbox/pl320-ipc.c | 2 +- include/linux/mailbox.h | 17 ----------------- include/linux/pl320-ipc.h | 17 +++++++++++++++++ 5 files changed, 20 insertions(+), 20 deletions(-) delete mode 100644 include/linux/mailbox.h create mode 100644 include/linux/pl320-ipc.h diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c index 8c35ae4ff176..07a09570175d 100644 --- a/arch/arm/mach-highbank/highbank.c +++ b/arch/arm/mach-highbank/highbank.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/cpufreq/highbank-cpufreq.c b/drivers/cpufreq/highbank-cpufreq.c index bf8902a0866d..b464f29d8d54 100644 --- a/drivers/cpufreq/highbank-cpufreq.c +++ b/drivers/cpufreq/highbank-cpufreq.c @@ -19,7 +19,7 @@ #include #include #include -#include +#include #include #define HB_CPUFREQ_CHANGE_NOTE 0x80000001 diff --git a/drivers/mailbox/pl320-ipc.c b/drivers/mailbox/pl320-ipc.c index d873cbae2fbb..f3755e0aa935 100644 --- a/drivers/mailbox/pl320-ipc.c +++ b/drivers/mailbox/pl320-ipc.c @@ -26,7 +26,7 @@ #include #include -#include +#include #define IPCMxSOURCE(m) ((m) * 0x40) #define IPCMxDSET(m) (((m) * 0x40) + 0x004) diff --git a/include/linux/mailbox.h b/include/linux/mailbox.h deleted file mode 100644 index 5161f63ec1c8..000000000000 --- a/include/linux/mailbox.h +++ /dev/null @@ -1,17 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . - */ - -int pl320_ipc_transmit(u32 *data); -int pl320_ipc_register_notifier(struct notifier_block *nb); -int pl320_ipc_unregister_notifier(struct notifier_block *nb); diff --git a/include/linux/pl320-ipc.h b/include/linux/pl320-ipc.h new file mode 100644 index 000000000000..5161f63ec1c8 --- /dev/null +++ b/include/linux/pl320-ipc.h @@ -0,0 +1,17 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +int pl320_ipc_transmit(u32 *data); +int pl320_ipc_register_notifier(struct notifier_block *nb); +int pl320_ipc_unregister_notifier(struct notifier_block *nb); -- cgit v1.2.3 From 47a565a6d351095432a87fe33cba4244afbd1acb Mon Sep 17 00:00:00 2001 From: Jassi Brar Date: Thu, 12 Jun 2014 22:31:19 +0530 Subject: mailbox: Introduce framework for mailbox Introduce common framework for client/protocol drivers and controller drivers of Inter-Processor-Communication (IPC). 
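In outline, a client of the API this patch introduces submits messages as in the hedged sketch below; the "demo" names, the message layout and the single-channel blocking-mode usage are illustrative assumptions, not part of this patch.

/*
 * Hedged sketch of a blocking-mode client.  The "demo" device and message
 * layout are invented; a fuller example follows in the documentation patch
 * later in this series.
 */
#include <linux/err.h>
#include <linux/mailbox_client.h>
#include <linux/platform_device.h>
#include <linux/types.h>

struct demo_msg {
        u32 cmd;
        u32 arg;
};

static int demo_send_cmd(struct platform_device *pdev, u32 cmd, u32 arg)
{
        struct mbox_client cl = {
                .dev            = &pdev->dev,
                .tx_block       = true,         /* sleep until the remote reads it */
                .tx_tout        = 100,          /* ...or give up after 100 ms */
                .knows_txdone   = false,
        };
        struct demo_msg msg = { .cmd = cmd, .arg = arg };
        struct mbox_chan *chan;
        int ret;

        /* First channel listed in this device's 'mboxes' DT property */
        chan = mbox_request_channel(&cl, 0);
        if (IS_ERR(chan))
                return PTR_ERR(chan);

        /* msg must stay valid until the transfer completes */
        ret = mbox_send_message(chan, &msg);

        mbox_free_channel(chan);

        return ret < 0 ? ret : 0;
}

A non-blocking client would instead set tx_block = false and supply rx_callback/tx_done handlers, as the documentation added later in this series shows.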
Client driver developers should have a look at include/linux/mailbox_client.h to understand the part of the API exposed to client drivers. Similarly controller driver developers should have a look at include/linux/mailbox_controller.h Signed-off-by: Jassi Brar Reviewed-by: Mark Brown Signed-off-by: Mark Brown Signed-off-by: Jon Medhurst --- MAINTAINERS | 8 + drivers/mailbox/Makefile | 4 + drivers/mailbox/mailbox.c | 467 +++++++++++++++++++++++++++++++++++++ include/linux/mailbox_client.h | 46 ++++ include/linux/mailbox_controller.h | 135 +++++++++++ 5 files changed, 660 insertions(+) create mode 100644 drivers/mailbox/mailbox.c create mode 100644 include/linux/mailbox_client.h create mode 100644 include/linux/mailbox_controller.h diff --git a/MAINTAINERS b/MAINTAINERS index 5e7866a486b0..e6d35f41ec52 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5742,6 +5742,14 @@ S: Maintained F: drivers/net/macvlan.c F: include/linux/if_macvlan.h +MAILBOX API +M: Jassi Brar +L: linux-kernel@vger.kernel.org +S: Maintained +F: drivers/mailbox/ +F: include/linux/mailbox_client.h +F: include/linux/mailbox_controller.h + MAN-PAGES: MANUAL PAGES FOR LINUX -- Sections 2, 3, 4, 5, and 7 M: Michael Kerrisk W: http://www.kernel.org/doc/man-pages diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile index 6d184dbcaca8..94ed7cefb14d 100644 --- a/drivers/mailbox/Makefile +++ b/drivers/mailbox/Makefile @@ -1,3 +1,7 @@ +# Generic MAILBOX API + +obj-$(CONFIG_MAILBOX) += mailbox.o + obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o obj-$(CONFIG_OMAP2PLUS_MBOX) += omap-mailbox.o diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c new file mode 100644 index 000000000000..df09d3bf55e4 --- /dev/null +++ b/drivers/mailbox/mailbox.c @@ -0,0 +1,467 @@ +/* + * Mailbox: Common code for Mailbox controllers and users + * + * Copyright (C) 2013-2014 Linaro Ltd. + * Author: Jassi Brar + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define TXDONE_BY_IRQ BIT(0) /* controller has remote RTR irq */ +#define TXDONE_BY_POLL BIT(1) /* controller can read status of last TX */ +#define TXDONE_BY_ACK BIT(2) /* S/W ACK recevied by Client ticks the TX */ + +static LIST_HEAD(mbox_cons); +static DEFINE_MUTEX(con_mutex); + +static int add_to_rbuf(struct mbox_chan *chan, void *mssg) +{ + int idx; + unsigned long flags; + + spin_lock_irqsave(&chan->lock, flags); + + /* See if there is any space left */ + if (chan->msg_count == MBOX_TX_QUEUE_LEN) { + spin_unlock_irqrestore(&chan->lock, flags); + return -ENOBUFS; + } + + idx = chan->msg_free; + chan->msg_data[idx] = mssg; + chan->msg_count++; + + if (idx == MBOX_TX_QUEUE_LEN - 1) + chan->msg_free = 0; + else + chan->msg_free++; + + spin_unlock_irqrestore(&chan->lock, flags); + + return idx; +} + +static void msg_submit(struct mbox_chan *chan) +{ + unsigned count, idx; + unsigned long flags; + void *data; + int err; + + spin_lock_irqsave(&chan->lock, flags); + + if (!chan->msg_count || chan->active_req) + goto exit; + + count = chan->msg_count; + idx = chan->msg_free; + if (idx >= count) + idx -= count; + else + idx += MBOX_TX_QUEUE_LEN - count; + + data = chan->msg_data[idx]; + + /* Try to submit a message to the MBOX controller */ + err = chan->mbox->ops->send_data(chan, data); + if (!err) { + chan->active_req = data; + chan->msg_count--; + } +exit: + spin_unlock_irqrestore(&chan->lock, flags); +} + +static void tx_tick(struct mbox_chan *chan, int r) +{ + unsigned long flags; + void *mssg; + + spin_lock_irqsave(&chan->lock, flags); + mssg = chan->active_req; + chan->active_req = NULL; + spin_unlock_irqrestore(&chan->lock, flags); + + /* Submit next message */ + msg_submit(chan); + + /* Notify the client */ + if (mssg && chan->cl->tx_done) + chan->cl->tx_done(chan->cl, mssg, r); + + if (chan->cl->tx_block) + complete(&chan->tx_complete); +} + +static void poll_txdone(unsigned long data) +{ + struct mbox_controller *mbox = (struct mbox_controller *)data; + bool txdone, resched = false; + int i; + + for (i = 0; i < mbox->num_chans; i++) { + struct mbox_chan *chan = &mbox->chans[i]; + + if (chan->active_req && chan->cl) { + resched = true; + txdone = chan->mbox->ops->last_tx_done(chan); + if (txdone) + tx_tick(chan, 0); + } + } + + if (resched) + mod_timer(&mbox->poll, jiffies + + msecs_to_jiffies(mbox->period)); +} + +/** + * mbox_chan_received_data - A way for controller driver to push data + * received from remote to the upper layer. + * @chan: Pointer to the mailbox channel on which RX happened. + * @mssg: Client specific message typecasted as void * + * + * After startup and before shutdown any data received on the chan + * is passed on to the API via atomic mbox_chan_received_data(). + * The controller should ACK the RX only after this call returns. + */ +void mbox_chan_received_data(struct mbox_chan *chan, void *mssg) +{ + /* No buffering the received data */ + if (chan->cl->rx_callback) + chan->cl->rx_callback(chan->cl, mssg); +} +EXPORT_SYMBOL_GPL(mbox_chan_received_data); + +/** + * mbox_chan_txdone - A way for controller driver to notify the + * framework that the last TX has completed. + * @chan: Pointer to the mailbox chan on which TX happened. + * @r: Status of last TX - OK or ERROR + * + * The controller that has IRQ for TX ACK calls this atomic API + * to tick the TX state machine. It works only if txdone_irq + * is set by the controller. 
+ */ +void mbox_chan_txdone(struct mbox_chan *chan, int r) +{ + if (unlikely(!(chan->txdone_method & TXDONE_BY_IRQ))) { + dev_err(chan->mbox->dev, + "Controller can't run the TX ticker\n"); + return; + } + + tx_tick(chan, r); +} +EXPORT_SYMBOL_GPL(mbox_chan_txdone); + +/** + * mbox_client_txdone - The way for a client to run the TX state machine. + * @chan: Mailbox channel assigned to this client. + * @r: Success status of last transmission. + * + * The client/protocol had received some 'ACK' packet and it notifies + * the API that the last packet was sent successfully. This only works + * if the controller can't sense TX-Done. + */ +void mbox_client_txdone(struct mbox_chan *chan, int r) +{ + if (unlikely(!(chan->txdone_method & TXDONE_BY_ACK))) { + dev_err(chan->mbox->dev, "Client can't run the TX ticker\n"); + return; + } + + tx_tick(chan, r); +} +EXPORT_SYMBOL_GPL(mbox_client_txdone); + +/** + * mbox_client_peek_data - A way for client driver to pull data + * received from remote by the controller. + * @chan: Mailbox channel assigned to this client. + * + * A poke to controller driver for any received data. + * The data is actually passed onto client via the + * mbox_chan_received_data() + * The call can be made from atomic context, so the controller's + * implementation of peek_data() must not sleep. + * + * Return: True, if controller has, and is going to push after this, + * some data. + * False, if controller doesn't have any data to be read. + */ +bool mbox_client_peek_data(struct mbox_chan *chan) +{ + if (chan->mbox->ops->peek_data) + return chan->mbox->ops->peek_data(chan); + + return false; +} +EXPORT_SYMBOL_GPL(mbox_client_peek_data); + +/** + * mbox_send_message - For client to submit a message to be + * sent to the remote. + * @chan: Mailbox channel assigned to this client. + * @mssg: Client specific message typecasted. + * + * For client to submit data to the controller destined for a remote + * processor. If the client had set 'tx_block', the call will return + * either when the remote receives the data or when 'tx_tout' millisecs + * run out. + * In non-blocking mode, the requests are buffered by the API and a + * non-negative token is returned for each queued request. If the request + * is not queued, a negative token is returned. Upon failure or successful + * TX, the API calls 'tx_done' from atomic context, from which the client + * could submit yet another request. + * The pointer to message should be preserved until it is sent + * over the chan, i.e, tx_done() is made. + * This function could be called from atomic context as it simply + * queues the data and returns a token against the request. + * + * Return: Non-negative integer for successful submission (non-blocking mode) + * or transmission over chan (blocking mode). + * Negative value denotes failure. 
+ */ +int mbox_send_message(struct mbox_chan *chan, void *mssg) +{ + int t; + + if (!chan || !chan->cl) + return -EINVAL; + + t = add_to_rbuf(chan, mssg); + if (t < 0) { + dev_err(chan->mbox->dev, "Try increasing MBOX_TX_QUEUE_LEN\n"); + return t; + } + + msg_submit(chan); + + init_completion(&chan->tx_complete); + + if (chan->txdone_method == TXDONE_BY_POLL) + poll_txdone((unsigned long)chan->mbox); + + if (chan->cl->tx_block && chan->active_req) { + unsigned long wait; + int ret; + + if (!chan->cl->tx_tout) /* wait forever */ + wait = msecs_to_jiffies(3600000); + else + wait = msecs_to_jiffies(chan->cl->tx_tout); + + ret = wait_for_completion_timeout(&chan->tx_complete, wait); + if (ret == 0) { + t = -EIO; + tx_tick(chan, -EIO); + } + } + + return t; +} +EXPORT_SYMBOL_GPL(mbox_send_message); + +/** + * mbox_request_channel - Request a mailbox channel. + * @cl: Identity of the client requesting the channel. + * @index: Index of mailbox specifier in 'mboxes' property. + * + * The Client specifies its requirements and capabilities while asking for + * a mailbox channel. It can't be called from atomic context. + * The channel is exclusively allocated and can't be used by another + * client before the owner calls mbox_free_channel. + * After assignment, any packet received on this channel will be + * handed over to the client via the 'rx_callback'. + * The framework holds reference to the client, so the mbox_client + * structure shouldn't be modified until the mbox_free_channel returns. + * + * Return: Pointer to the channel assigned to the client if successful. + * ERR_PTR for request failure. + */ +struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index) +{ + struct device *dev = cl->dev; + struct mbox_controller *mbox; + struct of_phandle_args spec; + struct mbox_chan *chan; + unsigned long flags; + int ret; + + if (!dev || !dev->of_node) { + pr_debug("%s: No owner device node\n", __func__); + return ERR_PTR(-ENODEV); + } + + mutex_lock(&con_mutex); + + if (of_parse_phandle_with_args(dev->of_node, "mboxes", + "#mbox-cells", index, &spec)) { + dev_dbg(dev, "%s: can't parse \"mboxes\" property\n", __func__); + mutex_unlock(&con_mutex); + return ERR_PTR(-ENODEV); + } + + chan = NULL; + list_for_each_entry(mbox, &mbox_cons, node) + if (mbox->dev->of_node == spec.np) { + chan = mbox->of_xlate(mbox, &spec); + break; + } + + of_node_put(spec.np); + + if (!chan || chan->cl || !try_module_get(mbox->dev->driver->owner)) { + dev_dbg(dev, "%s: mailbox not free\n", __func__); + mutex_unlock(&con_mutex); + return ERR_PTR(-EBUSY); + } + + spin_lock_irqsave(&chan->lock, flags); + chan->msg_free = 0; + chan->msg_count = 0; + chan->active_req = NULL; + chan->cl = cl; + init_completion(&chan->tx_complete); + + if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone) + chan->txdone_method |= TXDONE_BY_ACK; + + spin_unlock_irqrestore(&chan->lock, flags); + + ret = chan->mbox->ops->startup(chan); + if (ret) { + dev_err(dev, "Unable to startup the chan (%d)\n", ret); + mbox_free_channel(chan); + chan = ERR_PTR(ret); + } + + mutex_unlock(&con_mutex); + return chan; +} +EXPORT_SYMBOL_GPL(mbox_request_channel); + +/** + * mbox_free_channel - The client relinquishes control of a mailbox + * channel by this call. + * @chan: The mailbox channel to be freed. 
+ */ +void mbox_free_channel(struct mbox_chan *chan) +{ + unsigned long flags; + + if (!chan || !chan->cl) + return; + + chan->mbox->ops->shutdown(chan); + + /* The queued TX requests are simply aborted, no callbacks are made */ + spin_lock_irqsave(&chan->lock, flags); + chan->cl = NULL; + chan->active_req = NULL; + if (chan->txdone_method == (TXDONE_BY_POLL | TXDONE_BY_ACK)) + chan->txdone_method = TXDONE_BY_POLL; + + module_put(chan->mbox->dev->driver->owner); + spin_unlock_irqrestore(&chan->lock, flags); +} +EXPORT_SYMBOL_GPL(mbox_free_channel); + +static struct mbox_chan * +of_mbox_index_xlate(struct mbox_controller *mbox, + const struct of_phandle_args *sp) +{ + int ind = sp->args[0]; + + if (ind >= mbox->num_chans) + return NULL; + + return &mbox->chans[ind]; +} + +/** + * mbox_controller_register - Register the mailbox controller + * @mbox: Pointer to the mailbox controller. + * + * The controller driver registers its communication channels + */ +int mbox_controller_register(struct mbox_controller *mbox) +{ + int i, txdone; + + /* Sanity check */ + if (!mbox || !mbox->dev || !mbox->ops || !mbox->num_chans) + return -EINVAL; + + if (mbox->txdone_irq) + txdone = TXDONE_BY_IRQ; + else if (mbox->txdone_poll) + txdone = TXDONE_BY_POLL; + else /* It has to be ACK then */ + txdone = TXDONE_BY_ACK; + + if (txdone == TXDONE_BY_POLL) { + mbox->poll.function = &poll_txdone; + mbox->poll.data = (unsigned long)mbox; + init_timer(&mbox->poll); + } + + for (i = 0; i < mbox->num_chans; i++) { + struct mbox_chan *chan = &mbox->chans[i]; + + chan->cl = NULL; + chan->mbox = mbox; + chan->txdone_method = txdone; + spin_lock_init(&chan->lock); + } + + if (!mbox->of_xlate) + mbox->of_xlate = of_mbox_index_xlate; + + mutex_lock(&con_mutex); + list_add_tail(&mbox->node, &mbox_cons); + mutex_unlock(&con_mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(mbox_controller_register); + +/** + * mbox_controller_unregister - Unregister the mailbox controller + * @mbox: Pointer to the mailbox controller. + */ +void mbox_controller_unregister(struct mbox_controller *mbox) +{ + int i; + + if (!mbox) + return; + + mutex_lock(&con_mutex); + + list_del(&mbox->node); + + for (i = 0; i < mbox->num_chans; i++) + mbox_free_channel(&mbox->chans[i]); + + if (mbox->txdone_poll) + del_timer_sync(&mbox->poll); + + mutex_unlock(&con_mutex); +} +EXPORT_SYMBOL_GPL(mbox_controller_unregister); diff --git a/include/linux/mailbox_client.h b/include/linux/mailbox_client.h new file mode 100644 index 000000000000..307d9cab2026 --- /dev/null +++ b/include/linux/mailbox_client.h @@ -0,0 +1,46 @@ +/* + * Copyright (C) 2013-2014 Linaro Ltd. + * Author: Jassi Brar + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __MAILBOX_CLIENT_H +#define __MAILBOX_CLIENT_H + +#include +#include + +struct mbox_chan; + +/** + * struct mbox_client - User of a mailbox + * @dev: The client device + * @tx_block: If the mbox_send_message should block until data is + * transmitted. + * @tx_tout: Max block period in ms before TX is assumed failure + * @knows_txdone: If the client could run the TX state machine. Usually + * if the client receives some ACK packet for transmission. + * Unused if the controller already has TX_Done/RTR IRQ. 
+ * @rx_callback: Atomic callback to provide client the data received + * @tx_done: Atomic callback to tell client of data transmission + */ +struct mbox_client { + struct device *dev; + bool tx_block; + unsigned long tx_tout; + bool knows_txdone; + + void (*rx_callback)(struct mbox_client *cl, void *mssg); + void (*tx_done)(struct mbox_client *cl, void *mssg, int r); +}; + +struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index); +int mbox_send_message(struct mbox_chan *chan, void *mssg); +void mbox_client_txdone(struct mbox_chan *chan, int r); /* atomic */ +bool mbox_client_peek_data(struct mbox_chan *chan); /* atomic */ +void mbox_free_channel(struct mbox_chan *chan); /* may sleep */ + +#endif /* __MAILBOX_CLIENT_H */ diff --git a/include/linux/mailbox_controller.h b/include/linux/mailbox_controller.h new file mode 100644 index 000000000000..9ee195b02444 --- /dev/null +++ b/include/linux/mailbox_controller.h @@ -0,0 +1,135 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __MAILBOX_CONTROLLER_H +#define __MAILBOX_CONTROLLER_H + +#include +#include +#include +#include +#include + +struct mbox_chan; + +/** + * struct mbox_chan_ops - methods to control mailbox channels + * @send_data: The API asks the MBOX controller driver, in atomic + * context try to transmit a message on the bus. Returns 0 if + * data is accepted for transmission, -EBUSY while rejecting + * if the remote hasn't yet read the last data sent. Actual + * transmission of data is reported by the controller via + * mbox_chan_txdone (if it has some TX ACK irq). It must not + * sleep. + * @startup: Called when a client requests the chan. The controller + * could ask clients for additional parameters of communication + * to be provided via client's chan_data. This call may + * block. After this call the Controller must forward any + * data received on the chan by calling mbox_chan_received_data. + * The controller may do stuff that need to sleep. + * @shutdown: Called when a client relinquishes control of a chan. + * This call may block too. The controller must not forward + * any received data anymore. + * The controller may do stuff that need to sleep. + * @last_tx_done: If the controller sets 'txdone_poll', the API calls + * this to poll status of last TX. The controller must + * give priority to IRQ method over polling and never + * set both txdone_poll and txdone_irq. Only in polling + * mode 'send_data' is expected to return -EBUSY. + * The controller may do stuff that need to sleep/block. + * Used only if txdone_poll:=true && txdone_irq:=false + * @peek_data: Atomic check for any received data. Return true if controller + * has some data to push to the client. False otherwise. + */ +struct mbox_chan_ops { + int (*send_data)(struct mbox_chan *chan, void *data); + int (*startup)(struct mbox_chan *chan); + void (*shutdown)(struct mbox_chan *chan); + bool (*last_tx_done)(struct mbox_chan *chan); + bool (*peek_data)(struct mbox_chan *chan); +}; + +/** + * struct mbox_controller - Controller of a class of communication channels + * @dev: Device backing this controller + * @ops: Operators that work on each communication chan + * @chans: Array of channels + * @num_chans: Number of channels in the 'chans' array. + * @txdone_irq: Indicates if the controller can report to API when + * the last transmitted data was read by the remote. 
+ * Eg, if it has some TX ACK irq. + * @txdone_poll: If the controller can read but not report the TX + * done. Ex, some register shows the TX status but + * no interrupt rises. Ignored if 'txdone_irq' is set. + * @txpoll_period: If 'txdone_poll' is in effect, the API polls for + * last TX's status after these many millisecs + * @of_xlate: Controller driver specific mapping of channel via DT + * @poll: API private. Used to poll for TXDONE on all channels. + * @period: API private. Polling period. + * @node: API private. To hook into list of controllers. + */ +struct mbox_controller { + struct device *dev; + struct mbox_chan_ops *ops; + struct mbox_chan *chans; + int num_chans; + bool txdone_irq; + bool txdone_poll; + unsigned txpoll_period; + struct mbox_chan *(*of_xlate)(struct mbox_controller *mbox, + const struct of_phandle_args *sp); + /* Internal to API */ + struct timer_list poll; + unsigned period; + struct list_head node; +}; + +/* + * The length of circular buffer for queuing messages from a client. + * 'msg_count' tracks the number of buffered messages while 'msg_free' + * is the index where the next message would be buffered. + * We shouldn't need it too big because every transfer is interrupt + * triggered and if we have lots of data to transfer, the interrupt + * latencies are going to be the bottleneck, not the buffer length. + * Besides, mbox_send_message could be called from atomic context and + * the client could also queue another message from the notifier 'tx_done' + * of the last transfer done. + * REVISIT: If too many platforms see the "Try increasing MBOX_TX_QUEUE_LEN" + * print, it needs to be taken from config option or somesuch. + */ +#define MBOX_TX_QUEUE_LEN 20 + +/** + * struct mbox_chan - s/w representation of a communication chan + * @mbox: Pointer to the parent/provider of this channel + * @txdone_method: Way to detect TXDone chosen by the API + * @cl: Pointer to the current owner of this channel + * @tx_complete: Transmission completion + * @active_req: Currently active request hook + * @msg_count: No. of mssg currently queued + * @msg_free: Index of next available mssg slot + * @msg_data: Hook for data packet + * @lock: Serialise access to the channel + * @con_priv: Hook for controller driver to attach private data + */ +struct mbox_chan { + struct mbox_controller *mbox; + unsigned txdone_method; + struct mbox_client *cl; + struct completion tx_complete; + void *active_req; + unsigned msg_count, msg_free; + void *msg_data[MBOX_TX_QUEUE_LEN]; + spinlock_t lock; /* Serialise access to the channel */ + void *con_priv; +}; + +int mbox_controller_register(struct mbox_controller *mbox); /* can sleep */ +void mbox_controller_unregister(struct mbox_controller *mbox); /* can sleep */ +void mbox_chan_received_data(struct mbox_chan *chan, void *data); /* atomic */ +void mbox_chan_txdone(struct mbox_chan *chan, int r); /* atomic */ + +#endif /* __MAILBOX_CONTROLLER_H */ -- cgit v1.2.3 From 207bef925b26c09cfc6236c8631316b62a74516d Mon Sep 17 00:00:00 2001 From: Jassi Brar Date: Tue, 22 Jul 2014 20:05:58 +0530 Subject: doc: add documentation for mailbox framework Some explanations with examples of how to write to implement users and providers of the mailbox framework. 
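Since the provider side is covered only in prose there, the following hedged sketch shows roughly how a hypothetical controller could sit on the API. The "vendor,demo-mbox" compatible, register layout, single-u32 message format and polling period are all invented for illustration.

#include <linux/err.h>
#include <linux/io.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define DEMO_NUM_CHANS   2
#define DEMO_TX_REG(n)   ((n) * 0x10)           /* assumed: write starts a TX   */
#define DEMO_STAT_REG(n) ((n) * 0x10 + 0x4)     /* assumed: non-zero while busy */

struct demo_mbox {
        void __iomem *base;
        struct mbox_chan chans[DEMO_NUM_CHANS];
        struct mbox_controller mbox;
};

/* Each message in this sketch is a single u32 word */
static int demo_send_data(struct mbox_chan *chan, void *data)
{
        struct demo_mbox *dm = container_of(chan->mbox, struct demo_mbox, mbox);
        unsigned int n = chan - dm->chans;

        if (readl(dm->base + DEMO_STAT_REG(n)))
                return -EBUSY;  /* remote hasn't consumed the last message */
        writel(*(u32 *)data, dm->base + DEMO_TX_REG(n));
        return 0;
}

static int demo_startup(struct mbox_chan *chan)
{
        return 0;               /* nothing to set up in this sketch */
}

static void demo_shutdown(struct mbox_chan *chan)
{
}

static bool demo_last_tx_done(struct mbox_chan *chan)
{
        struct demo_mbox *dm = container_of(chan->mbox, struct demo_mbox, mbox);

        return !readl(dm->base + DEMO_STAT_REG(chan - dm->chans));
}

static struct mbox_chan_ops demo_mbox_ops = {
        .send_data      = demo_send_data,
        .startup        = demo_startup,
        .shutdown       = demo_shutdown,
        .last_tx_done   = demo_last_tx_done,
};

static int demo_mbox_probe(struct platform_device *pdev)
{
        struct demo_mbox *dm;
        struct resource *res;

        dm = devm_kzalloc(&pdev->dev, sizeof(*dm), GFP_KERNEL);
        if (!dm)
                return -ENOMEM;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        dm->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(dm->base))
                return PTR_ERR(dm->base);

        dm->mbox.dev           = &pdev->dev;
        dm->mbox.ops           = &demo_mbox_ops;
        dm->mbox.chans         = dm->chans;
        dm->mbox.num_chans     = DEMO_NUM_CHANS;
        dm->mbox.txdone_poll   = true;  /* no TX-done interrupt assumed */
        dm->mbox.txpoll_period = 5;     /* poll every 5 ms */

        platform_set_drvdata(pdev, dm);
        return mbox_controller_register(&dm->mbox);
}

static const struct of_device_id demo_mbox_of_match[] = {
        { .compatible = "vendor,demo-mbox" },   /* invented compatible */
        {},
};

static struct platform_driver demo_mbox_driver = {
        .probe  = demo_mbox_probe,
        .driver = {
                .name           = "demo-mbox",
                .of_match_table = demo_mbox_of_match,
        },
};
module_platform_driver(demo_mbox_driver);

MODULE_LICENSE("GPL");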
Signed-off-by: Jassi Brar Signed-off-by: Mark Brown Signed-off-by: Jon Medhurst --- Documentation/mailbox.txt | 122 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 122 insertions(+) create mode 100644 Documentation/mailbox.txt diff --git a/Documentation/mailbox.txt b/Documentation/mailbox.txt new file mode 100644 index 000000000000..60f43ff629aa --- /dev/null +++ b/Documentation/mailbox.txt @@ -0,0 +1,122 @@ + The Common Mailbox Framework + Jassi Brar + + This document aims to help developers write client and controller +drivers for the API. But before we start, let us note that the +client (especially) and controller drivers are likely going to be +very platform specific because the remote firmware is likely to be +proprietary and implement non-standard protocol. So even if two +platforms employ, say, PL320 controller, the client drivers can't +be shared across them. Even the PL320 driver might need to accommodate +some platform specific quirks. So the API is meant mainly to avoid +similar copies of code written for each platform. Having said that, +nothing prevents the remote f/w to also be Linux based and use the +same api there. However none of that helps us locally because we only +ever deal at client's protocol level. + Some of the choices made during implementation are the result of this +peculiarity of this "common" framework. + + + + Part 1 - Controller Driver (See include/linux/mailbox_controller.h) + + Allocate mbox_controller and the array of mbox_chan. +Populate mbox_chan_ops, except peek_data() all are mandatory. +The controller driver might know a message has been consumed +by the remote by getting an IRQ or polling some hardware flag +or it can never know (the client knows by way of the protocol). +The method in order of preference is IRQ -> Poll -> None, which +the controller driver should set via 'txdone_irq' or 'txdone_poll' +or neither. + + + Part 2 - Client Driver (See include/linux/mailbox_client.h) + + The client might want to operate in blocking mode (synchronously +send a message through before returning) or non-blocking/async mode (submit +a message and a callback function to the API and return immediately). + + +struct demo_client { + struct mbox_client cl; + struct mbox_chan *mbox; + struct completion c; + bool async; + /* ... */ +}; + +/* + * This is the handler for data received from remote. The behaviour is purely + * dependent upon the protocol. This is just an example. 
+ */ +static void message_from_remote(struct mbox_client *cl, void *mssg) +{ + struct demo_client *dc = container_of(mbox_client, + struct demo_client, cl); + if (dc->aysnc) { + if (is_an_ack(mssg)) { + /* An ACK to our last sample sent */ + return; /* Or do something else here */ + } else { /* A new message from remote */ + queue_req(mssg); + } + } else { + /* Remote f/w sends only ACK packets on this channel */ + return; + } +} + +static void sample_sent(struct mbox_client *cl, void *mssg, int r) +{ + struct demo_client *dc = container_of(mbox_client, + struct demo_client, cl); + complete(&dc->c); +} + +static void client_demo(struct platform_device *pdev) +{ + struct demo_client *dc_sync, *dc_async; + /* The controller already knows async_pkt and sync_pkt */ + struct async_pkt ap; + struct sync_pkt sp; + + dc_sync = kzalloc(sizeof(*dc_sync), GFP_KERNEL); + dc_async = kzalloc(sizeof(*dc_async), GFP_KERNEL); + + /* Populate non-blocking mode client */ + dc_async->cl.dev = &pdev->dev; + dc_async->cl.rx_callback = message_from_remote; + dc_async->cl.tx_done = sample_sent; + dc_async->cl.tx_block = false; + dc_async->cl.tx_tout = 0; /* doesn't matter here */ + dc_async->cl.knows_txdone = false; /* depending upon protocol */ + dc_async->async = true; + init_completion(&dc_async->c); + + /* Populate blocking mode client */ + dc_sync->cl.dev = &pdev->dev; + dc_sync->cl.rx_callback = message_from_remote; + dc_sync->cl.tx_done = NULL; /* operate in blocking mode */ + dc_sync->cl.tx_block = true; + dc_sync->cl.tx_tout = 500; /* by half a second */ + dc_sync->cl.knows_txdone = false; /* depending upon protocol */ + dc_sync->async = false; + + /* ASync mailbox is listed second in 'mboxes' property */ + dc_async->mbox = mbox_request_channel(&dc_async->cl, 1); + /* Populate data packet */ + /* ap.xxx = 123; etc */ + /* Send async message to remote */ + mbox_send_message(dc_async->mbox, &ap); + + /* Sync mailbox is listed first in 'mboxes' property */ + dc_sync->mbox = mbox_request_channel(&dc_sync->cl, 0); + /* Populate data packet */ + /* sp.abc = 123; etc */ + /* Send message to remote in blocking mode */ + mbox_send_message(dc_sync->mbox, &sp); + /* At this point 'sp' has been sent */ + + /* Now wait for async chan to be done */ + wait_for_completion(&dc_async->c); +} -- cgit v1.2.3 From f82ed6085e9b3541d54132712981468256d94b4f Mon Sep 17 00:00:00 2001 From: Jassi Brar Date: Tue, 22 Jul 2014 20:40:04 +0530 Subject: dt: mailbox: add generic bindings Define generic bindings for the framework clients to request mailbox channels. Signed-off-by: Jassi Brar Reviewed-by: Mark Brown Signed-off-by: Mark Brown Signed-off-by: Jon Medhurst --- .../devicetree/bindings/mailbox/mailbox.txt | 38 ++++++++++++++++++++++ 1 file changed, 38 insertions(+) create mode 100644 Documentation/devicetree/bindings/mailbox/mailbox.txt diff --git a/Documentation/devicetree/bindings/mailbox/mailbox.txt b/Documentation/devicetree/bindings/mailbox/mailbox.txt new file mode 100644 index 000000000000..1a2cd3d266db --- /dev/null +++ b/Documentation/devicetree/bindings/mailbox/mailbox.txt @@ -0,0 +1,38 @@ +* Generic Mailbox Controller and client driver bindings + +Generic binding to provide a way for Mailbox controller drivers to +assign appropriate mailbox channel to client drivers. + +* Mailbox Controller + +Required property: +- #mbox-cells: Must be at least 1. Number of cells in a mailbox + specifier. + +Example: + mailbox: mailbox { + ... 
+ #mbox-cells = <1>; + }; + + +* Mailbox Client + +Required property: +- mboxes: List of phandle and mailbox channel specifiers. + +Optional property: +- mbox-names: List of identifier strings for each mailbox channel + required by the client. The use of this property + is discouraged in favor of using index in list of + 'mboxes' while requesting a mailbox. Instead the + platforms may define channel indices, in DT headers, + to something legible. + +Example: + pwr_cntrl: power { + ... + mbox-names = "pwr-ctrl", "rpc"; + mboxes = <&mailbox 0 + &mailbox 1>; + }; -- cgit v1.2.3 From d773eebb6e74cdb7a52755a3ec83f1a5ed4aa939 Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Tue, 15 Apr 2014 15:14:27 +0100 Subject: mailbox: add support for ARM Message Handling Unit(MHU) controller This patch adds support for ARM Message Handling Unit(MHU) controller that provides control logic and interrupt generation to support inter-processor communication between the Application Processor and the System Control Processor(SCP). This support is built on the existing common mailbox framework for client/protocol drivers and controller drivers of Inter Processor Communication(IPC). SCP controls most of the power managament on the Application Processors. Signed-off-by: Sudeep Holla Signed-off-by: Jon Medhurst --- drivers/mailbox/Kconfig | 14 ++ drivers/mailbox/Makefile | 2 + drivers/mailbox/arm_mhu.c | 336 ++++++++++++++++++++++++++++++++++++++++++++++ drivers/mailbox/arm_mhu.h | 31 +++++ 4 files changed, 383 insertions(+) create mode 100644 drivers/mailbox/arm_mhu.c create mode 100644 drivers/mailbox/arm_mhu.h diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig index 9fd9c6717e0c..cbe038319816 100644 --- a/drivers/mailbox/Kconfig +++ b/drivers/mailbox/Kconfig @@ -6,6 +6,20 @@ menuconfig MAILBOX signals. Say Y if your platform supports hardware mailboxes. if MAILBOX +config ARM_MHU_MBOX + bool "ARM Message Handling Unit (MHU) Mailbox" + help + This driver provides support for inter-processor communication + between System Control Processor (SCP) with Cortex-M3 processor + and Application Processors (AP) on some ARM based systems with + MHU peripheral. + + SCP controls most of the power managament on the Application + Processors. It offers control and management of: the core/cluster + power states, various power domain DVFS including the core/cluster, + certain system clocks configuration, thermal sensors and many + others. + config PL320_MBOX bool "ARM PL320 Mailbox" depends on ARM_AMBA diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile index 94ed7cefb14d..ff640e5c957b 100644 --- a/drivers/mailbox/Makefile +++ b/drivers/mailbox/Makefile @@ -2,6 +2,8 @@ obj-$(CONFIG_MAILBOX) += mailbox.o +obj-$(CONFIG_ARM_MHU_MBOX) += arm_mhu.o + obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o obj-$(CONFIG_OMAP2PLUS_MBOX) += omap-mailbox.o diff --git a/drivers/mailbox/arm_mhu.c b/drivers/mailbox/arm_mhu.c new file mode 100644 index 000000000000..841b0cb1b710 --- /dev/null +++ b/drivers/mailbox/arm_mhu.c @@ -0,0 +1,336 @@ +/* + * Driver for the Message Handling Unit (MHU) which is the peripheral in + * the Compute SubSystem (CSS) providing a mechanism for inter-processor + * communication between System Control Processor (SCP) with Cortex-M3 + * processor and Application Processors (AP). + * + * The MHU peripheral provides a mechanism to assert interrupt signals to + * facilitate inter-processor message passing between the SCP and the AP. + * The message payload can be deposited into main memory or on-chip memories. 
+ * The MHU supports three bi-directional channels - low priority, high + * priority and secure(can't be used in non-secure execution modes) + * + * Copyright (C) 2014 ARM Ltd. + * + * Author: Sudeep Holla + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arm_mhu.h" + +#define DRIVER_NAME CONTROLLER_NAME"_drv" + +/* + * +--------------------+-------+---------------+ + * | Hardware Register | Offset| Driver View | + * +--------------------+-------+---------------+ + * | SCP_INTR_L_STAT | 0x000 | RX_STATUS(L) | + * | SCP_INTR_L_SET | 0x008 | RX_SET(L) | + * | SCP_INTR_L_CLEAR | 0x010 | RX_CLEAR(L) | + * +--------------------+-------+---------------+ + * | SCP_INTR_H_STAT | 0x020 | RX_STATUS(H) | + * | SCP_INTR_H_SET | 0x028 | RX_SET(H) | + * | SCP_INTR_H_CLEAR | 0x030 | RX_CLEAR(H) | + * +--------------------+-------+---------------+ + * | CPU_INTR_L_STAT | 0x100 | TX_STATUS(L) | + * | CPU_INTR_L_SET | 0x108 | TX_SET(L) | + * | CPU_INTR_L_CLEAR | 0x110 | TX_CLEAR(L) | + * +--------------------+-------+---------------+ + * | CPU_INTR_H_STAT | 0x120 | TX_STATUS(H) | + * | CPU_INTR_H_SET | 0x128 | TX_SET(H) | + * | CPU_INTR_H_CLEAR | 0x130 | TX_CLEAR(H) | + * +--------------------+-------+---------------+ +*/ +#define RX_OFFSET(chan) ((idx) * 0x20) +#define RX_STATUS(chan) RX_OFFSET(chan) +#define RX_SET(chan) (RX_OFFSET(chan) + 0x8) +#define RX_CLEAR(chan) (RX_OFFSET(chan) + 0x10) + +#define TX_OFFSET(chan) (0x100 + (idx) * 0x20) +#define TX_STATUS(chan) TX_OFFSET(chan) +#define TX_SET(chan) (TX_OFFSET(chan) + 0x8) +#define TX_CLEAR(chan) (TX_OFFSET(chan) + 0x10) + +/* + * +---------------+-------+----------------+ + * | Payload | Offset| Driver View | + * +---------------+-------+----------------+ + * | SCP->AP Low | 0x000 | RX_PAYLOAD(L) | + * | SCP->AP High | 0x400 | RX_PAYLOAD(H) | + * +---------------+-------+----------------+ + * | AP->SCP Low | 0x200 | TX_PAYLOAD(H) | + * | AP->SCP High | 0x600 | TX_PAYLOAD(H) | + * +---------------+-------+----------------+ +*/ +#define PAYLOAD_MAX_SIZE 0x200 +#define PAYLOAD_OFFSET 0x400 +#define RX_PAYLOAD(chan) ((chan) * PAYLOAD_OFFSET) +#define TX_PAYLOAD(chan) ((chan) * PAYLOAD_OFFSET + PAYLOAD_MAX_SIZE) + +struct mhu_chan { + int index; + int rx_irq; + struct mbox_link link; + struct mhu_ctlr *ctlr; + struct mhu_data_buf *data; +}; + +struct mhu_ctlr { + struct device *dev; + void __iomem *mbox_base; + void __iomem *payload_base; + struct mbox_controller mbox_con; + struct mhu_chan channels[CHANNEL_MAX]; +}; + +static inline struct mhu_chan *to_mhu_chan(struct mbox_link *lnk) +{ + if (!lnk) + return NULL; + + return container_of(lnk, struct mhu_chan, link); +} + +static irqreturn_t mbox_handler(int irq, void *p) +{ + struct mbox_link *link = (struct mbox_link *)p; + struct mhu_chan *chan = to_mhu_chan(link); + struct mhu_ctlr 
*ctlr = chan->ctlr; + void __iomem *mbox_base = ctlr->mbox_base; + void __iomem *payload = ctlr->payload_base; + int idx = chan->index; + u32 status = readl(mbox_base + RX_STATUS(idx)); + + if (status && irq == chan->rx_irq) { + struct mhu_data_buf *data = chan->data; + if (!data) + return IRQ_NONE; /* spurious */ + if (data->rx_buf) + memcpy(data->rx_buf, payload + RX_PAYLOAD(idx), + data->rx_size); + chan->data = NULL; + writel(~0, mbox_base + RX_CLEAR(idx)); + mbox_link_received_data(link, data); + } + + return IRQ_HANDLED; +} + +static int mhu_send_data(struct mbox_link *link, void *msg) +{ + struct mhu_chan *chan = to_mhu_chan(link); + struct mhu_ctlr *ctlr = chan->ctlr; + void __iomem *mbox_base = ctlr->mbox_base; + void __iomem *payload = ctlr->payload_base; + struct mhu_data_buf *data = (struct mhu_data_buf *)msg; + int idx = chan->index; + + if (!data) + return -EINVAL; + + chan->data = data; + if (data->tx_buf) + memcpy(payload + TX_PAYLOAD(idx), data->tx_buf, data->tx_size); + writel(data->cmd, mbox_base + TX_SET(idx)); + + return 0; +} + +static int mhu_startup(struct mbox_link *link, void *ignored) +{ + struct mhu_chan *chan = to_mhu_chan(link); + int err, mbox_irq = chan->rx_irq; + + err = request_threaded_irq(mbox_irq, NULL, mbox_handler, IRQF_ONESHOT, + link->link_name, link); + if (err) + return err; + + chan->data = NULL; + return 0; +} + +static void mhu_shutdown(struct mbox_link *link) +{ + struct mhu_chan *chan = to_mhu_chan(link); + + chan->data = NULL; + free_irq(chan->rx_irq, link); +} + +static bool mhu_last_tx_done(struct mbox_link *link) +{ + struct mhu_chan *chan = to_mhu_chan(link); + struct mhu_ctlr *ctlr = chan->ctlr; + void __iomem *mbox_base = ctlr->mbox_base; + int idx = chan->index; + + return !readl(mbox_base + TX_STATUS(idx)); +} + +static struct mbox_link_ops mhu_ops = { + .send_data = mhu_send_data, + .startup = mhu_startup, + .shutdown = mhu_shutdown, + .last_tx_done = mhu_last_tx_done, +}; + +static int mhu_probe(struct platform_device *pdev) +{ + struct mhu_ctlr *ctlr; + struct mhu_chan *chan; + struct device *dev = &pdev->dev; + struct mbox_link **l; + struct resource *res; + int idx; + static const char * const channel_names[] = { + CHANNEL_LOW_PRIORITY, + CHANNEL_HIGH_PRIORITY + }; + + ctlr = devm_kzalloc(dev, sizeof(*ctlr), GFP_KERNEL); + if (!ctlr) { + dev_err(dev, "failed to allocate memory\n"); + return -ENOMEM; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(dev, "failed to get mailbox memory resource\n"); + return -ENXIO; + } + + ctlr->mbox_base = devm_request_and_ioremap(dev, res); + if (!ctlr->mbox_base) { + dev_err(dev, "failed to request or ioremap mailbox control\n"); + return -EADDRNOTAVAIL; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!res) { + dev_err(dev, "failed to get payload memory resource\n"); + return -ENXIO; + } + + ctlr->payload_base = devm_request_and_ioremap(dev, res); + if (!ctlr->payload_base) { + dev_err(dev, "failed to request or ioremap mailbox payload\n"); + return -EADDRNOTAVAIL; + } + + ctlr->dev = dev; + platform_set_drvdata(pdev, ctlr); + + l = devm_kzalloc(dev, sizeof(*l) * (CHANNEL_MAX + 1), GFP_KERNEL); + if (!l) { + dev_err(dev, "failed to allocate memory\n"); + return -ENOMEM; + } + + ctlr->mbox_con.links = l; + ctlr->mbox_con.txdone_poll = true; + ctlr->mbox_con.txpoll_period = 10; + ctlr->mbox_con.ops = &mhu_ops; + snprintf(ctlr->mbox_con.controller_name, 16, CONTROLLER_NAME); + ctlr->mbox_con.dev = dev; + + for (idx = 0; idx < CHANNEL_MAX; 
idx++) { + chan = &ctlr->channels[idx]; + chan->index = idx; + chan->ctlr = ctlr; + chan->rx_irq = platform_get_irq(pdev, idx); + if (chan->rx_irq < 0) { + dev_err(dev, "failed to get interrupt for %s\n", + channel_names[idx]); + return -ENXIO; + } + l[idx] = &chan->link; + snprintf(l[idx]->link_name, 16, channel_names[idx]); + } + l[idx] = NULL; + + if (mbox_controller_register(&ctlr->mbox_con)) { + dev_err(dev, "failed to register mailbox controller\n"); + return -ENOMEM; + } + _dev_info(dev, "registered mailbox controller %s\n", + ctlr->mbox_con.controller_name); + return 0; +} + +static int mhu_remove(struct platform_device *pdev) +{ + struct mhu_ctlr *ctlr = platform_get_drvdata(pdev); + struct device *dev = &pdev->dev; + + mbox_controller_unregister(&ctlr->mbox_con); + _dev_info(dev, "unregistered mailbox controller %s\n", + ctlr->mbox_con.controller_name); + devm_kfree(dev, ctlr->mbox_con.links); + + devm_iounmap(dev, ctlr->payload_base); + devm_iounmap(dev, ctlr->mbox_base); + + platform_set_drvdata(pdev, NULL); + devm_kfree(dev, ctlr); + return 0; +} + +static struct of_device_id mhu_of_match[] = { + { .compatible = "arm,mhu" }, + {}, +}; +MODULE_DEVICE_TABLE(of, mhu_of_match); + +static struct platform_driver mhu_driver = { + .probe = mhu_probe, + .remove = mhu_remove, + .driver = { + .name = DRIVER_NAME, + .of_match_table = mhu_of_match, + }, +}; + +static int __init mhu_init(void) +{ + return platform_driver_register(&mhu_driver); +} +core_initcall(mhu_init); + +static void __exit mhu_exit(void) +{ + platform_driver_unregister(&mhu_driver); +} +module_exit(mhu_exit); + +MODULE_AUTHOR("Sudeep Holla "); +MODULE_DESCRIPTION("ARM MHU mailbox driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mailbox/arm_mhu.h b/drivers/mailbox/arm_mhu.h new file mode 100644 index 000000000000..3b5343375c43 --- /dev/null +++ b/drivers/mailbox/arm_mhu.h @@ -0,0 +1,31 @@ +/* + * ARM Message Handling Unit (MHU) driver header + * + * Copyright (C) 2014 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ +#define CONTROLLER_NAME "mhu_ctlr" + +#define CHANNEL_MAX 2 +#define CHANNEL_LOW_PRIORITY "cpu_to_scp_low" +#define CHANNEL_HIGH_PRIORITY "cpu_to_scp_high" + +struct mhu_data_buf { + u32 cmd; + int tx_size; + void *tx_buf; + int rx_size; + void *rx_buf; + void *cl_data; +}; -- cgit v1.2.3 From 2021582a5ea78e73dd45d8b433e5708ecc6a7341 Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Tue, 15 Apr 2014 16:09:42 +0100 Subject: mailbox: add support for System Control and Power Interface(SCPI) protocol This patch add supports for System Control and Power Interface (SCPI) Message Protocol used between the Application Cores(AP) and the System Control Processor(SCP). The MHU peripheral provides a mechanism for inter-processor communication between SCP's M3 processor and AP. SCP offers control and management of the core/cluster power states, various power domain DVFS including the core/cluster, certain system clocks configuration, thermal sensors and many others. 
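As an illustration of the consumer side, the hedged sketch below shows how a hypothetical driver might use the DVFS calls exported by this library. The helper name is invented, and the struct scpi_opp field types are inferred from the protocol code that follows.

#include <linux/err.h>
#include <linux/scpi_protocol.h>
#include <linux/types.h>

/* Hypothetical helper: switch DVFS domain 'domain' to its last OPP. */
static int demo_scpi_set_last_opp(u8 domain)
{
        struct scpi_opp *opp;
        int cur;

        opp = scpi_dvfs_get_opps(domain);       /* cached after the first call */
        if (IS_ERR(opp))
                return PTR_ERR(opp);

        cur = scpi_dvfs_get_idx(domain);
        if (cur < 0)
                return cur;

        if (cur == opp->count - 1)
                return 0;                       /* already there */

        return scpi_dvfs_set_idx(domain, opp->count - 1);
}

scpi_clk_get_val() and scpi_clk_set_val() would be used in the same request/response fashion by clock consumers.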
This protocol library provides interface for all the client drivers using SCPI to make use of the features offered by the SCP. Signed-off-by: Sudeep Holla Signed-off-by: Jon Medhurst --- drivers/mailbox/Kconfig | 13 ++ drivers/mailbox/Makefile | 1 + drivers/mailbox/scpi_protocol.c | 354 ++++++++++++++++++++++++++++++++++++++++ include/linux/scpi_protocol.h | 30 ++++ 4 files changed, 398 insertions(+) create mode 100644 drivers/mailbox/scpi_protocol.c create mode 100644 include/linux/scpi_protocol.h diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig index cbe038319816..8e5e5b1becec 100644 --- a/drivers/mailbox/Kconfig +++ b/drivers/mailbox/Kconfig @@ -20,6 +20,19 @@ config ARM_MHU_MBOX certain system clocks configuration, thermal sensors and many others. +config ARM_SCPI_PROTOCOL + bool "ARM System Control and Power Interface (SCPI) Message Protocol" + select ARM_MHU_MBOX + help + System Control and Power Interface (SCPI) Message Protocol is + defined for the purpose of communication between the Application + Cores(AP) and the System Control Processor(SCP). The MHU peripheral + provides a mechanism for inter-processor communication between SCP + and AP. + + This protocol library provides interface for all the client drivers + making use of the features offered by the SCP. + config PL320_MBOX bool "ARM PL320 Mailbox" depends on ARM_AMBA diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile index ff640e5c957b..e31c84b6bc40 100644 --- a/drivers/mailbox/Makefile +++ b/drivers/mailbox/Makefile @@ -3,6 +3,7 @@ obj-$(CONFIG_MAILBOX) += mailbox.o obj-$(CONFIG_ARM_MHU_MBOX) += arm_mhu.o +obj-$(CONFIG_ARM_SCPI_PROTOCOL) += scpi_protocol.o obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o diff --git a/drivers/mailbox/scpi_protocol.c b/drivers/mailbox/scpi_protocol.c new file mode 100644 index 000000000000..7a1ff2dd2687 --- /dev/null +++ b/drivers/mailbox/scpi_protocol.c @@ -0,0 +1,354 @@ +/* + * System Control and Power Interface (SCPI) Message Protocol driver + * + * Copyright (C) 2014 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "arm_mhu.h" + +#define CMD_ID_SHIFT 0 +#define CMD_ID_MASK 0xff +#define CMD_SENDER_ID_SHIFT 8 +#define CMD_SENDER_ID_MASK 0xff +#define CMD_DATA_SIZE_SHIFT 20 +#define CMD_DATA_SIZE_MASK 0x1ff +#define PACK_SCPI_CMD(cmd, sender, txsz) \ + ((((cmd) & CMD_ID_MASK) << CMD_ID_SHIFT) | \ + (((sender) & CMD_SENDER_ID_MASK) << CMD_SENDER_ID_SHIFT) | \ + (((txsz) & CMD_DATA_SIZE_MASK) << CMD_DATA_SIZE_SHIFT)) + +#define MAX_DVFS_DOMAINS 3 +#define MAX_DVFS_OPPS 4 +#define DVFS_LATENCY(hdr) ((hdr) >> 16) +#define DVFS_OPP_COUNT(hdr) (((hdr) >> 8) & 0xff) + +enum scpi_error_codes { + SCPI_SUCCESS = 0, /* Success */ + SCPI_ERR_PARAM = 1, /* Invalid parameter(s) */ + SCPI_ERR_ALIGN = 2, /* Invalid alignment */ + SCPI_ERR_SIZE = 3, /* Invalid size */ + SCPI_ERR_HANDLER = 4, /* Invalid handler/callback */ + SCPI_ERR_ACCESS = 5, /* Invalid access/permission denied */ + SCPI_ERR_RANGE = 6, /* Value out of range */ + SCPI_ERR_TIMEOUT = 7, /* Timeout has occurred */ + SCPI_ERR_NOMEM = 8, /* Invalid memory area or pointer */ + SCPI_ERR_PWRSTATE = 9, /* Invalid power state */ + SCPI_ERR_SUPPORT = 10, /* Not supported or disabled */ + SCPI_ERR_DEVICE = 11, /* Device error */ + SCPI_ERR_MAX +}; + +enum scpi_client_id { + SCPI_CL_NONE, + SCPI_CL_CLOCKS, + SCPI_CL_DVFS, + SCPI_CL_POWER, + SCPI_MAX, +}; + +enum scpi_std_cmd { + SCPI_CMD_INVALID = 0x00, + SCPI_CMD_SCPI_READY = 0x01, + SCPI_CMD_SCPI_CAPABILITIES = 0x02, + SCPI_CMD_EVENT = 0x03, + SCPI_CMD_SET_CSS_PWR_STATE = 0x04, + SCPI_CMD_GET_CSS_PWR_STATE = 0x05, + SCPI_CMD_CFG_PWR_STATE_STAT = 0x06, + SCPI_CMD_GET_PWR_STATE_STAT = 0x07, + SCPI_CMD_SYS_PWR_STATE = 0x08, + SCPI_CMD_L2_READY = 0x09, + SCPI_CMD_SET_AP_TIMER = 0x0a, + SCPI_CMD_CANCEL_AP_TIME = 0x0b, + SCPI_CMD_DVFS_CAPABILITIES = 0x0c, + SCPI_CMD_GET_DVFS_INFO = 0x0d, + SCPI_CMD_SET_DVFS = 0x0e, + SCPI_CMD_GET_DVFS = 0x0f, + SCPI_CMD_GET_DVFS_STAT = 0x10, + SCPI_CMD_SET_RTC = 0x11, + SCPI_CMD_GET_RTC = 0x12, + SCPI_CMD_CLOCK_CAPABILITIES = 0x13, + SCPI_CMD_SET_CLOCK_INDEX = 0x14, + SCPI_CMD_SET_CLOCK_VALUE = 0x15, + SCPI_CMD_GET_CLOCK_VALUE = 0x16, + SCPI_CMD_PSU_CAPABILITIES = 0x17, + SCPI_CMD_SET_PSU = 0x18, + SCPI_CMD_GET_PSU = 0x19, + SCPI_CMD_SENSOR_CAPABILITIES = 0x1a, + SCPI_CMD_SENSOR_INFO = 0x1b, + SCPI_CMD_SENSOR_VALUE = 0x1c, + SCPI_CMD_SENSOR_CFG_PERIODIC = 0x1d, + SCPI_CMD_SENSOR_CFG_BOUNDS = 0x1e, + SCPI_CMD_SENSOR_ASYNC_VALUE = 0x1f, + SCPI_CMD_COUNT +}; + +struct scpi_data_buf { + int client_id; + struct mhu_data_buf *data; + struct completion complete; +}; + +static int high_priority_cmds[] = { + SCPI_CMD_GET_CSS_PWR_STATE, + SCPI_CMD_CFG_PWR_STATE_STAT, + SCPI_CMD_GET_PWR_STATE_STAT, + SCPI_CMD_SET_DVFS, + SCPI_CMD_GET_DVFS, + SCPI_CMD_SET_RTC, + SCPI_CMD_GET_RTC, + SCPI_CMD_SET_CLOCK_INDEX, + SCPI_CMD_SET_CLOCK_VALUE, + SCPI_CMD_GET_CLOCK_VALUE, + SCPI_CMD_SET_PSU, + SCPI_CMD_GET_PSU, + SCPI_CMD_SENSOR_VALUE, + SCPI_CMD_SENSOR_CFG_PERIODIC, + SCPI_CMD_SENSOR_CFG_BOUNDS, +}; + +static struct scpi_opp *scpi_opps[MAX_DVFS_DOMAINS]; + +static int scpi_linux_errmap[SCPI_ERR_MAX] = { + 0, -EINVAL, -ENOEXEC, -EMSGSIZE, + -EINVAL, -EACCES, -ERANGE, -ETIMEDOUT, + -ENOMEM, -EINVAL, -EOPNOTSUPP, -EIO, +}; + +static inline int scpi_to_linux_errno(int errno) +{ + if (errno >= SCPI_SUCCESS && errno < SCPI_ERR_MAX) + return scpi_linux_errmap[errno]; + return -EIO; +} + +static bool high_priority_chan_supported(int cmd) +{ + int idx; + for (idx = 0; idx < ARRAY_SIZE(high_priority_cmds); idx++) + if 
(cmd == high_priority_cmds[idx]) + return true; + return false; +} + +static void scpi_rx_callback(struct mbox_client *cl, void *msg) +{ + struct mhu_data_buf *data = (struct mhu_data_buf *)msg; + struct scpi_data_buf *scpi_buf = data->cl_data; + complete(&scpi_buf->complete); +} + +static int send_scpi_cmd(struct scpi_data_buf *scpi_buf, bool high_priority) +{ + struct mbox_chan *chan; + struct mbox_client cl; + struct mhu_data_buf *data = scpi_buf->data; + u32 status; + + cl.rx_callback = scpi_rx_callback; + cl.tx_done = NULL; + cl.tx_block = true; + cl.tx_tout = 50; /* 50 msec */ + cl.link_data = NULL; + cl.knows_txdone = false; + cl.chan_name = high_priority ? + CONTROLLER_NAME":"CHANNEL_HIGH_PRIORITY : + CONTROLLER_NAME":"CHANNEL_LOW_PRIORITY; + + chan = mbox_request_channel(&cl); + if (IS_ERR(chan)) + return PTR_ERR(chan); + + init_completion(&scpi_buf->complete); + if (mbox_send_message(chan, (void *)data)) + return -EIO; + + if (!wait_for_completion_timeout(&scpi_buf->complete, + msecs_to_jiffies(50))) + status = SCPI_ERR_TIMEOUT; + else + status = *(u32 *)(data->rx_buf); /* read first word */ + + mbox_free_channel(chan); + + return scpi_to_linux_errno(status); +} + +#define SCPI_SETUP_DBUF(scpi_buf, mhu_buf, _client_id,\ + _cmd, _tx_buf, _rx_buf) \ +do { \ + struct mhu_data_buf *pdata = &mhu_buf; \ + pdata->cmd = _cmd; \ + pdata->tx_buf = &_tx_buf; \ + pdata->tx_size = sizeof(_tx_buf); \ + pdata->rx_buf = &_rx_buf; \ + pdata->rx_size = sizeof(_rx_buf); \ + scpi_buf.client_id = _client_id; \ + scpi_buf.data = pdata; \ +} while (0) + +static int scpi_execute_cmd(struct scpi_data_buf *scpi_buf) +{ + struct mhu_data_buf *data; + bool high_priority; + + if (!scpi_buf || !scpi_buf->data) + return -EINVAL; + + data = scpi_buf->data; + high_priority = high_priority_chan_supported(data->cmd); + data->cmd = PACK_SCPI_CMD(data->cmd, scpi_buf->client_id, + data->tx_size); + data->cl_data = scpi_buf; + + return send_scpi_cmd(scpi_buf, high_priority); +} + +unsigned long scpi_clk_get_val(u16 clk_id) +{ + struct scpi_data_buf sdata; + struct mhu_data_buf mdata; + struct { + u32 status; + u32 clk_rate; + } buf; + + SCPI_SETUP_DBUF(sdata, mdata, SCPI_CL_CLOCKS, + SCPI_CMD_GET_CLOCK_VALUE, clk_id, buf); + if (scpi_execute_cmd(&sdata)) + return 0; + + return buf.clk_rate; +} +EXPORT_SYMBOL_GPL(scpi_clk_get_val); + +int scpi_clk_set_val(u16 clk_id, unsigned long rate) +{ + struct scpi_data_buf sdata; + struct mhu_data_buf mdata; + int stat; + struct { + u32 clk_rate; + u16 clk_id; + } buf; + + buf.clk_rate = (u32)rate; + buf.clk_id = clk_id; + + SCPI_SETUP_DBUF(sdata, mdata, SCPI_CL_CLOCKS, + SCPI_CMD_SET_CLOCK_VALUE, buf, stat); + return scpi_execute_cmd(&sdata); +} +EXPORT_SYMBOL_GPL(scpi_clk_set_val); + +struct scpi_opp *scpi_dvfs_get_opps(u8 domain) +{ + struct scpi_data_buf sdata; + struct mhu_data_buf mdata; + struct { + u32 status; + u32 header; + u32 freqs[MAX_DVFS_OPPS]; + } buf; + struct scpi_opp *opp; + size_t freqs_sz; + int count, ret; + + if (domain >= MAX_DVFS_DOMAINS) + return ERR_PTR(-EINVAL); + + if (scpi_opps[domain]) /* data already populated */ + return scpi_opps[domain]; + + SCPI_SETUP_DBUF(sdata, mdata, SCPI_CL_DVFS, + SCPI_CMD_GET_DVFS_INFO, domain, buf); + ret = scpi_execute_cmd(&sdata); + if (ret) + return ERR_PTR(ret); + + opp = kmalloc(sizeof(*opp), GFP_KERNEL); + if (!opp) + return ERR_PTR(-ENOMEM); + + count = DVFS_OPP_COUNT(buf.header); + freqs_sz = count * sizeof(*(opp->freqs)); + + opp->count = count; + opp->latency = DVFS_LATENCY(buf.header); + opp->freqs = 
kmalloc(freqs_sz, GFP_KERNEL); + if (!opp->freqs) { + kfree(opp); + return ERR_PTR(-ENOMEM); + } + + memcpy(opp->freqs, &buf.freqs[0], freqs_sz); + scpi_opps[domain] = opp; + + return opp; +} +EXPORT_SYMBOL_GPL(scpi_dvfs_get_opps); + +int scpi_dvfs_get_idx(u8 domain) +{ + struct scpi_data_buf sdata; + struct mhu_data_buf mdata; + struct { + u32 status; + u8 dvfs_idx; + } buf; + int ret; + + if (domain >= MAX_DVFS_DOMAINS) + return -EINVAL; + + SCPI_SETUP_DBUF(sdata, mdata, SCPI_CL_DVFS, + SCPI_CMD_GET_DVFS, domain, buf); + ret = scpi_execute_cmd(&sdata); + + if (!ret) + ret = buf.dvfs_idx; + return ret; +} +EXPORT_SYMBOL_GPL(scpi_dvfs_get_idx); + +int scpi_dvfs_set_idx(u8 domain, u8 idx) +{ + struct scpi_data_buf sdata; + struct mhu_data_buf mdata; + struct { + u8 dvfs_domain; + u8 dvfs_idx; + } buf; + int stat; + + buf.dvfs_idx = idx; + buf.dvfs_domain = domain; + + if (domain >= MAX_DVFS_DOMAINS) + return -EINVAL; + + SCPI_SETUP_DBUF(sdata, mdata, SCPI_CL_DVFS, + SCPI_CMD_SET_DVFS, buf, stat); + return scpi_execute_cmd(&sdata); +} +EXPORT_SYMBOL_GPL(scpi_dvfs_set_idx); diff --git a/include/linux/scpi_protocol.h b/include/linux/scpi_protocol.h new file mode 100644 index 000000000000..66e5eb3710ab --- /dev/null +++ b/include/linux/scpi_protocol.h @@ -0,0 +1,30 @@ +/* + * SCPI Message Protocol driver header + * + * Copyright (C) 2014 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ +#include + +struct scpi_opp { + u32 *freqs; + u32 latency; /* in usecs */ + int count; +}; + +unsigned long scpi_clk_get_val(u16 clk_id); +int scpi_clk_set_val(u16 clk_id, unsigned long rate); +int scpi_dvfs_get_idx(u8 domain); +int scpi_dvfs_set_idx(u8 domain, u8 idx); +struct scpi_opp *scpi_dvfs_get_opps(u8 domain); -- cgit v1.2.3 From ffc575699b56dec5ccae3e41bf89862f67319183 Mon Sep 17 00:00:00 2001 From: Jon Medhurst Date: Fri, 27 Jun 2014 16:54:07 +0100 Subject: mailbox: get mhu driver working with new (v7) mailbox framework Signed-off-by: Jon Medhurst --- drivers/mailbox/arm_mhu.c | 65 ++++++++++++++++------------------------- drivers/mailbox/arm_mhu.h | 2 ++ drivers/mailbox/scpi_protocol.c | 6 ++-- 3 files changed, 30 insertions(+), 43 deletions(-) diff --git a/drivers/mailbox/arm_mhu.c b/drivers/mailbox/arm_mhu.c index 841b0cb1b710..6256caae9133 100644 --- a/drivers/mailbox/arm_mhu.c +++ b/drivers/mailbox/arm_mhu.c @@ -46,7 +46,9 @@ #include "arm_mhu.h" -#define DRIVER_NAME CONTROLLER_NAME"_drv" +struct device* the_scpi_device; + +#define DRIVER_NAME "arm_mhu" /* * +--------------------+-------+---------------+ @@ -98,7 +100,6 @@ struct mhu_chan { int index; int rx_irq; - struct mbox_link link; struct mhu_ctlr *ctlr; struct mhu_data_buf *data; }; @@ -111,18 +112,10 @@ struct mhu_ctlr { struct mhu_chan channels[CHANNEL_MAX]; }; -static inline struct mhu_chan *to_mhu_chan(struct mbox_link *lnk) -{ - if (!lnk) - return NULL; - - return container_of(lnk, struct mhu_chan, link); -} - static irqreturn_t mbox_handler(int irq, void *p) { - struct mbox_link *link = (struct mbox_link *)p; - struct mhu_chan *chan = to_mhu_chan(link); + struct mbox_chan *link = (struct mbox_chan *)p; + struct mhu_chan *chan = link->con_priv; struct mhu_ctlr *ctlr = chan->ctlr; void __iomem *mbox_base = ctlr->mbox_base; void __iomem *payload = ctlr->payload_base; @@ -138,15 +131,15 @@ static irqreturn_t mbox_handler(int irq, void *p) data->rx_size); chan->data = NULL; writel(~0, mbox_base + RX_CLEAR(idx)); - mbox_link_received_data(link, data); + mbox_chan_received_data(link, data); } return IRQ_HANDLED; } -static int mhu_send_data(struct mbox_link *link, void *msg) +static int mhu_send_data(struct mbox_chan *link, void *msg) { - struct mhu_chan *chan = to_mhu_chan(link); + struct mhu_chan *chan = link->con_priv; struct mhu_ctlr *ctlr = chan->ctlr; void __iomem *mbox_base = ctlr->mbox_base; void __iomem *payload = ctlr->payload_base; @@ -164,31 +157,27 @@ static int mhu_send_data(struct mbox_link *link, void *msg) return 0; } -static int mhu_startup(struct mbox_link *link, void *ignored) +static int mhu_startup(struct mbox_chan *link) { - struct mhu_chan *chan = to_mhu_chan(link); + struct mhu_chan *chan = link->con_priv; int err, mbox_irq = chan->rx_irq; err = request_threaded_irq(mbox_irq, NULL, mbox_handler, IRQF_ONESHOT, - link->link_name, link); - if (err) - return err; - - chan->data = NULL; - return 0; + DRIVER_NAME, link); + return err; } -static void mhu_shutdown(struct mbox_link *link) +static void mhu_shutdown(struct mbox_chan *link) { - struct mhu_chan *chan = to_mhu_chan(link); + struct mhu_chan *chan = link->con_priv; chan->data = NULL; free_irq(chan->rx_irq, link); } -static bool mhu_last_tx_done(struct mbox_link *link) +static bool mhu_last_tx_done(struct mbox_chan *link) { - struct mhu_chan *chan = to_mhu_chan(link); + struct mhu_chan *chan = link->con_priv; struct mhu_ctlr *ctlr = chan->ctlr; void __iomem *mbox_base = ctlr->mbox_base; int idx = chan->index; @@ -196,7 +185,7 @@ static 
bool mhu_last_tx_done(struct mbox_link *link) return !readl(mbox_base + TX_STATUS(idx)); } -static struct mbox_link_ops mhu_ops = { +static struct mbox_chan_ops mhu_ops = { .send_data = mhu_send_data, .startup = mhu_startup, .shutdown = mhu_shutdown, @@ -208,7 +197,7 @@ static int mhu_probe(struct platform_device *pdev) struct mhu_ctlr *ctlr; struct mhu_chan *chan; struct device *dev = &pdev->dev; - struct mbox_link **l; + struct mbox_chan *l; struct resource *res; int idx; static const char * const channel_names[] = { @@ -249,17 +238,17 @@ static int mhu_probe(struct platform_device *pdev) ctlr->dev = dev; platform_set_drvdata(pdev, ctlr); - l = devm_kzalloc(dev, sizeof(*l) * (CHANNEL_MAX + 1), GFP_KERNEL); + l = devm_kzalloc(dev, sizeof(*l) * CHANNEL_MAX, GFP_KERNEL); if (!l) { dev_err(dev, "failed to allocate memory\n"); return -ENOMEM; } - ctlr->mbox_con.links = l; + ctlr->mbox_con.chans = l; + ctlr->mbox_con.num_chans = CHANNEL_MAX; ctlr->mbox_con.txdone_poll = true; ctlr->mbox_con.txpoll_period = 10; ctlr->mbox_con.ops = &mhu_ops; - snprintf(ctlr->mbox_con.controller_name, 16, CONTROLLER_NAME); ctlr->mbox_con.dev = dev; for (idx = 0; idx < CHANNEL_MAX; idx++) { @@ -272,17 +261,15 @@ static int mhu_probe(struct platform_device *pdev) channel_names[idx]); return -ENXIO; } - l[idx] = &chan->link; - snprintf(l[idx]->link_name, 16, channel_names[idx]); + l[idx].con_priv = chan; } - l[idx] = NULL; if (mbox_controller_register(&ctlr->mbox_con)) { dev_err(dev, "failed to register mailbox controller\n"); return -ENOMEM; } - _dev_info(dev, "registered mailbox controller %s\n", - ctlr->mbox_con.controller_name); + + the_scpi_device = dev; return 0; } @@ -292,9 +279,7 @@ static int mhu_remove(struct platform_device *pdev) struct device *dev = &pdev->dev; mbox_controller_unregister(&ctlr->mbox_con); - _dev_info(dev, "unregistered mailbox controller %s\n", - ctlr->mbox_con.controller_name); - devm_kfree(dev, ctlr->mbox_con.links); + devm_kfree(dev, ctlr->mbox_con.chans); devm_iounmap(dev, ctlr->payload_base); devm_iounmap(dev, ctlr->mbox_base); diff --git a/drivers/mailbox/arm_mhu.h b/drivers/mailbox/arm_mhu.h index 3b5343375c43..f78c1fa369c8 100644 --- a/drivers/mailbox/arm_mhu.h +++ b/drivers/mailbox/arm_mhu.h @@ -29,3 +29,5 @@ struct mhu_data_buf { void *rx_buf; void *cl_data; }; + +extern struct device* the_scpi_device; diff --git a/drivers/mailbox/scpi_protocol.c b/drivers/mailbox/scpi_protocol.c index 7a1ff2dd2687..c8a824a60a43 100644 --- a/drivers/mailbox/scpi_protocol.c +++ b/drivers/mailbox/scpi_protocol.c @@ -165,15 +165,15 @@ static int send_scpi_cmd(struct scpi_data_buf *scpi_buf, bool high_priority) struct mhu_data_buf *data = scpi_buf->data; u32 status; + cl.dev = the_scpi_device; cl.rx_callback = scpi_rx_callback; cl.tx_done = NULL; cl.tx_block = true; cl.tx_tout = 50; /* 50 msec */ - cl.link_data = NULL; cl.knows_txdone = false; cl.chan_name = high_priority ? - CONTROLLER_NAME":"CHANNEL_HIGH_PRIORITY : - CONTROLLER_NAME":"CHANNEL_LOW_PRIORITY; + CHANNEL_HIGH_PRIORITY : + CHANNEL_LOW_PRIORITY; chan = mbox_request_channel(&cl); if (IS_ERR(chan)) -- cgit v1.2.3 From ef14a390957fd886ed593b816a4c6fdad7a34734 Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Thu, 24 Apr 2014 16:58:11 +0100 Subject: clk: add support for clocks provided by system control processor On some ARM based systems, a separate Cortex-M based System Control Processor(SCP) provides the overall power, clock, reset and system control. 
System Control and Power Interface(SCPI) Message Protocol is defined for the communication between the Application Cores(AP) and the SCP. This patch adds support for the clocks provided by the SCP using the SCPI protocol. Signed-off-by: Sudeep Holla Signed-off-by: Jon Medhurst --- Documentation/devicetree/bindings/clock/scpi.txt | 34 +++ drivers/clk/Kconfig | 10 + drivers/clk/Makefile | 1 + drivers/clk/clk-scpi.c | 309 +++++++++++++++++++++++ 4 files changed, 354 insertions(+) create mode 100644 Documentation/devicetree/bindings/clock/scpi.txt create mode 100644 drivers/clk/clk-scpi.c diff --git a/Documentation/devicetree/bindings/clock/scpi.txt b/Documentation/devicetree/bindings/clock/scpi.txt new file mode 100644 index 000000000000..b2b7035018f4 --- /dev/null +++ b/Documentation/devicetree/bindings/clock/scpi.txt @@ -0,0 +1,34 @@ +Device Tree Clock bindings for the clocks based on +System Control and Power Interface (SCPI) Message Protocol + +This binding uses the common clock binding[1]. + +Required properties: +- compatible : shall be one of the following: + "arm,scpi-clks" - for the container node with all the clocks + based on the SCPI protocol + "arm,scpi-clk-indexed" - all the clocks that are variable and index + based. These clocks don't provide the full range between the + limits but only discrete points within the range. The firmware + provides the mapping for each such operating frequency and the + index associated with it. + "arm,scpi-clk-range" - all the clocks that are variable and provide + the full range within the specified limits + +Required properties for all clocks (all from the common clock binding): +- #clock-cells : shall be set to 0 or 1 depending on whether it has a single + or multiple clock outputs. +- clock-output-names : shall be the corresponding names of the outputs. +- clock-indices: The identifying number for the clocks in the node as expected + by the firmware. It can be non-linear and hence provides the mapping + of identifiers into the clock-output-names array. +- frequency-range: The allowed range of clock frequencies supported, specified + in the form of minimum and maximum limits (two u32 fields). + This is required only if compatible is "arm,scpi-clk-range" + +Clock consumers should specify the desired clocks they use with a +"clocks" phandle cell. Consumers should also provide an additional ID +in their clock property. This ID refers to the specific clock in the clock +provider list. + +[1] Documentation/devicetree/bindings/clock/clock-bindings.txt diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index cfd3af7b2cbd..900c33735001 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -32,6 +32,16 @@ config COMMON_CLK_WM831X source "drivers/clk/versatile/Kconfig" +config COMMON_CLK_SCPI + bool "Clock driver controlled via SCPI interface" + depends on ARM_SCPI_PROTOCOL + ---help--- + This driver provides support for clocks that are controlled + by firmware that implements the SCPI interface. + + This driver uses the SCPI Message Protocol to interact with the + firmware providing all the clock controls.
+ config COMMON_CLK_MAX77686 tristate "Clock driver for Maxim 77686 MFD" depends on MFD_MAX77686 diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile index f537a0b1f798..2df0d1717f97 100644 --- a/drivers/clk/Makefile +++ b/drivers/clk/Makefile @@ -29,6 +29,7 @@ obj-$(CONFIG_ARCH_NSPIRE) += clk-nspire.o obj-$(CONFIG_COMMON_CLK_PALMAS) += clk-palmas.o obj-$(CONFIG_CLK_PPC_CORENET) += clk-ppc-corenet.o obj-$(CONFIG_COMMON_CLK_S2MPS11) += clk-s2mps11.o +obj-$(CONFIG_COMMON_CLK_SCPI) += clk-scpi.o obj-$(CONFIG_COMMON_CLK_SI5351) += clk-si5351.o obj-$(CONFIG_COMMON_CLK_SI570) += clk-si570.o obj-$(CONFIG_CLK_TWL6040) += clk-twl6040.o diff --git a/drivers/clk/clk-scpi.c b/drivers/clk/clk-scpi.c new file mode 100644 index 000000000000..2d707663542f --- /dev/null +++ b/drivers/clk/clk-scpi.c @@ -0,0 +1,309 @@ +/* + * System Control and Power Interface (SCPI) Protocol based clock driver + * + * Copyright (C) 2014 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +struct scpi_clk { + u32 id; + const char *name; + struct clk_hw hw; + struct scpi_opp *opps; + unsigned long rate_min; + unsigned long rate_max; +}; + +#define to_scpi_clk(clk) container_of(clk, struct scpi_clk, hw) + +static unsigned long scpi_clk_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct scpi_clk *clk = to_scpi_clk(hw); + return scpi_clk_get_val(clk->id); +} + +static long scpi_clk_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + struct scpi_clk *clk = to_scpi_clk(hw); + if (clk->rate_min && rate < clk->rate_min) + rate = clk->rate_min; + if (clk->rate_max && rate > clk->rate_max) + rate = clk->rate_max; + + return rate; +} + +static int scpi_clk_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct scpi_clk *clk = to_scpi_clk(hw); + return scpi_clk_set_val(clk->id, rate); +} + +static struct clk_ops scpi_clk_ops = { + .recalc_rate = scpi_clk_recalc_rate, + .round_rate = scpi_clk_round_rate, + .set_rate = scpi_clk_set_rate, +}; + +/* find closest match to given frequency in OPP table */ +static int __scpi_dvfs_round_rate(struct scpi_clk *clk, unsigned long rate) +{ + int idx, max_opp = clk->opps->count; + u32 *freqs = clk->opps->freqs; + u32 fmin = 0, fmax = ~0, ftmp; + + for (idx = 0; idx < max_opp; idx++, freqs++) { + ftmp = *freqs; + if (ftmp >= (u32)rate) { + if (ftmp <= fmax) + fmax = ftmp; + } else { + if (ftmp >= fmin) + fmin = ftmp; + } + } + if (fmax != ~0) + return fmax; + else + return fmin; +} + +static unsigned long scpi_dvfs_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct scpi_clk *clk = to_scpi_clk(hw); + int idx = scpi_dvfs_get_idx(clk->id); + u32 *freqs = clk->opps->freqs; + + if (idx < 0) + return 0; + else + return *(freqs + idx); +} + +static long scpi_dvfs_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + struct scpi_clk *clk = to_scpi_clk(hw); + 
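/* Pick a frequency supported by the firmware-provided OPP table (see __scpi_dvfs_round_rate above) */ +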
return __scpi_dvfs_round_rate(clk, rate); +} + +static int __scpi_find_dvfs_index(struct scpi_clk *clk, unsigned long rate) +{ + int idx, max_opp = clk->opps->count; + u32 *freqs = clk->opps->freqs; + + for (idx = 0; idx < max_opp; idx++, freqs++) + if (*freqs == (u32)rate) + break; + return (idx == max_opp) ? -EINVAL : idx; +} + +static int scpi_dvfs_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct scpi_clk *clk = to_scpi_clk(hw); + int ret = __scpi_find_dvfs_index(clk, rate); + + if (ret < 0) + return ret; + else + return scpi_dvfs_set_idx(clk->id, (u8)ret); +} + +static struct clk_ops scpi_dvfs_ops = { + .recalc_rate = scpi_dvfs_recalc_rate, + .round_rate = scpi_dvfs_round_rate, + .set_rate = scpi_dvfs_set_rate, +}; + +static struct clk * +scpi_dvfs_ops_init(struct device *dev, struct device_node *np, + struct scpi_clk *sclk) +{ + struct clk_init_data init; + struct scpi_opp *opp; + + init.name = sclk->name; + init.flags = CLK_IS_ROOT; + init.num_parents = 0; + init.ops = &scpi_dvfs_ops; + sclk->hw.init = &init; + + opp = scpi_dvfs_get_opps(sclk->id); + if (IS_ERR(opp)) + return (struct clk *)opp; + + sclk->opps = opp; + + return devm_clk_register(dev, &sclk->hw); +} + +static struct clk * +scpi_clk_ops_init(struct device *dev, struct device_node *np, + struct scpi_clk *sclk) +{ + struct clk_init_data init; + u32 range[2]; + int ret; + + init.name = sclk->name; + init.flags = CLK_IS_ROOT; + init.num_parents = 0; + init.ops = &scpi_clk_ops; + sclk->hw.init = &init; + + ret = of_property_read_u32_array(np, "frequency-range", range, + ARRAY_SIZE(range)); + if (ret) + return ERR_PTR(ret); + sclk->rate_min = range[0]; + sclk->rate_max = range[1]; + + return devm_clk_register(dev, &sclk->hw); +} + +static int scpi_clk_setup(struct device *dev, struct device_node *np, + const void *data) +{ + struct clk *(*setup_ops)(struct device *, struct device_node *, + struct scpi_clk *) = data; + struct clk_onecell_data *clk_data; + struct clk **clks; + size_t count; + int idx; + + count = of_property_count_strings(np, "clock-output-names"); + if (count < 0) { + dev_err(dev, "%s: invalid clock output count\n", np->name); + return -EINVAL; + } + + clk_data = devm_kmalloc(dev, sizeof(*clk_data), GFP_KERNEL); + if (!clk_data) { + dev_err(dev, "failed to allocate clock provider data\n"); + return -ENOMEM; + } + + clks = devm_kmalloc(dev, count * sizeof(*clks), GFP_KERNEL); + if (!clks) { + dev_err(dev, "failed to allocate clock providers\n"); + return -ENOMEM; + } + + for (idx = 0; idx < count; idx++) { + struct scpi_clk *sclk; + u32 val; + + sclk = devm_kzalloc(dev, sizeof(*sclk), GFP_KERNEL); + if (!sclk) { + dev_err(dev, "failed to allocate scpi clocks\n"); + return -ENOMEM; + } + + if (of_property_read_string_index(np, "clock-output-names", + idx, &sclk->name)) { + dev_err(dev, "invalid clock name @ %s\n", np->name); + return -EINVAL; + } + + if (of_property_read_u32_index(np, "clock-indices", + idx, &val)) { + dev_err(dev, "invalid clock index @ %s\n", np->name); + return -EINVAL; + } + + sclk->id = val; + + clks[idx] = setup_ops(dev, np, sclk); + if (IS_ERR(clks[idx])) { + dev_err(dev, "failed to register clock '%s'\n", + sclk->name); + return PTR_ERR(clks[idx]); + } + + dev_dbg(dev, "Registered clock '%s'\n", sclk->name); + } + + clk_data->clks = clks; + clk_data->clk_num = count; + of_clk_add_provider(np, of_clk_src_onecell_get, clk_data); + + return 0; +} + +static const struct of_device_id clk_match[] = { + { .compatible = "arm,scpi-clk-indexed", .data = 
scpi_dvfs_ops_init, }, + { .compatible = "arm,scpi-clk-range", .data = &scpi_clk_ops_init, }, + {} +}; + +static int scpi_clk_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node, *child; + const struct of_device_id *match; + int ret; + + for_each_child_of_node(np, child) { + match = of_match_node(clk_match, child); + if (!match) + continue; + ret = scpi_clk_setup(dev, child, match->data); + if (ret) + return ret; + } + return 0; +} + +static struct of_device_id scpi_clk_ids[] = { + { .compatible = "arm,scpi-clks", }, + {} +}; + +static struct platform_driver scpi_clk_driver = { + .driver = { + .name = "scpi_clocks", + .of_match_table = scpi_clk_ids, + }, + .probe = scpi_clk_probe, +}; + +static int __init scpi_clk_init(void) +{ + return platform_driver_register(&scpi_clk_driver); +} +postcore_initcall(scpi_clk_init); + +static void __exit scpi_clk_exit(void) +{ + platform_driver_unregister(&scpi_clk_driver); +} +module_exit(scpi_clk_exit); + +MODULE_AUTHOR("Sudeep Holla "); +MODULE_DESCRIPTION("ARM SCPI clock driver"); +MODULE_LICENSE("GPL"); -- cgit v1.2.3 From aeefe021b2d75da3dcf64e7b6f7534fcc528ca40 Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Thu, 8 May 2014 17:47:48 +0100 Subject: cpufreq: arm_big_little: add SPCI interface driver On some ARM based systems, a separate Cortex-M based System Control Processor(SCP) provides the overall power, clock, reset and system control including CPU DVFS. The SCPI Message Protocol is used to communicate with the SCP. This patch adds an interface driver for adding OPPs and registering the arm_big_little cpufreq driver for such systems. Signed-off-by: Sudeep Holla Signed-off-by: Jon Medhurst --- drivers/cpufreq/Kconfig.arm | 9 ++++ drivers/cpufreq/Makefile | 1 + drivers/cpufreq/scpi-cpufreq.c | 99 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 109 insertions(+) create mode 100644 drivers/cpufreq/scpi-cpufreq.c diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 7364a538e056..c3843f06608c 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -24,6 +24,15 @@ config ARM_VEXPRESS_SPC_CPUFREQ This add the CPUfreq driver support for Versatile Express big.LITTLE platforms using SPC for power management. +config ARM_SPCI_CPUFREQ + tristate "SPCI based CPUfreq driver" + depends on ARM_BIG_LITTLE_CPUFREQ && ARM_SCPI_PROTOCOL + help + This adds the CPUfreq driver support for ARM big.LITTLE platforms + using the SCPI interface for CPU power management. + + This driver works only if the firmware supporting CPU DVFS adheres + to the SCPI protocol. config ARM_EXYNOS_CPUFREQ bool diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index db6d9a2fea4d..a1f753763a4a 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -77,6 +77,7 @@ obj-$(CONFIG_ARM_SA1110_CPUFREQ) += sa1110-cpufreq.o obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o obj-$(CONFIG_ARM_TEGRA_CPUFREQ) += tegra-cpufreq.o obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o +obj-$(CONFIG_ARM_SPCI_CPUFREQ) += scpi-cpufreq.o ################################################################################## # PowerPC platform drivers diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c new file mode 100644 index 000000000000..60725199b9aa --- /dev/null +++ b/drivers/cpufreq/scpi-cpufreq.c @@ -0,0 +1,99 @@ +/* + * SCPI CPUFreq Interface driver + * + * It provides the necessary ops to the arm_big_little cpufreq driver.
+ * + * Copyright (C) 2014 ARM Ltd. + * Sudeep Holla + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include + +#include "arm_big_little.h" + +static int scpi_init_opp_table(struct device *cpu_dev) +{ + u8 domain = topology_physical_package_id(cpu_dev->id); + struct scpi_opp *opp; + int idx, ret = 0, max_opp; + u32 *freqs; + + opp = scpi_dvfs_get_opps(domain); + if (IS_ERR(opp)) + return PTR_ERR(opp); + + freqs = opp->freqs; + max_opp = opp->count; + for (idx = 0; idx < max_opp; idx++, freqs++) { + ret = dev_pm_opp_add(cpu_dev, *freqs, 900000000 /* TODO */); + if (ret) { + dev_warn(cpu_dev, "failed to add opp %u\n", *freqs); + return ret; + } + } + return ret; +} + +static int scpi_get_transition_latency(struct device *cpu_dev) +{ + u8 domain = topology_physical_package_id(cpu_dev->id); + struct scpi_opp *opp; + + opp = scpi_dvfs_get_opps(domain); + if (IS_ERR(opp)) + return PTR_ERR(opp); + + return opp->latency * 1000; /* SCPI returns in uS */ +} + +static struct cpufreq_arm_bL_ops scpi_cpufreq_ops = { + .name = "scpi", + .get_transition_latency = scpi_get_transition_latency, + .init_opp_table = scpi_init_opp_table, +}; + +static int scpi_cpufreq_probe(struct platform_device *pdev) +{ + return bL_cpufreq_register(&scpi_cpufreq_ops); +} + +static int scpi_cpufreq_remove(struct platform_device *pdev) +{ + bL_cpufreq_unregister(&scpi_cpufreq_ops); + return 0; +} + +static struct of_device_id scpi_cpufreq_of_match[] = { + { .compatible = "arm,scpi-cpufreq" }, + {}, +}; +MODULE_DEVICE_TABLE(of, scpi_cpufreq_of_match); + +static struct platform_driver scpi_cpufreq_platdrv = { + .driver = { + .name = "scpi-cpufreq", + .owner = THIS_MODULE, + .of_match_table = scpi_cpufreq_of_match, + }, + .probe = scpi_cpufreq_probe, + .remove = scpi_cpufreq_remove, +}; +module_platform_driver(scpi_cpufreq_platdrv); + +MODULE_LICENSE("GPL"); -- cgit v1.2.3 From 1ef8cb084314375c59b7e296b09282b12343d1ee Mon Sep 17 00:00:00 2001 From: Jon Medhurst Date: Wed, 28 May 2014 15:15:49 +0100 Subject: [HACK] cpufreq: arm_big_little: Fall back to getting clock from cpu device The driver in LSK assumes a hard-coded name for cluster clock, as used by vexpress TC2. Modify this to allow also clocks to be obtained from the cpu device; as Juno requires and as seems more like the correct way. 
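Concretely, the fallback described above is a two-step lookup: try the hard-coded "clusterX" system clock name that the LSK/TC2 code registers, and only if that fails ask for the clock bound to the cpu device itself, which is how Juno's SCPI clock ends up being found. A minimal sketch of that ordering, using a hypothetical helper name and assuming the existing name[] buffer, is:

/*
 * Sketch only, not part of the patch below: look up the cluster clock by
 * the legacy "clusterN" system name first, then fall back to the clock
 * attached to the cpu device in the device tree.
 */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static struct clk *example_get_cluster_clk(struct device *cpu_dev,
					   const char *name)
{
	struct clk *clk = clk_get_sys(name, NULL);	/* e.g. "cluster0" (TC2) */

	if (IS_ERR(clk))
		clk = clk_get(cpu_dev, NULL);		/* Juno: clock from the cpu DT node */
	return clk;
}

Trying clk_get_sys() first keeps the existing TC2 behaviour intact while letting DT-based platforms such as Juno supply a per-cpu clock.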
Signed-off-by: Jon Medhurst --- drivers/cpufreq/arm_big_little.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c index a46c223c2506..91bf1c829358 100644 --- a/drivers/cpufreq/arm_big_little.c +++ b/drivers/cpufreq/arm_big_little.c @@ -341,7 +341,9 @@ static int _get_cluster_clk_and_freq_table(struct device *cpu_dev) } name[12] = cluster + '0'; - clk[cluster] = clk_get(cpu_dev, name); + clk[cluster] = clk_get_sys(name, NULL); + if (IS_ERR(clk[cluster])) + clk[cluster] = clk_get(cpu_dev, NULL); if (!IS_ERR(clk[cluster])) { dev_dbg(cpu_dev, "%s: clk: %p & freq table: %p, cluster: %d\n", __func__, clk[cluster], freq_table[cluster], -- cgit v1.2.3 From 94e9e9412ab094560d23c8a4b6bfe35dcc3e4817 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Fri, 9 May 2014 17:24:17 +0100 Subject: arm64: Add big.LITTLE switcher stub The big.LITTLE cpufreq driver is useful on arm64 big.LITTLE systems even without IKS support since it implements support for clusters with shared clocks (a common big.LITTLE configuration). In order to allow it to be built provide the non-IKS stubs for arm64, enabling cpufreq with all the cores available. It may make sense to make an asm-generic version of these stubs instead but given that there's only likely to be these two architectures using the code and asm-generic stubs also need per architecture updates it's probably more trouble than it's worth. Signed-off-by: Mark Brown --- arch/arm64/include/asm/bL_switcher.h | 54 ++++++++++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) create mode 100644 arch/arm64/include/asm/bL_switcher.h diff --git a/arch/arm64/include/asm/bL_switcher.h b/arch/arm64/include/asm/bL_switcher.h new file mode 100644 index 000000000000..2bee500b7f54 --- /dev/null +++ b/arch/arm64/include/asm/bL_switcher.h @@ -0,0 +1,54 @@ +/* + * Based on the stubs for the ARM implementation which is: + * + * Created by: Nicolas Pitre, April 2012 + * Copyright: (C) 2012-2013 Linaro Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef ASM_BL_SWITCHER_H +#define ASM_BL_SWITCHER_H + +#include +#include + +typedef void (*bL_switch_completion_handler)(void *cookie); + +static inline int bL_switch_request(unsigned int cpu, + unsigned int new_cluster_id) +{ + return -ENOTSUPP; +} + +/* + * Register here to be notified about runtime enabling/disabling of + * the switcher. + * + * The notifier chain is called with the switcher activation lock held: + * the switcher will not be enabled or disabled during callbacks. + * Callbacks must not call bL_switcher_{get,put}_enabled(). 
+ */ +#define BL_NOTIFY_PRE_ENABLE 0 +#define BL_NOTIFY_POST_ENABLE 1 +#define BL_NOTIFY_PRE_DISABLE 2 +#define BL_NOTIFY_POST_DISABLE 3 + +static inline int bL_switcher_register_notifier(struct notifier_block *nb) +{ + return 0; +} + +static inline int bL_switcher_unregister_notifier(struct notifier_block *nb) +{ + return 0; +} + +static inline bool bL_switcher_get_enabled(void) { return false; } +static inline void bL_switcher_put_enabled(void) { } +static inline int bL_switcher_trace_trigger(void) { return 0; } +static inline int bL_switcher_get_logical_index(u32 mpidr) { return -EUNATCH; } + +#endif -- cgit v1.2.3 From 0d6ba59e873beffe2460f41ba5cb0fdcad29ecde Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Fri, 9 May 2014 17:40:31 +0100 Subject: cpufreq: Enable big.LITTLE cpufreq driver on arm64 There are arm64 big.LITTLE systems so enable the big.LITTLE cpufreq driver. While IKS is not available for these systems the driver is still useful since it manages clusters with shared frequencies which is the common case for these systems. Long term combining the cpufreq-cpu0 and big.LITTLE drivers may be a more sensible option but that is substantially more complex especially in the case of IKS. Signed-off-by: Mark Brown Acked-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki (cherry picked from commit 4920ab84979d8cd2eb7e3c4fefcc924efabf1cb2) Signed-off-by: Mark Brown Signed-off-by: Jon Medhurst --- drivers/cpufreq/Kconfig.arm | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index c3843f06608c..daf4e516f415 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -5,7 +5,8 @@ # big LITTLE core layer and glue drivers config ARM_BIG_LITTLE_CPUFREQ tristate "Generic ARM big LITTLE CPUfreq driver" - depends on ARM && BIG_LITTLE && ARM_CPU_TOPOLOGY && HAVE_CLK + depends on ARM_CPU_TOPOLOGY || (ARM64 && SMP) + depends on HAVE_CLK select PM_OPP help This enables the Generic CPUfreq driver for ARM big.LITTLE platforms. -- cgit v1.2.3 From 6c981cb128e7be145284c3cc900473675dfd08b3 Mon Sep 17 00:00:00 2001 From: Jon Medhurst Date: Mon, 23 Jun 2014 11:39:09 +0100 Subject: arm64: topology: Use new name for SCHED_POWER_SHIFT Commit ca8ce3d0b1 (sched: Final power vs. capacity cleanups) renamed SCHED_POWER_SHIFT to SCHED_CAPACITY_SHIFT so we need to use that name. Signed-off-by: Jon Medhurst --- arch/arm64/kernel/topology.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c index 7924ecb6faf7..4af25776eb95 100644 --- a/arch/arm64/kernel/topology.c +++ b/arch/arm64/kernel/topology.c @@ -321,10 +321,10 @@ static void __init parse_dt_cpu_power(void) return; else if (4 * max_capacity < (3 * (max_capacity + min_capacity))) middle_capacity = (min_capacity + max_capacity) - >> (SCHED_POWER_SHIFT+1); + >> (SCHED_CAPACITY_SHIFT+1); else middle_capacity = ((max_capacity / 3) - >> (SCHED_POWER_SHIFT-1)) + 1; + >> (SCHED_CAPACITY_SHIFT-1)) + 1; } /* @@ -438,7 +438,7 @@ static void __init reset_cpu_power(void) unsigned int cpu; for_each_possible_cpu(cpu) - set_power_scale(cpu, SCHED_POWER_SCALE); + set_power_scale(cpu, SCHED_CAPACITY_SCALE); } void __init init_cpu_topology(void) -- cgit v1.2.3 From 49a1169f72aba9d69f544a859e62123ebaaa3293 Mon Sep 17 00:00:00 2001 From: Liviu Dudau Date: Tue, 22 Jul 2014 18:34:54 +0100 Subject: mailbox: Pack SCPI structures used for messages. 
The System Control Processor expects data sent in the messages to be contiguous. When using unpacked structures to describe the data being transmitted we increase the overall size of the message, which leads to the SCP rejecting our request. Signed-off-by: Liviu Dudau Signed-off-by: Jon Medhurst --- drivers/mailbox/scpi_protocol.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/mailbox/scpi_protocol.c b/drivers/mailbox/scpi_protocol.c index c8a824a60a43..4e442667ed7f 100644 --- a/drivers/mailbox/scpi_protocol.c +++ b/drivers/mailbox/scpi_protocol.c @@ -228,7 +228,7 @@ unsigned long scpi_clk_get_val(u16 clk_id) { struct scpi_data_buf sdata; struct mhu_data_buf mdata; - struct { + struct __packed { u32 status; u32 clk_rate; } buf; @@ -247,7 +247,7 @@ int scpi_clk_set_val(u16 clk_id, unsigned long rate) struct scpi_data_buf sdata; struct mhu_data_buf mdata; int stat; - struct { + struct __packed { u32 clk_rate; u16 clk_id; } buf; @@ -265,7 +265,7 @@ struct scpi_opp *scpi_dvfs_get_opps(u8 domain) { struct scpi_data_buf sdata; struct mhu_data_buf mdata; - struct { + struct __packed { u32 status; u32 header; u32 freqs[MAX_DVFS_OPPS]; @@ -312,7 +312,7 @@ int scpi_dvfs_get_idx(u8 domain) { struct scpi_data_buf sdata; struct mhu_data_buf mdata; - struct { + struct __packed { u32 status; u8 dvfs_idx; } buf; @@ -335,7 +335,7 @@ int scpi_dvfs_set_idx(u8 domain, u8 idx) { struct scpi_data_buf sdata; struct mhu_data_buf mdata; - struct { + struct __packed { u8 dvfs_domain; u8 dvfs_idx; } buf; -- cgit v1.2.3 From a95f147c430445b124b02c2da0434b1a83099362 Mon Sep 17 00:00:00 2001 From: Liviu Dudau Date: Thu, 24 Jul 2014 11:14:21 +0100 Subject: mailbox: scpi: Free the mailbox channel when we fail to queue a message. When sending an SCPI command we acquire a channel and queue the message in the mailbox. If the queuing failed we were not releasing the channel, hence preventing everyone else from using it. Signed-off-by: Punit Agrawal Signed-off-by: Liviu Dudau Signed-off-by: Jon Medhurst --- drivers/mailbox/scpi_protocol.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/mailbox/scpi_protocol.c b/drivers/mailbox/scpi_protocol.c index 4e442667ed7f..195f86c6fd58 100644 --- a/drivers/mailbox/scpi_protocol.c +++ b/drivers/mailbox/scpi_protocol.c @@ -180,8 +180,10 @@ static int send_scpi_cmd(struct scpi_data_buf *scpi_buf, bool high_priority) return PTR_ERR(chan); init_completion(&scpi_buf->complete); - if (mbox_send_message(chan, (void *)data)) - return -EIO; + if (mbox_send_message(chan, (void *)data) < 0) { + status = SCPI_ERR_TIMEOUT; + goto free_channel; + } if (!wait_for_completion_timeout(&scpi_buf->complete, msecs_to_jiffies(50))) status = SCPI_ERR_TIMEOUT; @@ -189,6 +191,7 @@ static int send_scpi_cmd(struct scpi_data_buf *scpi_buf, bool high_priority) else status = *(u32 *)(data->rx_buf); /* read first word */ +free_channel: mbox_free_channel(chan); return scpi_to_linux_errno(status); -- cgit v1.2.3 From 75b6de03c444eec95f8453c3e05bfa609ca97bf7 Mon Sep 17 00:00:00 2001 From: Liviu Dudau Date: Thu, 24 Jul 2014 12:21:17 +0100 Subject: mailbox: mhu: Acknowledge the interrupt only after data is pushed According to the mailbox documentation the controller should ACK the RX only after it has finished pushing the data up the link.
Signed-off-by: Punit Agrawal Signed-off-by: Liviu Dudau Signed-off-by: Jon Medhurst --- drivers/mailbox/arm_mhu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mailbox/arm_mhu.c b/drivers/mailbox/arm_mhu.c index 6256caae9133..5029af71780d 100644 --- a/drivers/mailbox/arm_mhu.c +++ b/drivers/mailbox/arm_mhu.c @@ -130,8 +130,8 @@ static irqreturn_t mbox_handler(int irq, void *p) memcpy(data->rx_buf, payload + RX_PAYLOAD(idx), data->rx_size); chan->data = NULL; - writel(~0, mbox_base + RX_CLEAR(idx)); mbox_chan_received_data(link, data); + writel(~0, mbox_base + RX_CLEAR(idx)); } return IRQ_HANDLED; -- cgit v1.2.3 From d65170635a519dfc2e9b722af209ce70bc230648 Mon Sep 17 00:00:00 2001 From: Jon Medhurst Date: Tue, 29 Jul 2014 14:02:26 +0100 Subject: mailbox: Remove all message timeouts and block until they complete Neither the mailbox framework nor the scpi_protocol code correctly handles timeouts if a message is subsequently completed by the SCP; in that case they end up accessing no-longer-live stack-based objects. Even if the code was reworked to fix those issues, we are still left with problems with the scpi protocol because a delayed message response may look like a reply to a later message. To hopefully avoid all these problems this patch removes all timeouts and forces things to block until each message completes. Signed-off-by: Jon Medhurst --- drivers/mailbox/arm_mhu.c | 3 +-- drivers/mailbox/scpi_protocol.c | 10 +++------- 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/drivers/mailbox/arm_mhu.c b/drivers/mailbox/arm_mhu.c index 5029af71780d..c6842784e410 100644 --- a/drivers/mailbox/arm_mhu.c +++ b/drivers/mailbox/arm_mhu.c @@ -246,8 +246,7 @@ static int mhu_probe(struct platform_device *pdev) ctlr->mbox_con.chans = l; ctlr->mbox_con.num_chans = CHANNEL_MAX; - ctlr->mbox_con.txdone_poll = true; - ctlr->mbox_con.txpoll_period = 10; + ctlr->mbox_con.txdone_irq = true; ctlr->mbox_con.ops = &mhu_ops; ctlr->mbox_con.dev = dev; diff --git a/drivers/mailbox/scpi_protocol.c b/drivers/mailbox/scpi_protocol.c index 195f86c6fd58..edcf47ea06ab 100644 --- a/drivers/mailbox/scpi_protocol.c +++ b/drivers/mailbox/scpi_protocol.c @@ -168,8 +168,7 @@ static int send_scpi_cmd(struct scpi_data_buf *scpi_buf, bool high_priority) cl.dev = the_scpi_device; cl.rx_callback = scpi_rx_callback; cl.tx_done = NULL; - cl.tx_block = true; - cl.tx_tout = 50; /* 50 msec */ + cl.tx_block = false; cl.knows_txdone = false; cl.chan_name = high_priority ?
CHANNEL_HIGH_PRIORITY : @@ -185,11 +184,8 @@ static int send_scpi_cmd(struct scpi_data_buf *scpi_buf, bool high_priority) goto free_channel; } - if (!wait_for_completion_timeout(&scpi_buf->complete, - msecs_to_jiffies(50))) - status = SCPI_ERR_TIMEOUT; - else - status = *(u32 *)(data->rx_buf); /* read first word */ + wait_for_completion(&scpi_buf->complete); + status = *(u32 *)(data->rx_buf); /* read first word */ free_channel: mbox_free_channel(chan); -- cgit v1.2.3 From d1916dff0a68fdc20989d19e9d867a3ee363e1c6 Mon Sep 17 00:00:00 2001 From: Jon Medhurst Date: Fri, 22 Aug 2014 14:59:45 +0100 Subject: mailbox: mhu: Replace use of devm_request_and_ioremap() This deprecated API is removed in Linux 3.17 Signed-off-by: Jon Medhurst --- drivers/mailbox/arm_mhu.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/mailbox/arm_mhu.c b/drivers/mailbox/arm_mhu.c index c6842784e410..28fb4f01b413 100644 --- a/drivers/mailbox/arm_mhu.c +++ b/drivers/mailbox/arm_mhu.c @@ -217,10 +217,10 @@ static int mhu_probe(struct platform_device *pdev) return -ENXIO; } - ctlr->mbox_base = devm_request_and_ioremap(dev, res); - if (!ctlr->mbox_base) { + ctlr->mbox_base = devm_ioremap_resource(dev, res); + if (IS_ERR(ctlr->mbox_base)) { dev_err(dev, "failed to request or ioremap mailbox control\n"); - return -EADDRNOTAVAIL; + return PTR_ERR(ctlr->mbox_base); } res = platform_get_resource(pdev, IORESOURCE_MEM, 1); @@ -229,10 +229,10 @@ static int mhu_probe(struct platform_device *pdev) return -ENXIO; } - ctlr->payload_base = devm_request_and_ioremap(dev, res); - if (!ctlr->payload_base) { + ctlr->payload_base = devm_ioremap_resource(dev, res); + if (IS_ERR(ctlr->payload_base)) { dev_err(dev, "failed to request or ioremap mailbox payload\n"); - return -EADDRNOTAVAIL; + return PTR_ERR(ctlr->payload_base); } ctlr->dev = dev; -- cgit v1.2.3 From f2407d1325fc2b597bced07953b649462e243cd3 Mon Sep 17 00:00:00 2001 From: Jon Medhurst Date: Fri, 22 Aug 2014 13:07:50 +0100 Subject: mailbox: mhu: Update for new version of mailbox patches Signed-off-by: Jon Medhurst --- drivers/mailbox/scpi_protocol.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/mailbox/scpi_protocol.c b/drivers/mailbox/scpi_protocol.c index edcf47ea06ab..49b500cd87ef 100644 --- a/drivers/mailbox/scpi_protocol.c +++ b/drivers/mailbox/scpi_protocol.c @@ -170,11 +170,8 @@ static int send_scpi_cmd(struct scpi_data_buf *scpi_buf, bool high_priority) cl.tx_done = NULL; cl.tx_block = false; cl.knows_txdone = false; - cl.chan_name = high_priority ? - CHANNEL_HIGH_PRIORITY : - CHANNEL_LOW_PRIORITY; - chan = mbox_request_channel(&cl); + chan = mbox_request_channel(&cl, high_priority); if (IS_ERR(chan)) return PTR_ERR(chan); -- cgit v1.2.3
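Taken together, the patches above expose a small firmware client API, declared in include/linux/scpi_protocol.h; each call is packed into a single command word by PACK_SCPI_CMD (command id, client id and payload size) and sent to the SCP over an MHU mailbox channel by send_scpi_cmd(). The sketch below only illustrates how a kernel client might drive that API; the clock id, DVFS domain number and target rate are made-up example values and are not defined anywhere in this series.

/*
 * Illustrative consumer of the SCPI client API added by this series.
 * The ids and rates below are hypothetical example values.
 */
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/scpi_protocol.h>

static int example_scpi_client(void)
{
	struct scpi_opp *opp;
	int i, ret;

	/* Read and program a clock exported by the SCP (clock id 0 is made up) */
	pr_info("clk 0 runs at %lu Hz\n", scpi_clk_get_val(0));
	ret = scpi_clk_set_val(0, 850000000UL);
	if (ret)
		return ret;

	/* Fetch the OPP table for DVFS domain 0 and list its entries */
	opp = scpi_dvfs_get_opps(0);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	for (i = 0; i < opp->count; i++)
		pr_info("OPP %d: %u\n", i, opp->freqs[i]);

	/* Select the first operating point, then read the current index back */
	ret = scpi_dvfs_set_idx(0, 0);
	if (ret)
		return ret;

	return scpi_dvfs_get_idx(0);
}

Real consumers, such as clk-scpi.c and scpi-cpufreq.c above, obtain the clock ids and DVFS domain numbers from the device tree and CPU topology rather than hard-coding them.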