Commit f2855eec authored by Linus Torvalds

Merge tag 'mailbox-v6.2' of git://git.linaro.org/landing-teams/working/fujitsu/integration

Pull mailbox updates from Jassi Brar:

 - qcom: enable sc8280xp, sm8550 and sm4250 support

 - ti: default to ARCH_K3 for msg manager

 - mediatek:
    - add mt8188 and mt8186 support
    - request irq only after getting ready

 - zynq-ipi: fix error handling after device_register

 - mpfs: check sys-con status

 - rockchip: simplify by using device_get_match_data

* tag 'mailbox-v6.2' of git://git.linaro.org/landing-teams/working/fujitsu/integration:
  dt-bindings: mailbox: qcom-ipcc: Add compatible for SM8550
  mailbox: mtk-cmdq: Do not request irq until we are ready
  mailbox: zynq-ipi: fix error handling while device_register() fails
  mailbox: mtk-cmdq-mailbox: Use platform data directly instead of copying
  mailbox: arm_mhuv2: Fix return value check in mhuv2_probe()
  dt-bindings: mailbox: mediatek,gce-mailbox: add mt8188 compatible name
  dt-bindings: mailbox: add GCE header file for mt8188
  mailbox: mpfs: read the system controller's status
  mailbox: mtk-cmdq: add MT8186 support
  mailbox: mtk-cmdq: add gce ddr enable support flow
  mailbox: mtk-cmdq: add gce software ddr enable private data
  mailbox: mtk-cmdq: Use GCE_CTRL_BY_SW definition instead of number
  mailbox: rockchip: Use device_get_match_data() to simplify the code
  dt-bindings: mailbox: qcom-ipcc: Add sc8280xp compatible
  mailbox: config: ti-msgmgr: Default set to ARCH_K3 for TI msg manager
  mailbox: qcom-apcs-ipc: Add SM4250 APCS IPC support
  dt-bindings: mailbox: qcom: Add SM4250 APCS compatible
......@@ -21,6 +21,7 @@ properties:
- mediatek,mt8173-gce
- mediatek,mt8183-gce
- mediatek,mt8186-gce
- mediatek,mt8188-gce
- mediatek,mt8192-gce
- mediatek,mt8195-gce
......
......@@ -28,6 +28,7 @@ properties:
- qcom,sc8180x-apss-shared
- qcom,sdm660-apcs-hmss-global
- qcom,sdm845-apss-shared
- qcom,sm4250-apcs-hmss-global
- qcom,sm6125-apcs-hmss-global
- qcom,sm6115-apcs-hmss-global
- qcom,sm8150-apss-shared
......
......@@ -24,12 +24,14 @@ properties:
compatible:
items:
- enum:
- qcom,sc7280-ipcc
- qcom,sc8280xp-ipcc
- qcom,sm6350-ipcc
- qcom,sm6375-ipcc
- qcom,sm8250-ipcc
- qcom,sm8350-ipcc
- qcom,sm8450-ipcc
- qcom,sc7280-ipcc
- qcom,sm8550-ipcc
- const: qcom,ipcc
reg:
......
......@@ -136,6 +136,7 @@ config STI_MBOX
config TI_MESSAGE_MANAGER
tristate "Texas Instruments Message Manager Driver"
depends on ARCH_KEYSTONE || ARCH_K3
default ARCH_K3
help
An implementation of Message Manager slave driver for Keystone
and K3 architecture SoCs from Texas Instruments. Message Manager
......
......@@ -1062,8 +1062,8 @@ static int mhuv2_probe(struct amba_device *adev, const struct amba_id *id)
int ret = -EINVAL;
reg = devm_of_iomap(dev, dev->of_node, 0, NULL);
if (!reg)
return -ENOMEM;
if (IS_ERR(reg))
return PTR_ERR(reg);
mhu = devm_kzalloc(dev, sizeof(*mhu), GFP_KERNEL);
if (!mhu)
......
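
The arm_mhuv2 hunk above fixes the return value check after devm_of_iomap(): that helper reports failure with an ERR_PTR-encoded pointer, never NULL, so the old NULL test could not catch a failed mapping. A minimal sketch of the corrected pattern, using an illustrative function name rather than the driver's real probe:

static int example_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct device *dev = &adev->dev;
	void __iomem *reg;

	reg = devm_of_iomap(dev, dev->of_node, 0, NULL);
	if (IS_ERR(reg))		/* e.g. ERR_PTR(-ENOMEM) or ERR_PTR(-EINVAL) */
		return PTR_ERR(reg);	/* propagate the encoded errno */

	/* ... allocate the mhu state and register the mailbox controller ... */
	return 0;
}
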
......@@ -2,7 +2,7 @@
/*
* Microchip PolarFire SoC (MPFS) system controller/mailbox controller driver
*
* Copyright (c) 2020 Microchip Corporation. All rights reserved.
* Copyright (c) 2020-2022 Microchip Corporation. All rights reserved.
*
* Author: Conor Dooley <conor.dooley@microchip.com>
*
......@@ -56,7 +56,7 @@
#define SCB_STATUS_NOTIFY_MASK BIT(SCB_STATUS_NOTIFY)
#define SCB_STATUS_POS (16)
#define SCB_STATUS_MASK GENMASK_ULL(SCB_STATUS_POS + SCB_MASK_WIDTH, SCB_STATUS_POS)
#define SCB_STATUS_MASK GENMASK(SCB_STATUS_POS + SCB_MASK_WIDTH - 1, SCB_STATUS_POS)
struct mpfs_mbox {
struct mbox_controller controller;
......@@ -130,13 +130,38 @@ static void mpfs_mbox_rx_data(struct mbox_chan *chan)
struct mpfs_mbox *mbox = (struct mpfs_mbox *)chan->con_priv;
struct mpfs_mss_response *response = mbox->response;
u16 num_words = ALIGN((response->resp_size), (4)) / 4U;
u32 i;
u32 i, status;
if (!response->resp_msg) {
dev_err(mbox->dev, "failed to assign memory for response %d\n", -ENOMEM);
return;
}
/*
* The status is stored in bits 31:16 of the SERVICES_SR register.
* It is only valid when BUSY == 0.
* We should *never* get an interrupt while the controller is
* still in the busy state. If we do, something has gone badly
* wrong & the content of the mailbox would not be valid.
*/
if (mpfs_mbox_busy(mbox)) {
dev_err(mbox->dev, "got an interrupt but system controller is busy\n");
response->resp_status = 0xDEAD;
return;
}
status = readl_relaxed(mbox->ctrl_base + SERVICES_SR_OFFSET);
/*
* If the status of the individual service is non-zero, the service has
* failed. The contents of the mailbox at this point are not valid,
* so don't bother reading them. Set the status so that the driver
* implementing the service can handle the result.
*/
response->resp_status = (status & SCB_STATUS_MASK) >> SCB_STATUS_POS;
if (response->resp_status)
return;
if (!mpfs_mbox_busy(mbox)) {
for (i = 0; i < num_words; i++) {
response->resp_msg[i] =
......
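
Two things change in the mpfs hunks above: the rx path now bails out early (with a 0xDEAD status) if an interrupt arrives while the controller is still busy, and SCB_STATUS_MASK is corrected. Assuming SCB_MASK_WIDTH is 16 (it is defined outside this hunk), the old GENMASK_ULL(32, 16) covers 17 bits and reaches past a 32-bit register, whereas GENMASK(31, 16) is exactly the documented 31:16 status field. A small stand-alone sketch of the mask arithmetic, with the kernel macro re-created for user space:

#include <stdio.h>

/* user-space re-creation of the kernel's 32-bit GENMASK(), for illustration only */
#define GENMASK(h, l)	((~0U - (1U << (l)) + 1) & (~0U >> (31 - (h))))

#define SCB_STATUS_POS	16
#define SCB_MASK_WIDTH	16	/* assumed; defined elsewhere in the driver */
#define SCB_STATUS_MASK	GENMASK(SCB_STATUS_POS + SCB_MASK_WIDTH - 1, SCB_STATUS_POS)

int main(void)
{
	unsigned int services_sr = 0x00050000;	/* pretend SERVICES_SR readout */

	printf("mask   = 0x%08x\n", SCB_STATUS_MASK);	/* 0xffff0000 */
	printf("status = 0x%x\n",
	       (services_sr & SCB_STATUS_MASK) >> SCB_STATUS_POS);	/* 0x5 */
	return 0;
}

A non-zero extracted status now makes mpfs_mbox_rx_data() skip reading the mailbox and hand the failure code to the service driver instead.
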
......@@ -38,6 +38,8 @@
#define CMDQ_THR_PRIORITY 0x40
#define GCE_GCTL_VALUE 0x48
#define GCE_CTRL_BY_SW GENMASK(2, 0)
#define GCE_DDR_EN GENMASK(18, 16)
#define CMDQ_THR_ACTIVE_SLOT_CYCLES 0x3200
#define CMDQ_THR_ENABLED 0x1
......@@ -73,28 +75,38 @@ struct cmdq {
struct mbox_controller mbox;
void __iomem *base;
int irq;
u32 thread_nr;
u32 irq_mask;
const struct gce_plat *pdata;
struct cmdq_thread *thread;
struct clk_bulk_data clocks[CMDQ_GCE_NUM_MAX];
bool suspended;
u8 shift_pa;
bool control_by_sw;
u32 gce_num;
};
struct gce_plat {
u32 thread_nr;
u8 shift;
bool control_by_sw;
bool sw_ddr_en;
u32 gce_num;
};
static void cmdq_sw_ddr_enable(struct cmdq *cmdq, bool enable)
{
WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));
if (enable)
writel(GCE_DDR_EN | GCE_CTRL_BY_SW, cmdq->base + GCE_GCTL_VALUE);
else
writel(GCE_CTRL_BY_SW, cmdq->base + GCE_GCTL_VALUE);
clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
}
u8 cmdq_get_shift_pa(struct mbox_chan *chan)
{
struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox);
return cmdq->shift_pa;
return cmdq->pdata->shift;
}
EXPORT_SYMBOL(cmdq_get_shift_pa);
......@@ -126,14 +138,21 @@ static void cmdq_thread_resume(struct cmdq_thread *thread)
static void cmdq_init(struct cmdq *cmdq)
{
int i;
u32 gctl_regval = 0;
WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));
if (cmdq->pdata->control_by_sw)
gctl_regval = GCE_CTRL_BY_SW;
if (cmdq->pdata->sw_ddr_en)
gctl_regval |= GCE_DDR_EN;
if (gctl_regval)
writel(gctl_regval, cmdq->base + GCE_GCTL_VALUE);
WARN_ON(clk_bulk_enable(cmdq->gce_num, cmdq->clocks));
if (cmdq->control_by_sw)
writel(0x7, cmdq->base + GCE_GCTL_VALUE);
writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES);
for (i = 0; i <= CMDQ_MAX_EVENT; i++)
writel(i, cmdq->base + CMDQ_SYNC_TOKEN_UPDATE);
clk_bulk_disable(cmdq->gce_num, cmdq->clocks);
clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
}
static int cmdq_thread_reset(struct cmdq *cmdq, struct cmdq_thread *thread)
......@@ -178,7 +197,7 @@ static void cmdq_task_insert_into_thread(struct cmdq_task *task)
prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
prev_task_base[CMDQ_NUM_CMD(prev_task->pkt) - 1] =
(u64)CMDQ_JUMP_BY_PA << 32 |
(task->pa_base >> task->cmdq->shift_pa);
(task->pa_base >> task->cmdq->pdata->shift);
dma_sync_single_for_device(dev, prev_task->pa_base,
prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
......@@ -212,7 +231,7 @@ static void cmdq_task_handle_error(struct cmdq_task *task)
next_task = list_first_entry_or_null(&thread->task_busy_list,
struct cmdq_task, list_entry);
if (next_task)
writel(next_task->pa_base >> cmdq->shift_pa,
writel(next_task->pa_base >> cmdq->pdata->shift,
thread->base + CMDQ_THR_CURR_ADDR);
cmdq_thread_resume(thread);
}
......@@ -243,7 +262,7 @@ static void cmdq_thread_irq_handler(struct cmdq *cmdq,
else
return;
curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) << cmdq->shift_pa;
curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) << cmdq->pdata->shift;
list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
list_entry) {
......@@ -266,7 +285,7 @@ static void cmdq_thread_irq_handler(struct cmdq *cmdq,
if (list_empty(&thread->task_busy_list)) {
cmdq_thread_disable(cmdq, thread);
clk_bulk_disable(cmdq->gce_num, cmdq->clocks);
clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
}
}
......@@ -280,7 +299,7 @@ static irqreturn_t cmdq_irq_handler(int irq, void *dev)
if (!(irq_status ^ cmdq->irq_mask))
return IRQ_NONE;
for_each_clear_bit(bit, &irq_status, cmdq->thread_nr) {
for_each_clear_bit(bit, &irq_status, cmdq->pdata->thread_nr) {
struct cmdq_thread *thread = &cmdq->thread[bit];
spin_lock_irqsave(&thread->chan->lock, flags);
......@@ -300,7 +319,7 @@ static int cmdq_suspend(struct device *dev)
cmdq->suspended = true;
for (i = 0; i < cmdq->thread_nr; i++) {
for (i = 0; i < cmdq->pdata->thread_nr; i++) {
thread = &cmdq->thread[i];
if (!list_empty(&thread->task_busy_list)) {
task_running = true;
......@@ -311,7 +330,10 @@ static int cmdq_suspend(struct device *dev)
if (task_running)
dev_warn(dev, "exist running task(s) in suspend\n");
clk_bulk_unprepare(cmdq->gce_num, cmdq->clocks);
if (cmdq->pdata->sw_ddr_en)
cmdq_sw_ddr_enable(cmdq, false);
clk_bulk_unprepare(cmdq->pdata->gce_num, cmdq->clocks);
return 0;
}
......@@ -320,8 +342,12 @@ static int cmdq_resume(struct device *dev)
{
struct cmdq *cmdq = dev_get_drvdata(dev);
WARN_ON(clk_bulk_prepare(cmdq->gce_num, cmdq->clocks));
WARN_ON(clk_bulk_prepare(cmdq->pdata->gce_num, cmdq->clocks));
cmdq->suspended = false;
if (cmdq->pdata->sw_ddr_en)
cmdq_sw_ddr_enable(cmdq, true);
return 0;
}
......@@ -329,7 +355,10 @@ static int cmdq_remove(struct platform_device *pdev)
{
struct cmdq *cmdq = platform_get_drvdata(pdev);
clk_bulk_unprepare(cmdq->gce_num, cmdq->clocks);
if (cmdq->pdata->sw_ddr_en)
cmdq_sw_ddr_enable(cmdq, false);
clk_bulk_unprepare(cmdq->pdata->gce_num, cmdq->clocks);
return 0;
}
......@@ -355,7 +384,7 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
task->pkt = pkt;
if (list_empty(&thread->task_busy_list)) {
WARN_ON(clk_bulk_enable(cmdq->gce_num, cmdq->clocks));
WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));
/*
* The thread reset will clear thread related register to 0,
......@@ -365,9 +394,9 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
*/
WARN_ON(cmdq_thread_reset(cmdq, thread) < 0);
writel(task->pa_base >> cmdq->shift_pa,
writel(task->pa_base >> cmdq->pdata->shift,
thread->base + CMDQ_THR_CURR_ADDR);
writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->shift_pa,
writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->pdata->shift,
thread->base + CMDQ_THR_END_ADDR);
writel(thread->priority, thread->base + CMDQ_THR_PRIORITY);
......@@ -376,20 +405,20 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
} else {
WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) <<
cmdq->shift_pa;
cmdq->pdata->shift;
end_pa = readl(thread->base + CMDQ_THR_END_ADDR) <<
cmdq->shift_pa;
cmdq->pdata->shift;
/* check boundary */
if (curr_pa == end_pa - CMDQ_INST_SIZE ||
curr_pa == end_pa) {
/* set to this task directly */
writel(task->pa_base >> cmdq->shift_pa,
writel(task->pa_base >> cmdq->pdata->shift,
thread->base + CMDQ_THR_CURR_ADDR);
} else {
cmdq_task_insert_into_thread(task);
smp_mb(); /* modify jump before enable thread */
}
writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->shift_pa,
writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->pdata->shift,
thread->base + CMDQ_THR_END_ADDR);
cmdq_thread_resume(thread);
}
......@@ -428,7 +457,7 @@ static void cmdq_mbox_shutdown(struct mbox_chan *chan)
}
cmdq_thread_disable(cmdq, thread);
clk_bulk_disable(cmdq->gce_num, cmdq->clocks);
clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
done:
/*
......@@ -468,7 +497,7 @@ static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
cmdq_thread_resume(thread);
cmdq_thread_disable(cmdq, thread);
clk_bulk_disable(cmdq->gce_num, cmdq->clocks);
clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
out:
spin_unlock_irqrestore(&thread->chan->lock, flags);
......@@ -515,7 +544,6 @@ static int cmdq_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct cmdq *cmdq;
int err, i;
struct gce_plat *plat_data;
struct device_node *phandle = dev->of_node;
struct device_node *node;
int alias_id = 0;
......@@ -534,31 +562,21 @@ static int cmdq_probe(struct platform_device *pdev)
if (cmdq->irq < 0)
return cmdq->irq;
plat_data = (struct gce_plat *)of_device_get_match_data(dev);
if (!plat_data) {
cmdq->pdata = device_get_match_data(dev);
if (!cmdq->pdata) {
dev_err(dev, "failed to get match data\n");
return -EINVAL;
}
cmdq->thread_nr = plat_data->thread_nr;
cmdq->shift_pa = plat_data->shift;
cmdq->control_by_sw = plat_data->control_by_sw;
cmdq->gce_num = plat_data->gce_num;
cmdq->irq_mask = GENMASK(cmdq->thread_nr - 1, 0);
err = devm_request_irq(dev, cmdq->irq, cmdq_irq_handler, IRQF_SHARED,
"mtk_cmdq", cmdq);
if (err < 0) {
dev_err(dev, "failed to register ISR (%d)\n", err);
return err;
}
cmdq->irq_mask = GENMASK(cmdq->pdata->thread_nr - 1, 0);
dev_dbg(dev, "cmdq device: addr:0x%p, va:0x%p, irq:%d\n",
dev, cmdq->base, cmdq->irq);
if (cmdq->gce_num > 1) {
if (cmdq->pdata->gce_num > 1) {
for_each_child_of_node(phandle->parent, node) {
alias_id = of_alias_get_id(node, clk_name);
if (alias_id >= 0 && alias_id < cmdq->gce_num) {
if (alias_id >= 0 && alias_id < cmdq->pdata->gce_num) {
cmdq->clocks[alias_id].id = clk_names[alias_id];
cmdq->clocks[alias_id].clk = of_clk_get(node, 0);
if (IS_ERR(cmdq->clocks[alias_id].clk)) {
......@@ -580,12 +598,12 @@ static int cmdq_probe(struct platform_device *pdev)
}
cmdq->mbox.dev = dev;
cmdq->mbox.chans = devm_kcalloc(dev, cmdq->thread_nr,
cmdq->mbox.chans = devm_kcalloc(dev, cmdq->pdata->thread_nr,
sizeof(*cmdq->mbox.chans), GFP_KERNEL);
if (!cmdq->mbox.chans)
return -ENOMEM;
cmdq->mbox.num_chans = cmdq->thread_nr;
cmdq->mbox.num_chans = cmdq->pdata->thread_nr;
cmdq->mbox.ops = &cmdq_mbox_chan_ops;
cmdq->mbox.of_xlate = cmdq_xlate;
......@@ -593,12 +611,12 @@ static int cmdq_probe(struct platform_device *pdev)
cmdq->mbox.txdone_irq = false;
cmdq->mbox.txdone_poll = false;
cmdq->thread = devm_kcalloc(dev, cmdq->thread_nr,
cmdq->thread = devm_kcalloc(dev, cmdq->pdata->thread_nr,
sizeof(*cmdq->thread), GFP_KERNEL);
if (!cmdq->thread)
return -ENOMEM;
for (i = 0; i < cmdq->thread_nr; i++) {
for (i = 0; i < cmdq->pdata->thread_nr; i++) {
cmdq->thread[i].base = cmdq->base + CMDQ_THR_BASE +
CMDQ_THR_SIZE * i;
INIT_LIST_HEAD(&cmdq->thread[i].task_busy_list);
......@@ -613,10 +631,17 @@ static int cmdq_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, cmdq);
WARN_ON(clk_bulk_prepare(cmdq->gce_num, cmdq->clocks));
WARN_ON(clk_bulk_prepare(cmdq->pdata->gce_num, cmdq->clocks));
cmdq_init(cmdq);
err = devm_request_irq(dev, cmdq->irq, cmdq_irq_handler, IRQF_SHARED,
"mtk_cmdq", cmdq);
if (err < 0) {
dev_err(dev, "failed to register ISR (%d)\n", err);
return err;
}
return 0;
}
......@@ -660,9 +685,18 @@ static const struct gce_plat gce_plat_v6 = {
.gce_num = 2
};
static const struct gce_plat gce_plat_v7 = {
.thread_nr = 24,
.shift = 3,
.control_by_sw = true,
.sw_ddr_en = true,
.gce_num = 1
};
static const struct of_device_id cmdq_of_ids[] = {
{.compatible = "mediatek,mt8173-gce", .data = (void *)&gce_plat_v2},
{.compatible = "mediatek,mt8183-gce", .data = (void *)&gce_plat_v3},
{.compatible = "mediatek,mt8186-gce", .data = (void *)&gce_plat_v7},
{.compatible = "mediatek,mt6779-gce", .data = (void *)&gce_plat_v4},
{.compatible = "mediatek,mt8192-gce", .data = (void *)&gce_plat_v5},
{.compatible = "mediatek,mt8195-gce", .data = (void *)&gce_plat_v6},
......
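
Most of the churn in the mtk-cmdq hunks above comes from two structural changes: probe() now keeps the matched per-SoC data as a single const pointer (cmdq->pdata) instead of copying thread_nr, shift, control_by_sw and gce_num into struct cmdq, and devm_request_irq() is moved to the end of probe so the shared ISR cannot run against a half-initialised controller. A condensed, hypothetical probe tail showing both; clock lookup, mailbox registration and error unwinding are elided:

static int example_probe_tail(struct platform_device *pdev, struct cmdq *cmdq)
{
	struct device *dev = &pdev->dev;
	int err;

	/* one pointer assignment replaces the old field-by-field copies */
	cmdq->pdata = device_get_match_data(dev);
	if (!cmdq->pdata)
		return -EINVAL;
	cmdq->irq_mask = GENMASK(cmdq->pdata->thread_nr - 1, 0);

	/* ... channel/thread setup using cmdq->pdata->thread_nr etc. ... */

	WARN_ON(clk_bulk_prepare(cmdq->pdata->gce_num, cmdq->clocks));
	cmdq_init(cmdq);	/* programs GCE_GCTL_VALUE and the sync tokens */

	/* only now is the interrupt requested, after the hardware is ready */
	err = devm_request_irq(dev, cmdq->irq, cmdq_irq_handler, IRQF_SHARED,
			       "mtk_cmdq", cmdq);
	if (err < 0) {
		dev_err(dev, "failed to register ISR (%d)\n", err);
		return err;
	}

	return 0;
}

On the new mt8186 platform data (gce_plat_v7, with control_by_sw and sw_ddr_en both set), cmdq_init() writes GCE_CTRL_BY_SW | GCE_DDR_EN, i.e. 0x7 | 0x70000 = 0x70007, to GCE_GCTL_VALUE, and the suspend/resume/remove paths toggle the DDR enable bits through cmdq_sw_ddr_enable().
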
......@@ -156,6 +156,7 @@ static const struct of_device_id qcom_apcs_ipc_of_match[] = {
{ .compatible = "qcom,sc8180x-apss-shared", .data = &apps_shared_apcs_data },
{ .compatible = "qcom,sdm660-apcs-hmss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,sdm845-apss-shared", .data = &apps_shared_apcs_data },
{ .compatible = "qcom,sm4250-apcs-hmss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,sm6125-apcs-hmss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,sm8150-apss-shared", .data = &apps_shared_apcs_data },
{ .compatible = "qcom,sm6115-apcs-hmss-global", .data = &msm8994_apcs_data },
......
......@@ -164,7 +164,6 @@ MODULE_DEVICE_TABLE(of, rockchp_mbox_of_match);
static int rockchip_mbox_probe(struct platform_device *pdev)
{
struct rockchip_mbox *mb;
const struct of_device_id *match;
const struct rockchip_mbox_data *drv_data;
struct resource *res;
int ret, irq, i;
......@@ -172,8 +171,7 @@ static int rockchip_mbox_probe(struct platform_device *pdev)
if (!pdev->dev.of_node)
return -ENODEV;
match = of_match_node(rockchip_mbox_of_match, pdev->dev.of_node);
drv_data = (const struct rockchip_mbox_data *)match->data;
drv_data = (const struct rockchip_mbox_data *) device_get_match_data(&pdev->dev);
mb = devm_kzalloc(&pdev->dev, sizeof(*mb), GFP_KERNEL);
if (!mb)
......
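
The rockchip hunk is the same device_get_match_data() simplification used by mtk-cmdq: it drops the intermediate of_device_id lookup and reads the driver data straight from the device. A minimal sketch of the pattern, with the surrounding probe code elided (the extra NULL check is a precaution, not part of the hunk above):

static int example_probe(struct platform_device *pdev)
{
	const struct rockchip_mbox_data *drv_data;

	if (!pdev->dev.of_node)
		return -ENODEV;

	/* replaces of_match_node() followed by dereferencing match->data */
	drv_data = device_get_match_data(&pdev->dev);
	if (!drv_data)
		return -EINVAL;

	/* ... allocate struct rockchip_mbox and configure it from drv_data ... */
	return 0;
}
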
......@@ -493,6 +493,7 @@ static int zynqmp_ipi_mbox_probe(struct zynqmp_ipi_mbox *ipi_mbox,
ret = device_register(&ipi_mbox->dev);
if (ret) {
dev_err(dev, "Failed to register ipi mbox dev.\n");
put_device(&ipi_mbox->dev);
return ret;
}
mdev = &ipi_mbox->dev;
......@@ -619,7 +620,8 @@ static void zynqmp_ipi_free_mboxes(struct zynqmp_ipi_pdata *pdata)
ipi_mbox = &pdata->ipi_mboxes[i];
if (ipi_mbox->dev.parent) {
mbox_controller_unregister(&ipi_mbox->mbox);
device_unregister(&ipi_mbox->dev);
if (device_is_registered(&ipi_mbox->dev))
device_unregister(&ipi_mbox->dev);
}
}
}
......
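
The zynq-ipi fix follows the documented device_register() contract: when registration fails, the embedded kobject already holds a reference, so the caller must drop it with put_device() rather than freeing the device directly, and the teardown path above only unregisters mailboxes whose device actually made it through registration. A minimal sketch of the error leg, with illustrative names:

static int example_register(struct device *parent, struct device *child)
{
	int ret;

	child->parent = parent;
	ret = device_register(child);	/* device_initialize() + device_add() */
	if (ret) {
		dev_err(parent, "Failed to register ipi mbox dev.\n");
		put_device(child);	/* never free directly after a failed register */
		return ret;
	}

	return 0;
}
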
This diff is collapsed.