author     Wendy Liang <wendy.liang@xilinx.com>       2017-04-13 09:17:19 -0700
committer  Michal Simek <michal.simek@xilinx.com>     2017-04-18 09:24:09 +0200
commit     87385123930706de28354fcd66801349e78c0d32 (patch)
tree       508ccd46e6176f1ddcf267ac2a2cb395e642f1aa
parent     fcb6a67661a0845b8b8c96e441f8936fa3384e2b (diff)
remoteproc: zynqmp_r5: Allow multiple pd_ids for single mem pool
When the remoteproc runs in lockstep mode, the TCM memories are
viewed as contiguous, and thus there should be just a single memory
pool for the four TCM banks. However, each TCM bank has its own
power domain ID.
Here is the representation of the RPU lockstep TCM in the device tree:
r5_tcm: tcm@ffe00000 {
	compatible = "mmio-sram";
	reg = <0 0xFFE00000 0x0 0x40000>;
	pd-handle = <&pd_tcm_0_a
		     &pd_tcm_0_b
		     &pd_tcm_1_a
		     &pd_tcm_1_b>;
};
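Each pd-handle phandle above points to a power-domain node carrying a
"pd-id" cell, which is what the driver reads for each TCM bank. A minimal
sketch of one such node, with a purely illustrative pd-id value (not taken
from this patch):

pd_tcm_0_a: pd_tcm_0_a {
	/* pd-id value below is illustrative only */
	pd-id = <0xf>;
};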
This patch allows a single firmware memory pool to have multiple
power domain IDs.
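Distilled from the diff below (a sketch for orientation, not a drop-in
excerpt): the single pd_id field of the on-chip memory pool element becomes
a list head, with one list element per "pd-handle" phandle.

#include <linux/list.h>
#include <linux/types.h>

struct gen_pool;

/* Power domain id list element: one entry per "pd-handle" phandle */
struct pd_id_st {
	struct list_head node;
	u32 id;
};

/* On-chip memory pool element: the old u32 pd_id becomes a list of pd_id_st */
struct mem_pool_st {
	struct list_head node;
	struct gen_pool *pool;
	struct list_head pd_ids;
};

r5_request_tcm() and r5_release_tcm() then walk mem_node->pd_ids with
list_for_each_entry() and issue one zynqmp_pm_request_node() /
zynqmp_pm_release_node() call per ID, while probe fills the list by calling
of_parse_phandle() with an increasing index until it returns NULL.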
Signed-off-by: Wendy Liang <jliang@xilinx.com>
Signed-off-by: Michal Simek <michal.simek@xilinx.com>
 drivers/remoteproc/zynqmp_r5_remoteproc.c | 50
 1 file changed, 38 insertions(+), 12 deletions(-)
diff --git a/drivers/remoteproc/zynqmp_r5_remoteproc.c b/drivers/remoteproc/zynqmp_r5_remoteproc.c
index a060890cd1a400..d33528401846ee 100644
--- a/drivers/remoteproc/zynqmp_r5_remoteproc.c
+++ b/drivers/remoteproc/zynqmp_r5_remoteproc.c
@@ -104,11 +104,17 @@ enum rpu_core_conf {
 	SPLIT,
 };
 
+/* Power domain id list element */
+struct pd_id_st {
+	struct list_head node;
+	u32 id;
+};
+
 /* On-chip memory pool element */
 struct mem_pool_st {
 	struct list_head node;
 	struct gen_pool *pool;
-	u32 pd_id;
+	struct list_head pd_ids;
 };
 
 /**
@@ -232,8 +238,10 @@ static int r5_request_tcm(struct zynqmp_r5_rproc_pdata *pdata)
 
 	r5_mode_config(pdata);
 	list_for_each_entry(mem_node, &pdata->mem_pools, node) {
-		if (mem_node->pd_id)
-			zynqmp_pm_request_node(mem_node->pd_id,
+		struct pd_id_st *pd_id;
+
+		list_for_each_entry(pd_id, &mem_node->pd_ids, node)
+			zynqmp_pm_request_node(pd_id->id,
 				ZYNQMP_PM_CAPABILITY_ACCESS, 0,
 				ZYNQMP_PM_REQUEST_ACK_BLOCKING);
 	}
@@ -253,8 +261,10 @@ static void r5_release_tcm(struct zynqmp_r5_rproc_pdata *pdata)
 	struct mem_pool_st *mem_node;
 
 	list_for_each_entry(mem_node, &pdata->mem_pools, node) {
-		if (mem_node->pd_id)
-			zynqmp_pm_release_node(mem_node->pd_id);
+		struct pd_id_st *pd_id;
+
+		list_for_each_entry(pd_id, &mem_node->pd_ids, node)
+			zynqmp_pm_release_node(pd_id->id);
 	}
 }
 
@@ -663,15 +673,31 @@ static int zynqmp_r5_remoteproc_probe(struct platform_device *pdev)
 					mem_name, 0);
 			if (tmp_node) {
 				struct device_node *pd_node;
-
-				pd_node = of_parse_phandle(tmp_node,
-						"pd-handle", 0);
-				if (pd_node)
+				struct pd_id_st *pd_id;
+				int j;
+
+				INIT_LIST_HEAD(&mem_node->pd_ids);
+				for (j = 0; ; j++) {
+					pd_node = of_parse_phandle(tmp_node,
+							"pd-handle", j);
+					if (!pd_node)
+						break;
+					pd_id = devm_kzalloc(&pdev->dev,
+							sizeof(*pd_id),
+							GFP_KERNEL);
+					if (!pd_id) {
+						ret = -ENOMEM;
+						goto rproc_fault;
+					}
 					of_property_read_u32(pd_node,
-						"pd-id", &mem_node->pd_id);
+						"pd-id", &pd_id->id);
+					list_add_tail(&pd_id->node,
+						&mem_node->pd_ids);
+					dev_dbg(&pdev->dev,
+						"mem[%d] pd_id = %d.\n",
+						i, pd_id->id);
+				}
 			}
-			dev_dbg(&pdev->dev, "mem[%d] pd_id = %d.\n",
-				i, mem_node->pd_id);
 			list_add_tail(&mem_node->node, &local->mem_pools);
 		}
 	}