aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorWingMan Kwok <w-kwok2@ti.com>2012-08-08 11:58:36 -0400
committerCyril Chemparathy <cyril@ti.com>2012-09-21 10:44:15 -0400
commit20d0a30ad28e1eb4952e4e764c3ec29e72c7b860 (patch)
tree9ad25ce04aa5f00bbbe3890466b0b5106dd44551
parent349829da60edc631f45fc9de427641e88ef78c4d (diff)
downloadlinux-keystone-20d0a30ad28e1eb4952e4e764c3ec29e72c7b860.tar.gz
rapidio: keystone: add rapid io driver
Signed-off-by: WingMan Kwok <w-kwok2@ti.com>
Signed-off-by: Aurelien Jacquiot <a-jacquiot@ti.com>
-rw-r--r--drivers/rapidio/devices/Kconfig7
-rw-r--r--drivers/rapidio/devices/Makefile1
-rw-r--r--drivers/rapidio/devices/keystone_rio.c1949
-rw-r--r--drivers/rapidio/devices/keystone_rio.h624
4 files changed, 2581 insertions, 0 deletions
diff --git a/drivers/rapidio/devices/Kconfig b/drivers/rapidio/devices/Kconfig
index 12a9d7f7040b6c..9b50bf2aed1139 100644
--- a/drivers/rapidio/devices/Kconfig
+++ b/drivers/rapidio/devices/Kconfig
@@ -8,3 +8,10 @@ config RAPIDIO_TSI721
default "n"
---help---
Include support for IDT Tsi721 PCI Express Serial RapidIO controller.
+
+config TI_KEYSTONE_RAPIDIO
+ tristate "TI Keystone RapidIO support"
+ depends on RAPIDIO && TI_KEYSTONE
+ default "n"
+ ---help---
+ This driver supports TI's Keystone RapidIO.
diff --git a/drivers/rapidio/devices/Makefile b/drivers/rapidio/devices/Makefile
index 7b62860f34f805..3d5c42e769cd2b 100644
--- a/drivers/rapidio/devices/Makefile
+++ b/drivers/rapidio/devices/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_RAPIDIO_TSI721) += tsi721.o
ifeq ($(CONFIG_RAPIDIO_DMA_ENGINE),y)
obj-$(CONFIG_RAPIDIO_TSI721) += tsi721_dma.o
endif
+obj-$(CONFIG_TI_KEYSTONE_RAPIDIO) += keystone_rio.o
diff --git a/drivers/rapidio/devices/keystone_rio.c b/drivers/rapidio/devices/keystone_rio.c
new file mode 100644
index 00000000000000..656d56815fab16
--- /dev/null
+++ b/drivers/rapidio/devices/keystone_rio.c
@@ -0,0 +1,1949 @@
+/*
+ * Copyright (C) 2010, 2011, 2012 Texas Instruments Incorporated
+ * Authors: Aurelien Jacquiot <a-jacquiot@ti.com>
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated
+ * WingMan Kwok <w-kwok2@ti.com>
+ * - Updated for support on TI KeyStone platform.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/timer.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/hardirq.h>
+#include <linux/rio.h>
+#include <linux/rio_drv.h>
+#include <linux/keystone-dma.h>
+#include "keystone_rio.h"
+
+#define DRIVER_VER "v1.1"
+
+/*
+ * Main KeyStone RapidIO driver data.
+ * One instance per controller; holds register mappings, DMA channels
+ * and mailbox state shared by the whole driver.
+ */
+struct keystone_rio_data {
+ struct device *dev; /* underlying platform device */
+
+ struct clk *clk; /* sRIO functional clock */
+ struct completion lsu_completion;
+ struct mutex lsu_lock; /* serializes LSU maintenance requests */
+ u32 rio_pe_feat; /* cached processing element features (see hw_init) */
+
+ u32 ports_registering;
+ u32 port_chk_cnt;
+ struct work_struct port_chk_task;
+ struct timer_list timer;
+ struct tasklet_struct task; /* RX deferred-processing tasklet */
+ unsigned long rxu_map_bitmap[2]; /* allocation bitmap for RXU map entries */
+#ifdef CONFIG_RIONET
+ u32 rionet_started;
+#endif
+
+ struct dma_chan *tx_channel;
+ const char *tx_chan_name;
+ u32 tx_queue_depth;
+
+ struct keystone_rio_mbox_info tx_mbox[KEYSTONE_RIO_MAX_MBOX];
+ struct keystone_rio_mbox_info rx_mbox[KEYSTONE_RIO_MAX_MBOX];
+
+ /* per-mailbox RX DMA channel state, indexed by mailbox number */
+ struct keystone_rio_rx_chan_info rx_channels[KEYSTONE_RIO_MAX_MBOX];
+
+ /* memory-mapped register regions */
+ u32 __iomem *jtagid_reg;
+ u32 __iomem *serdes_sts_reg;
+ struct keystone_srio_serdes_regs __iomem *serdes_regs;
+ struct keystone_rio_regs __iomem *regs;
+
+ struct keystone_rio_car_csr_regs __iomem *car_csr_regs;
+ struct keystone_rio_serial_port_regs __iomem *serial_port_regs;
+ struct keystone_rio_err_mgmt_regs __iomem *err_mgmt_regs;
+ struct keystone_rio_phy_layer_regs __iomem *phy_regs;
+ struct keystone_rio_transport_layer_regs __iomem *transport_regs;
+ struct keystone_rio_pkt_buf_regs __iomem *pkt_buf_regs;
+ struct keystone_rio_evt_mgmt_regs __iomem *evt_mgmt_regs;
+ struct keystone_rio_port_write_regs __iomem *port_write_regs;
+ struct keystone_rio_link_layer_regs __iomem *link_regs;
+ struct keystone_rio_fabric_regs __iomem *fabric_regs;
+ /* base address used for direct offset access in local config r/w */
+ u32 car_csr_regs_base;
+
+ struct keystone_rio_board_controller_info board_rio_cfg;
+};
+
+/*---------------------------- Direct I/O -------------------------------*/
+
+/**
+ * keystone_rio_dio_get_lsu_cc - Read completion status of an LSU transaction
+ * @lsu_id: index of the LSU block (0-7)
+ * @ltid: LSU transaction ID returned when the transfer was posted
+ * @lcb: output, receives the LSU context bit for the transaction
+ * @krio_priv: driver private data
+ *
+ * Returns the 3-bit completion code extracted from the LSU shadow
+ * status registers.
+ */
+static u32 keystone_rio_dio_get_lsu_cc(u32 lsu_id, u8 ltid, u8 *lcb,
+ struct keystone_rio_data *krio_priv)
+{
+ u32 idx;
+ u32 shift;
+ u32 value;
+ u32 cc;
+ /* LSU shadow register status mapping */
+ u32 lsu_index[8] = { 0, 9, 15, 20, 24, 33, 39, 44 };
+
+ /* Compute LSU stat index from LSU id and LTID */
+ idx = (lsu_index[lsu_id] + ltid) >> 3;
+ shift = ((lsu_index[lsu_id] + ltid) & 0x7) << 2;
+
+ /* Get completion code and context: each transaction occupies a
+ 4-bit nibble (cc in bits 3:1, context bit in bit 0) */
+ value = __raw_readl(&(krio_priv->regs->lsu_stat_reg[idx]));
+ cc = (value >> (shift + 1)) & 0x7;
+ *lcb = (value >> shift) & 0x1;
+
+ return cc;
+}
+
+/*---------------------- Maintenance Request Management ---------------------*/
+
+/**
+ * maint_request - Perform a maintenance request
+ * @index: ID of the RapidIO interface
+ * @dest_id: destination ID of target device
+ * @hopcount: hopcount for this request
+ * @offset: offset in the RapidIO configuration space
+ * @buff: dma address of the data on the host
+ * @buff_len: length of the data
+ * @size: 1 for 16bit, 0 for 8bit ID size
+ * @type: packet type
+ * @krio_priv: driver private data
+ *
+ * Posts the request through LSU 0 and busy-waits (with ndelay backoff)
+ * for the matching completion code.
+ *
+ * Returns %0 on success or %-EINVAL, %-EIO, %-EAGAIN or %-EBUSY on failure.
+ */
+static inline int maint_request(int index, u32 dest_id, u8 hopcount,
+ u32 offset, dma_addr_t buff, int buff_len,
+ u16 size, u16 type, struct keystone_rio_data *krio_priv)
+{
+ unsigned int count;
+ unsigned int status = 0;
+ int res = 0; /* signed: holds negative errno values */
+ u8 context;
+ u8 ltid;
+
+ mutex_lock(&krio_priv->lsu_lock);
+
+ /* Check if there is space in the LSU shadow reg and that it is free */
+ count = 0;
+ while (1) {
+ status = __raw_readl(&(krio_priv->regs->lsu_reg[0].busy_full));
+ if (((status & KEYSTONE_RIO_LSU_FULL_MASK) == 0x0)
+ && ((status & KEYSTONE_RIO_LSU_BUSY_MASK) == 0x0))
+ break;
+ count++;
+
+ if (count >= KEYSTONE_RIO_TIMEOUT_CNT) {
+ dev_dbg(krio_priv->dev,
+ "no LSU available, status = 0x%x\n", status);
+ res = -EIO;
+ goto out;
+ }
+ ndelay(1000);
+ }
+
+ /* Get LCB and LTID, LSU reg 6 is already read */
+ context = (status >> 4) & 0x1;
+ ltid = status & 0xf;
+
+ /* LSU Reg 0 - MSB of RapidIO address */
+ __raw_writel(0, &(krio_priv->regs->lsu_reg[0].addr_msb));
+
+ /* LSU Reg 1 - LSB of destination */
+ __raw_writel(offset, &(krio_priv->regs->lsu_reg[0].addr_lsb_cfg_ofs));
+
+ /* LSU Reg 2 - source address */
+ __raw_writel(buff, &(krio_priv->regs->lsu_reg[0].dsp_addr));
+
+ /* LSU Reg 3 - byte count */
+ __raw_writel(buff_len,
+ &(krio_priv->regs->lsu_reg[0].dbell_val_byte_cnt));
+
+ /* LSU Reg 4 - interface, priority, ID size and destination ID */
+ __raw_writel(((index << 8)
+ | (KEYSTONE_RIO_LSU_PRIO << 4)
+ | (size ? (1 << 10) : 0)
+ | ((u32) dest_id << 16)),
+ &(krio_priv->regs->lsu_reg[0].destid));
+
+ /* LSU Reg 5 - hopcount and packet type (writing this starts the
+ transfer) */
+ __raw_writel(((hopcount & 0xff) << 8) | (type & 0xff),
+ &(krio_priv->regs->lsu_reg[0].dbell_info_fttype));
+
+ /* Retrieve our completion code */
+ count = 0;
+ res = 0;
+ while (1) {
+ u8 lcb;
+ status = keystone_rio_dio_get_lsu_cc(0, ltid, &lcb, krio_priv);
+ if (lcb == context)
+ break;
+ count++;
+ if (count >= KEYSTONE_RIO_TIMEOUT_CNT) {
+ dev_dbg(krio_priv->dev,
+ "timeout %d, ltid = %d, context = %d, "
+ "lcb = %d, cc = %d\n",
+ count, ltid, context, lcb, status);
+ res = -EIO;
+ break;
+ }
+ ndelay(1000);
+ }
+out:
+ mutex_unlock(&krio_priv->lsu_lock);
+
+ if (res)
+ return res;
+
+ if (status)
+ dev_err(krio_priv->dev, "transfer error = 0x%x\n", status);
+
+ /* Map the LSU completion code to an errno */
+ switch (status) {
+ case KEYSTONE_RIO_LSU_CC_TIMEOUT:
+ case KEYSTONE_RIO_LSU_CC_XOFF:
+ case KEYSTONE_RIO_LSU_CC_ERROR:
+ case KEYSTONE_RIO_LSU_CC_INVALID:
+ case KEYSTONE_RIO_LSU_CC_DMA:
+ return -EIO;
+ case KEYSTONE_RIO_LSU_CC_RETRY:
+ return -EBUSY;
+ case KEYSTONE_RIO_LSU_CC_CANCELED:
+ return -EAGAIN;
+ default:
+ break;
+ }
+ return 0;
+}
+/*------------------------- RapidIO hw controller setup ---------------------*/
+
+/**
+ * keystone_rio_hw_init - Configure a RapidIO controller
+ * @mode: serdes configuration index into board_rio_cfg.serdes_config[]
+ * @krio_priv: driver private data
+ *
+ * Brings the peripheral out of reset, programs the SerDes, then fills
+ * in the standard CAR/CSR capability registers and error-management
+ * state. Busy-waits for SerDes PLL lock.
+ */
+static void keystone_rio_hw_init(u32 mode, struct keystone_rio_data *krio_priv)
+{
+ u32 val;
+ u32 block;
+ u32 port;
+ struct keystone_serdes_config *serdes_config
+ = &(krio_priv->board_rio_cfg.serdes_config[mode]);
+
+ /* Set sRIO out of reset */
+ __raw_writel(0x00000011, &krio_priv->regs->pcr);
+
+ /* Clear BOOT_COMPLETE bit (allowing write) */
+ __raw_writel(0x00000000, &krio_priv->regs->per_set_cntl);
+
+ /* Enable blocks */
+ __raw_writel(1, &krio_priv->regs->gbl_en);
+ for (block = 0; block <= KEYSTONE_RIO_BLK_NUM; block++)
+ __raw_writel(1, &(krio_priv->regs->blk[block].enable));
+
+ /* Set control register 1 configuration */
+ __raw_writel(0x00000000, &krio_priv->regs->per_set_cntl1);
+
+ /* Set Control register */
+ __raw_writel(serdes_config->cfg_cntl, &krio_priv->regs->per_set_cntl);
+
+ /* Serdes main configuration */
+ __raw_writel(serdes_config->serdes_cfg_pll,
+ &krio_priv->serdes_regs->pll);
+
+ /* Per-port SerDes configuration */
+ for (port = 0; port < KEYSTONE_RIO_MAX_PORT; port++) {
+ __raw_writel(serdes_config->rx_chan_config[port],
+ &krio_priv->serdes_regs->channel[port].rx);
+ __raw_writel(serdes_config->tx_chan_config[port],
+ &krio_priv->serdes_regs->channel[port].tx);
+ }
+
+ /* Check for RIO SerDes PLL lock.
+ NOTE(review): unbounded busy-wait; hangs if the PLL never locks */
+ do {
+ val = __raw_readl(krio_priv->serdes_sts_reg);
+ } while ((val & 0x1) != 0x1);
+
+ /* Set prescalar for ip_clk */
+ __raw_writel(serdes_config->prescalar_srv_clk,
+ &krio_priv->link_regs->prescalar_srv_clk);
+
+ /* Peripheral-specific configuration and capabilities */
+ __raw_writel(KEYSTONE_RIO_DEV_ID_VAL,
+ &krio_priv->car_csr_regs->dev_id);
+ __raw_writel(KEYSTONE_RIO_DEV_INFO_VAL,
+ &krio_priv->car_csr_regs->dev_info);
+ __raw_writel(KEYSTONE_RIO_ID_TI,
+ &krio_priv->car_csr_regs->assembly_id);
+ __raw_writel(KEYSTONE_RIO_EXT_FEAT_PTR,
+ &krio_priv->car_csr_regs->assembly_info);
+
+ /* Cached so keystone_local_config_read() can fake the inbound
+ mailbox/doorbell bits for rionet (see that function) */
+ krio_priv->rio_pe_feat = RIO_PEF_PROCESSOR
+ | RIO_PEF_CTLS
+ | KEYSTONE_RIO_PEF_FLOW_CONTROL
+ | RIO_PEF_EXT_FEATURES
+ | RIO_PEF_ADDR_34
+ | RIO_PEF_STD_RT
+ | RIO_PEF_INB_DOORBELL
+ | RIO_PEF_INB_MBOX;
+
+ __raw_writel(krio_priv->rio_pe_feat,
+ &krio_priv->car_csr_regs->pe_feature);
+
+ __raw_writel(KEYSTONE_RIO_MAX_PORT << 8,
+ &krio_priv->car_csr_regs->sw_port);
+
+ __raw_writel((RIO_SRC_OPS_READ
+ | RIO_SRC_OPS_WRITE
+ | RIO_SRC_OPS_STREAM_WRITE
+ | RIO_SRC_OPS_WRITE_RESPONSE
+ | RIO_SRC_OPS_DATA_MSG
+ | RIO_SRC_OPS_DOORBELL
+ | RIO_SRC_OPS_ATOMIC_TST_SWP
+ | RIO_SRC_OPS_ATOMIC_INC
+ | RIO_SRC_OPS_ATOMIC_DEC
+ | RIO_SRC_OPS_ATOMIC_SET
+ | RIO_SRC_OPS_ATOMIC_CLR
+ | RIO_SRC_OPS_PORT_WRITE),
+ &krio_priv->car_csr_regs->src_op);
+
+ __raw_writel((RIO_DST_OPS_READ
+ | RIO_DST_OPS_WRITE
+ | RIO_DST_OPS_STREAM_WRITE
+ | RIO_DST_OPS_WRITE_RESPONSE
+ | RIO_DST_OPS_DATA_MSG
+ | RIO_DST_OPS_DOORBELL
+ | RIO_DST_OPS_PORT_WRITE),
+ &krio_priv->car_csr_regs->dest_op);
+
+ __raw_writel(RIO_PELL_ADDR_34,
+ &krio_priv->car_csr_regs->pe_logical_ctl);
+
+ val = (((KEYSTONE_RIO_SP_HDR_NEXT_BLK_PTR & 0xffff) << 16) |
+ KEYSTONE_RIO_SP_HDR_EP_REC_ID);
+ __raw_writel(val, &krio_priv->serial_port_regs->sp_maint_blk_hdr);
+
+ /* clear high bits of local config space base addr */
+ __raw_writel(0x00000000, &krio_priv->car_csr_regs->local_cfg_hbar);
+
+ /* set local config space base addr */
+ __raw_writel(0x00520000, &krio_priv->car_csr_regs->local_cfg_bar);
+
+ /* Enable HOST BIT(31) & MASTER_ENABLE BIT(30) bits */
+ __raw_writel(0xc0000000, &krio_priv->serial_port_regs->sp_gen_ctl);
+
+ /* set link timeout value */
+ __raw_writel(0x000FFF00,
+ &krio_priv->serial_port_regs->sp_link_timeout_ctl);
+
+ /* set response timeout value */
+ __raw_writel(0x000FFF00,
+ &krio_priv->serial_port_regs->sp_rsp_timeout_ctl);
+
+ /* allows SELF_RESET and PWDN_PORT resets to clear sticky reg bits */
+ __raw_writel(0x00000001, &krio_priv->link_regs->reg_rst_ctl);
+
+ /* Set error detection mode */
+ /* clear all errors */
+ __raw_writel(0x00000000, &krio_priv->err_mgmt_regs->err_det);
+ /* enable all error detection */
+ __raw_writel(0x00000000, &krio_priv->err_mgmt_regs->err_en);
+
+ /* set err det block header */
+ val = (((KEYSTONE_RIO_ERR_HDR_NEXT_BLK_PTR & 0xffff) << 16) |
+ KEYSTONE_RIO_ERR_EXT_FEAT_ID);
+ __raw_writel(val, &krio_priv->err_mgmt_regs->err_report_blk_hdr);
+
+ /* clear msb of err captured addr reg */
+ __raw_writel(0x00000000, &krio_priv->err_mgmt_regs->h_addr_capt);
+
+ /* clear lsb of err captured addr reg */
+ __raw_writel(0x00000000, &krio_priv->err_mgmt_regs->addr_capt);
+
+ /* clear err captured source and dest devID reg */
+ __raw_writel(0x00000000, &krio_priv->err_mgmt_regs->id_capt);
+
+ /* clear err captured packet info */
+ __raw_writel(0x00000000, &krio_priv->err_mgmt_regs->ctrl_capt);
+
+ /* NOTE(review): undocumented magic write into a reserved PLM
+ register area -- presumably a hardware workaround; confirm
+ against the SRIO hardware errata before changing */
+ __raw_writel(0x41004141, &krio_priv->phy_regs->phy_sp[0].__rsvd[3]);
+
+ /* Force all writes to finish */
+ val = __raw_readl(&krio_priv->err_mgmt_regs->ctrl_capt);
+}
+
+/**
+ * keystone_rio_start - Start RapidIO controller
+ * @krio_priv: driver private data
+ *
+ * Enables logical-layer data flow and then sets BOOT_COMPLETE, which
+ * locks further writes to the boot-time configuration registers.
+ */
+static void keystone_rio_start(struct keystone_rio_data *krio_priv)
+{
+ u32 val;
+
+ /* Set PEREN bit to enable logical layer data flow */
+ val = (KEYSTONE_RIO_PER_EN | KEYSTONE_RIO_PER_FREE);
+ __raw_writel(val, &krio_priv->regs->pcr);
+
+ /* Set BOOT_COMPLETE bit (read-modify-write to keep other bits) */
+ val = __raw_readl(&krio_priv->regs->per_set_cntl);
+ __raw_writel(val | KEYSTONE_RIO_BOOT_COMPLETE,
+ &krio_priv->regs->per_set_cntl);
+}
+
+/*
+ * keystone_rio_test_link - Probe the link with a dummy maintenance read.
+ *
+ * Sends a 4-byte maintenance read to destination 0xff, hopcount 0, and
+ * returns the maint_request() result; the read data is discarded.
+ * Returns 0 if the link responded, negative errno otherwise.
+ */
+static int
+keystone_rio_test_link(struct keystone_rio_data *krio_priv)
+{
+ u32 *tbuf;
+ int res;
+ dma_addr_t dma;
+ struct device *dev = krio_priv->dev;
+ size_t align_len = L1_CACHE_ALIGN(4);
+
+ /* cache-line aligned length to avoid sharing a line with other data */
+ tbuf = kzalloc(align_len, GFP_KERNEL);
+ if (!tbuf)
+ return -ENOMEM;
+
+ /* NOTE(review): return value not checked with dma_mapping_error() */
+ dma = dma_map_single(dev, tbuf, 4, DMA_FROM_DEVICE);
+
+ /* Send a maint req to test the link */
+ res = maint_request(0, 0xff, 0, 0, dma, 4,
+ krio_priv->board_rio_cfg.size,
+ KEYSTONE_RIO_PACKET_TYPE_MAINT_R,
+ krio_priv);
+
+ dma_unmap_single(dev, dma, 4, DMA_FROM_DEVICE);
+
+ kfree(tbuf);
+
+ return res;
+}
+
+/**
+ * keystone_rio_port_status - Return if the port is OK or not
+ * @port: index of the port
+ * @krio_priv: driver private data
+ *
+ * Reads the port error/status CSR and, if the PORT_OK bit is set,
+ * confirms the link with a real maintenance transaction.
+ *
+ * Return %0 if the port is ready, %-EINVAL for a bad port index or
+ * %-EIO on failure.
+ */
+static int keystone_rio_port_status(int port,
+ struct keystone_rio_data *krio_priv)
+{
+ unsigned int value;
+
+ if (port >= KEYSTONE_RIO_MAX_PORT)
+ return -EINVAL;
+
+ /* Check port status */
+ value = __raw_readl(&(krio_priv->serial_port_regs->sp[port].err_stat));
+
+ if ((value & RIO_PORT_N_ERR_STS_PORT_OK) == 0)
+ return -EIO;
+
+ /* Hardware claims the port is OK: verify with a real transfer */
+ if (keystone_rio_test_link(krio_priv) != 0)
+ return -EIO;
+
+ return 0; /* port must be solid OK */
+}
+
+/**
+ * keystone_rio_port_init - Configure a RapidIO port
+ * @port: index of the port to configure
+ * @mode: serdes configuration index
+ * @krio_priv: driver private data
+ *
+ * Returns %0 on success or %-EINVAL for an out-of-range port index.
+ */
+static int keystone_rio_port_init(u32 port, u32 mode,
+ struct keystone_rio_data *krio_priv)
+{
+ u32 path_mode =
+ krio_priv->board_rio_cfg.serdes_config[mode].path_mode[port];
+
+ if (port >= KEYSTONE_RIO_MAX_PORT)
+ return -EINVAL;
+
+ /* Send both link request and PNA control symbols
+ (this will clear error states) */
+ __raw_writel(0x2003f044,
+ &krio_priv->phy_regs->phy_sp[port].long_cs_tx1);
+
+ /* Disable packet forwarding */
+ __raw_writel(0xffffffff, &(krio_priv->regs->pkt_fwd_cntl[port].pf_16b));
+ __raw_writel(0x0003ffff, &(krio_priv->regs->pkt_fwd_cntl[port].pf_8b));
+
+ /* Silence and discovery timers */
+ __raw_writel(0x20000000,
+ &(krio_priv->phy_regs->phy_sp[port].silence_timer));
+ __raw_writel(0x20000000,
+ &(krio_priv->phy_regs->phy_sp[port].discovery_timer));
+
+ /* Enable port in input and output */
+ __raw_writel(0x600000, &(krio_priv->serial_port_regs->sp[port].ctl));
+
+ /* Program channel allocation to ports (1x, 2x or 4x) */
+ __raw_writel(path_mode, &(krio_priv->phy_regs->phy_sp[port].path_ctl));
+
+ return 0;
+}
+
+/**
+ * keystone_rio_port_activate - Start using a RapidIO port
+ * @port: index of the port to configure
+ * @krio_priv: driver private data
+ *
+ * Enables per-port interrupts and error reporting, clears stale error
+ * status and puts the transport layer in promiscuous mode.
+ * Always returns %0; the port index is not range-checked here
+ * (callers are expected to have validated it).
+ */
+static int keystone_rio_port_activate(u32 port,
+ struct keystone_rio_data *krio_priv)
+{
+ u32 val;
+
+ /* Enable interrupt for reset request */
+ val = __raw_readl(&(krio_priv->evt_mgmt_regs->evt_mgmt_rst_int_en));
+ __raw_writel(val | (1 << port),
+ &(krio_priv->evt_mgmt_regs->evt_mgmt_rst_int_en));
+
+ /* Enable all PLM interrupts */
+ __raw_writel(0xffffffff,
+ &(krio_priv->phy_regs->phy_sp[port].int_enable));
+ __raw_writel(1, &(krio_priv->phy_regs->phy_sp[port].all_int_en));
+
+ /* Enable all errors */
+ __raw_writel(0xffffffff,
+ &(krio_priv->err_mgmt_regs->sp_err[port].rate_en));
+
+ /* Cleanup port error status (write-1-to-clear) */
+ __raw_writel(KEYSTONE_RIO_PORT_ERROR_MASK,
+ &(krio_priv->serial_port_regs->sp[port].err_stat));
+ __raw_writel(0, &(krio_priv->err_mgmt_regs->sp_err[port].det));
+
+ /* Enable promiscuous */
+ __raw_writel(0x00309000,
+ &(krio_priv->transport_regs->transport_sp[port].control));
+
+ /* Enable Port-write reception capture */
+ __raw_writel(0, &(krio_priv->port_write_regs->port_wr_rx_capt[port]));
+
+ return 0;
+}
+
+/*------------------------- Configuration space mngt ----------------------*/
+
+/**
+ * keystone_local_config_read - Generate a KeyStone local config space read
+ * @mport: RapidIO master port info
+ * @index: ID of RapidIO interface
+ * @offset: Offset into configuration space
+ * @len: Length (in bytes) of the maintenance transaction (unused; a
+ * full 32-bit word is always read)
+ * @data: Value to be read into
+ *
+ * Generates a KeyStone local configuration space read. Returns %0 on
+ * success or %-EINVAL on failure.
+ */
+static int keystone_local_config_read(struct rio_mport *mport,
+ int index, u32 offset, int len, u32 * data)
+{
+ struct keystone_rio_data *krio_priv = mport->priv;
+
+ /*
+ * Workaround for rionet: the processing element features must content
+ * RIO_PEF_INB_MBOX and RIO_PEF_INB_DOORBELL bits that cannot be set on
+ * KeyStone hardware. So cheat the read value in this case...
+ */
+ if (unlikely(offset == RIO_PEF_CAR))
+ *data = krio_priv->rio_pe_feat;
+ else
+ *data = __raw_readl((void __iomem *)
+ (krio_priv->car_csr_regs_base + offset));
+
+ dev_dbg(krio_priv->dev,
+ "index %d offset 0x%x data 0x%x\n", index, offset, *data);
+ return 0;
+}
+
+/**
+ * keystone_local_config_write - Generate a KeyStone local config space write
+ * @mport: RapidIO master port info
+ * @index: ID of RapidIO interface
+ * @offset: Offset into configuration space
+ * @len: Length (in bytes) of the maintenance transaction (unused; a
+ * full 32-bit word is always written)
+ * @data: Value to be written
+ *
+ * Generates a KeyStone local configuration space write. Returns %0 on
+ * success or %-EINVAL on failure.
+ */
+static int keystone_local_config_write(struct rio_mport *mport,
+ int index, u32 offset, int len, u32 data)
+{
+ struct keystone_rio_data *krio_priv = mport->priv;
+
+ dev_dbg(krio_priv->dev,
+ "index %d offset 0x%x data 0x%x\n", index, offset, data);
+ __raw_writel(data,
+ (void __iomem *)(krio_priv->car_csr_regs_base + offset));
+
+ return 0;
+}
+
+/**
+ * keystone_rio_config_read - Generate a KeyStone read maintenance transaction
+ * @mport: RapidIO master port info
+ * @index: ID of RapidIO interface
+ * @destid: Destination ID of transaction
+ * @hopcount: Number of hops to target device
+ * @offset: Offset into configuration space
+ * @len: Length (in bytes) of the maintenance transaction
+ * @val: Location to be read into
+ *
+ * Generates a KeyStone read maintenance transaction. Returns %0 on
+ * success or %-EINVAL on failure.
+ */
+static int
+keystone_rio_config_read(struct rio_mport *mport, int index, u16 destid,
+ u8 hopcount, u32 offset, int len, u32* val)
+{
+ u32* tbuf;
+ int res;
+ dma_addr_t dma;
+ struct device *dev = ((struct keystone_rio_data *)(mport->priv))->dev;
+ size_t align_len = L1_CACHE_ALIGN(len);
+
+ /* bounce buffer, cache-line aligned length, zeroed so a failed
+ transfer yields *val == 0 */
+ tbuf = (u32*) kzalloc(align_len, GFP_KERNEL);
+ if (!tbuf)
+ return -ENOMEM;
+
+ /* NOTE(review): return value not checked with dma_mapping_error() */
+ dma = dma_map_single(dev, tbuf, len, DMA_FROM_DEVICE);
+
+ res = maint_request(index, destid, hopcount, offset, dma, len,
+ mport->sys_size, KEYSTONE_RIO_PACKET_TYPE_MAINT_R,
+ (struct keystone_rio_data *)(mport->priv));
+
+ dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
+
+ /* Taking care of byteswap: data arrives in RapidIO (big-endian)
+ byte order */
+ switch (len) {
+ case 1:
+ *val = *((u8*) tbuf);
+ break;
+ case 2:
+ *val = ntohs(*((u16*) tbuf));
+ break;
+ default:
+ *val = ntohl(*((u32*) tbuf));
+ break;
+ }
+
+ kfree(tbuf);
+
+ dev_dbg(dev,
+ "index %d destid %d hopcount %d offset 0x%x "
+ "len %d val 0x%x res %d\n",
+ index, destid, hopcount, offset, len, *val, res);
+
+ return res;
+}
+
+/**
+ * keystone_rio_config_write - Generate a KeyStone write
+ * maintenance transaction
+ * @mport: RapidIO master port info
+ * @index: ID of RapidIO interface
+ * @destid: Destination ID of transaction
+ * @hopcount: Number of hops to target device
+ * @offset: Offset into configuration space
+ * @len: Length (in bytes) of the maintenance transaction
+ * @val: Value to be written
+ *
+ * Generates a KeyStone write maintenance transaction. Returns %0 on
+ * success or %-EINVAL on failure.
+ */
+static int
+keystone_rio_config_write(struct rio_mport *mport, int index, u16 destid,
+ u8 hopcount, u32 offset, int len, u32 val)
+{
+ u32* tbuf;
+ int res;
+ dma_addr_t dma;
+ struct device *dev = ((struct keystone_rio_data *)(mport->priv))->dev;
+ size_t align_len = L1_CACHE_ALIGN(len);
+
+ /* bounce buffer, cache-line aligned length */
+ tbuf = (u32*) kzalloc(align_len, GFP_KERNEL);
+ if (!tbuf)
+ return -ENOMEM;
+
+ /* Taking care of byteswap: data goes out in RapidIO (big-endian)
+ byte order */
+ switch (len) {
+ case 1:
+ *tbuf = ((u8) val);
+ break;
+ case 2:
+ *tbuf = htons((u16) val);
+ break;
+ default:
+ *tbuf = htonl((u32) val);
+ break;
+ }
+
+ /* NOTE(review): return value not checked with dma_mapping_error() */
+ dma = dma_map_single(dev, tbuf, len, DMA_TO_DEVICE);
+
+ res = maint_request(index, destid, hopcount, offset, dma, len,
+ mport->sys_size,
+ KEYSTONE_RIO_PACKET_TYPE_MAINT_W,
+ (struct keystone_rio_data *)(mport->priv));
+
+ dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
+
+ dev_dbg(dev,
+ "index %d destid %d hopcount %d offset 0x%x "
+ "len %d val 0x%x res %d\n",
+ index, destid, hopcount, offset, len, val, res);
+
+ kfree(tbuf);
+
+ return res;
+}
+
+/*------------------------- Message passing management ----------------------*/
+
+/*
+ * keystone_rio_rx_complete - DMA completion callback for an inbound message.
+ *
+ * Decodes source/destination IDs and the mailbox number from the
+ * protocol-specific data words, records the packet on the mailbox and,
+ * if the mailbox is open, invokes the client's inbound-message callback.
+ */
+static void keystone_rio_rx_complete(void *data)
+{
+ struct keystone_rio_packet *p_info = data;
+ struct keystone_rio_data *krio_priv = p_info->priv;
+ struct keystone_rio_rx_chan_info *krx_chan;
+ struct keystone_rio_mbox_info *p_mbox;
+ int mbox;
+ u32 src_id, dest_id;
+
+ /* psdata[0]: src id in upper 16 bits, dest id in lower 16 bits;
+ psdata[1]: mailbox number in the low 6 bits */
+ src_id = ((p_info->psdata[0] & 0xffff0000) >> 16);
+ dest_id = (p_info->psdata[0] & 0x0000ffff);
+ mbox = (p_info->psdata[1] & 0x3f);
+ p_info->mbox = mbox;
+
+ krx_chan = &(krio_priv->rx_channels[mbox]);
+
+ p_info->status = dma_async_is_tx_complete(krx_chan->dma_channel,
+ p_info->cookie, NULL, NULL);
+ WARN_ON(p_info->status != DMA_SUCCESS && p_info->status != DMA_ERROR);
+
+ /* NOTE(review): a new packet overwrites p_info_temp before the
+ client consumed the previous one -- single-buffered by design? */
+ p_mbox = &(krio_priv->rx_mbox[mbox]);
+ p_mbox->p_info_temp = p_info;
+
+ dev_dbg(krio_priv->dev,
+ "Received message for mbox = %d, src=%d, dest=%d\n",
+ mbox, src_id, dest_id);
+
+ if (p_mbox->running) {
+ /* Client callback (slot is not used) */
+ p_mbox->port->inb_msg[p_mbox->id].mcback(p_mbox->port,
+ p_mbox->dev_id, p_mbox->id, 0);
+ }
+}
+
+/*
+ * Tasklet body: poll every active mailbox RX channel to drain completed
+ * DMA descriptors, then resume the channel so notifications fire again.
+ */
+static void keystone_rio_chan_work_handler(unsigned long data)
+{
+ struct keystone_rio_data *krio_priv = (struct keystone_rio_data *)data;
+ int mbox;
+
+ for (mbox = 0; mbox < KEYSTONE_RIO_MAX_MBOX; mbox++) {
+ struct dma_chan *chan =
+ krio_priv->rx_channels[mbox].dma_channel;
+
+ if (!chan)
+ continue;
+
+ dma_poll(chan, -1);
+ dmaengine_resume(chan);
+ }
+}
+
+/*
+ * DMA notification callback: pause every active mailbox RX channel and
+ * defer the real work to the tasklet (we may be in hard-IRQ context).
+ */
+static void keystone_rio_rx_notify(struct dma_chan *chan, void *arg)
+{
+ struct keystone_rio_data *krio_priv = arg;
+ int mbox;
+
+ for (mbox = 0; mbox < KEYSTONE_RIO_MAX_MBOX; mbox++) {
+ struct dma_chan *rx_chan =
+ krio_priv->rx_channels[mbox].dma_channel;
+
+ if (rx_chan)
+ dmaengine_pause(rx_chan);
+ }
+
+ tasklet_schedule(&krio_priv->task);
+}
+
+/*
+ * Release a free receive buffer.
+ * rxpool destructor: unmaps the payload scatterlist entry and frees the
+ * packet descriptor. The payload buffer itself belongs to the client
+ * and is not freed here.
+ */
+static void keystone_rio_rxpool_free(void *arg, unsigned q_num,
+ unsigned bufsize, struct dma_async_tx_descriptor *desc)
+{
+ struct keystone_rio_rx_chan_info *krx_chan = arg;
+ struct keystone_rio_data *krio_priv = krx_chan->priv;
+ struct keystone_rio_packet *p_info = desc->callback_param;
+
+ /* sg[2] is the payload entry (sg[0]/sg[1] hold EPIB and psdata) */
+ dma_unmap_sg(krio_priv->dev, &p_info->sg[2], 1, DMA_FROM_DEVICE);
+ p_info->buff = NULL;
+ kfree(p_info);
+
+ return;
+}
+
+/*
+ * Allocate a free receive buffer.
+ * rxpool allocator: wraps the client-supplied buffer (buff_temp) in a
+ * 3-entry scatterlist (EPIB, psdata, payload) and preps a slave-DMA
+ * descriptor for it. Returns NULL when no buffer is pending or on any
+ * allocation/mapping failure.
+ */
+static struct dma_async_tx_descriptor *keystone_rio_rxpool_alloc(void *arg,
+ unsigned q_num, unsigned bufsize)
+{
+ struct keystone_rio_rx_chan_info *krx_chan = arg;
+ struct keystone_rio_data *krio_priv = krx_chan->priv;
+ struct dma_async_tx_descriptor *desc = NULL;
+ struct keystone_rio_packet *p_info;
+ u32 err = 0;
+
+ if (krx_chan->buff_temp == NULL)
+ /* No inb_buffer added */
+ return NULL;
+
+ /* Allocate a primary receive queue entry */
+ p_info = kzalloc(sizeof(*p_info), GFP_ATOMIC);
+ if (!p_info) {
+ dev_err(krio_priv->dev, "packet alloc failed\n");
+ return NULL;
+ }
+ p_info->priv = krio_priv;
+ p_info->buff = krx_chan->buff_temp;
+
+ /* sg[0] = EPIB, sg[1] = protocol-specific data, sg[2] = payload */
+ sg_init_table(p_info->sg, KEYSTONE_RIO_SGLIST_SIZE);
+ sg_set_buf(&p_info->sg[0], p_info->epib, sizeof(p_info->epib));
+ sg_set_buf(&p_info->sg[1], p_info->psdata, sizeof(p_info->psdata));
+ sg_set_buf(&p_info->sg[2], krx_chan->buff_temp,
+ krx_chan->buffer_sizes[q_num]);
+
+ /* buffer consumed; the client must add a new one for the next pkt */
+ krx_chan->buff_temp = NULL;
+
+ /* only the payload entry is DMA-mapped (EPIB/psdata are inline) */
+ p_info->sg_ents = 2 + dma_map_sg(krio_priv->dev, &p_info->sg[2],
+ 1, DMA_FROM_DEVICE);
+
+ if (p_info->sg_ents != 3) {
+ dev_err(krio_priv->dev, "dma map failed\n");
+ p_info->buff = NULL;
+ kfree(p_info);
+ return NULL;
+ }
+
+ desc = dmaengine_prep_slave_sg(krx_chan->dma_channel, p_info->sg,
+ p_info->sg_ents, DMA_DEV_TO_MEM,
+ DMA_HAS_EPIB | DMA_HAS_PSINFO);
+
+ if (IS_ERR_OR_NULL(desc)) {
+ dma_unmap_sg(krio_priv->dev, &p_info->sg[2],
+ 1, DMA_FROM_DEVICE);
+ p_info->buff = NULL;
+ kfree(p_info);
+ /* NOTE(review): if desc is NULL (not an ERR_PTR) this yields
+ err == 0 and logs "error 0" below */
+ err = PTR_ERR(desc);
+ if (err != -ENOMEM) {
+ dev_err(krio_priv->dev,
+ "dma prep failed, error %d\n", err);
+ }
+ return NULL;
+ }
+
+ desc->callback_param = p_info;
+ desc->callback = keystone_rio_rx_complete;
+ p_info->cookie = desc->cookie;
+
+ return desc;
+}
+
+/*
+ * keystone_rio_mp_inb_exit - Tear down the inbound MP channel of a mailbox.
+ *
+ * Pauses and releases the RX DMA channel; safe to call when the
+ * channel was never opened (no-op in that case).
+ */
+static void keystone_rio_mp_inb_exit(int mbox,
+ struct keystone_rio_data *krio_priv)
+{
+ struct keystone_rio_rx_chan_info *krx_chan;
+
+ krx_chan = &(krio_priv->rx_channels[mbox]);
+
+ if (!(krx_chan->dma_channel))
+ return;
+
+ dmaengine_pause(krx_chan->dma_channel);
+ dma_release_channel(krx_chan->dma_channel);
+ krx_chan->dma_channel = NULL;
+ return;
+}
+
+/*
+ * keystone_rio_mp_inb_init - Set up the inbound MP DMA channel of a mailbox.
+ *
+ * Requests the named slave-DMA channel, configures its RX pools from the
+ * per-mailbox queue depths / buffer sizes, installs the notify callback
+ * and records the hardware flow and queue numbers for RXU mapping.
+ * Returns 0 on success or a negative errno.
+ */
+static int keystone_rio_mp_inb_init(int mbox,
+ struct keystone_rio_data *krio_priv)
+{
+ struct keystone_rio_rx_chan_info *krx_chan;
+ struct dma_keystone_info config;
+ dma_cap_mask_t mask;
+ int err = -ENODEV;
+ int i;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ /* DMA RX channel */
+ krx_chan = &(krio_priv->rx_channels[mbox]);
+ krx_chan->priv = krio_priv;
+ krx_chan->chan_num = mbox;
+ krx_chan->dma_channel =
+ dma_request_channel_by_name(mask, krx_chan->name);
+ if (IS_ERR_OR_NULL(krx_chan->dma_channel))
+ goto fail;
+
+ memset(&config, 0, sizeof(config));
+ config.direction = DMA_DEV_TO_MEM;
+ config.scatterlist_size = KEYSTONE_RIO_SGLIST_SIZE;
+ config.rxpool_allocator = keystone_rio_rxpool_alloc;
+ config.rxpool_destructor = keystone_rio_rxpool_free;
+ config.rxpool_param = krx_chan;
+ config.rxpool_thresh_enable = DMA_THRESH_NONE;
+
+ /* one RX pool per configured queue; stop at the first zero
+ depth/size entry */
+ for (i = 0; i < KEYSTONE_QUEUES_PER_CHAN &&
+ krx_chan->queue_depths[i] &&
+ krx_chan->buffer_sizes[i]; ++i) {
+ config.rxpools[i].pool_depth = krx_chan->queue_depths[i];
+ config.rxpools[i].buffer_size = krx_chan->buffer_sizes[i];
+ dev_dbg(krio_priv->dev, "rx_pool[%d] depth %d, size %d\n", i,
+ config.rxpools[i].pool_depth,
+ config.rxpools[i].buffer_size);
+ }
+ config.rxpool_count = i;
+
+ err = dma_keystone_config(krx_chan->dma_channel, &config);
+ if (err) {
+ dev_err(krio_priv->dev,
+ "Error configuring RX channel, err %d\n", err);
+ goto fail;
+ }
+
+ /* shared tasklet; re-initialized for every mailbox (harmless) */
+ tasklet_init(&krio_priv->task, keystone_rio_chan_work_handler,
+ (unsigned long) krio_priv);
+
+ dma_set_notify(krx_chan->dma_channel,
+ keystone_rio_rx_notify,
+ krio_priv);
+
+ /* needed later by keystone_rio_map_mbox() */
+ krx_chan->flow_num = dma_get_rx_flow(krx_chan->dma_channel);
+ krx_chan->queue_num = dma_get_rx_queue(krx_chan->dma_channel);
+
+ dev_info(krio_priv->dev,
+ "Opened rx channel: %p (mbox=%d, flow=%d, rx_q=%d)\n",
+ krx_chan->dma_channel, mbox, krx_chan->flow_num,
+ krx_chan->queue_num);
+
+ return 0;
+
+fail:
+ if (krx_chan->dma_channel) {
+ dma_release_channel(krx_chan->dma_channel);
+ krx_chan->dma_channel = NULL;
+ }
+ return err;
+}
+
+/*
+ * keystone_rio_get_rxu_map - Allocate a free RXU mapping table entry.
+ *
+ * Scans the 64-bit (2 x 32) allocation bitmap and claims the first free
+ * slot. Returns the slot id, or -1 when all entries are in use.
+ * Not atomic: callers serialize mailbox setup.
+ */
+static int keystone_rio_get_rxu_map(struct keystone_rio_data *krio_priv)
+{
+ int id;
+ unsigned long bit_sz = 2 * 8 * sizeof(u32);
+
+ id = find_first_zero_bit(&(krio_priv->rxu_map_bitmap[0]), bit_sz);
+ if (id >= bit_sz)
+ return -1;
+
+ __set_bit(id, &(krio_priv->rxu_map_bitmap[0]));
+ return id;
+}
+
+/* Release an RXU mapping table entry previously claimed by
+ keystone_rio_get_rxu_map(). */
+static void keystone_rio_free_rxu_map(int id,
+ struct keystone_rio_data *krio_priv)
+{
+ clear_bit(id, &(krio_priv->rxu_map_bitmap[0]));
+}
+
+/**
+ * keystone_rio_map_mbox - Map a mailbox to a given queue.
+ * for both type 11 and type 9 packets.
+ * @mbox: mailbox to map
+ * @queue: associated queue number
+ * @flowid: PktDMA flow to receive into
+ * @size: non-zero selects 16-bit device IDs (TT flag)
+ *
+ * Programs two RXU mapping entries: one for multi-segment and one for
+ * single-segment messages (the hardware consults separate tables).
+ *
+ * Returns %0 on success or %-ENOMEM on failure.
+ */
+static int keystone_rio_map_mbox(int mbox,
+ int queue, int flowid, int size,
+ struct keystone_rio_data *krio_priv)
+{
+ struct keystone_rio_mbox_info *rx_mbox = &krio_priv->rx_mbox[mbox];
+ u32 mapping_entry_low;
+ u32 mapping_entry_high;
+ u32 mapping_entry_qid;
+ int i;
+
+ /* Map the multi-segment mailbox to the corresponding Rx
+ queue for type 11 */
+ mapping_entry_low = ((mbox & 0x1f) << 16)
+ | (0x3f000000); /* Given mailbox, all letters, srcid = 0 */
+
+ mapping_entry_high = KEYSTONE_RIO_MAP_FLAG_SEGMENT /* multi-segment messaging */
+ | KEYSTONE_RIO_MAP_FLAG_SRC_PROMISC
+ | KEYSTONE_RIO_MAP_FLAG_DST_PROMISC; /* promiscuous (don't care about src/dst id) */
+
+ /* Set TT flag */
+ if (size) {
+ mapping_entry_high |= KEYSTONE_RIO_MAP_FLAG_TT_16;
+ }
+
+ /* QMSS/PktDMA mapping */
+ mapping_entry_qid = (queue & 0x3fff) | (flowid << 16);
+
+ i = keystone_rio_get_rxu_map(krio_priv);
+
+ if (i < 0)
+ return -ENOMEM;
+
+ rx_mbox->rxu_map_id[0] = i;
+ dev_dbg(krio_priv->dev,
+ "Using RXU map %d @ 0x%08x: mbox = %d,"
+ " flowid = %d, queue = %d\n",
+ i, (u32)&(krio_priv->regs->rxu_map[i]), mbox, flowid, queue);
+
+ __raw_writel(mapping_entry_low,
+ &(krio_priv->regs->rxu_map[i].ltr_mbox_src));
+
+ __raw_writel(mapping_entry_high,
+ &(krio_priv->regs->rxu_map[i].dest_prom_seg));
+
+ __raw_writel(mapping_entry_qid,
+ &(krio_priv->regs->rxu_map[i].flow_qid));
+
+ /*
+ * The RapidIO peripheral looks at the incoming RapidIO msgs
+ * and if there is only one segment (the whole msg fits into one
+ * RapidIO msg), the peripheral uses the single segment mapping
+ * table. Therefore we need to map the single-segment mailbox too.
+ * The same Rx CPPI Queue is used (as for the multi-segment
+ * mailbox).
+ */
+ mapping_entry_high &= ~KEYSTONE_RIO_MAP_FLAG_SEGMENT;
+
+ i = keystone_rio_get_rxu_map(krio_priv);
+
+ /* NOTE(review): on this failure path the first entry claimed
+ above is not released */
+ if (i < 0)
+ return -ENOMEM;
+
+ rx_mbox->rxu_map_id[1] = i;
+ __raw_writel(mapping_entry_low,
+ &(krio_priv->regs->rxu_map[i].ltr_mbox_src));
+
+ __raw_writel(mapping_entry_high,
+ &(krio_priv->regs->rxu_map[i].dest_prom_seg));
+
+ __raw_writel(mapping_entry_qid,
+ &(krio_priv->regs->rxu_map[i].flow_qid));
+
+ return 0;
+}
+
+/**
+ * keystone_rio_open_inb_mbox - Initialize KeyStone inbound mailbox
+ * @mport: Master port implementing the inbound message unit
+ * @dev_id: Device specific pointer to pass on event
+ * @mbox: Mailbox to open
+ * @entries: Number of entries in the inbound mailbox ring
+ *
+ * Initializes buffer ring, request the inbound message interrupt,
+ * and enables the inbound message unit. Returns %0 on success
+ * and %-EINVAL or %-ENOMEM on failure.
+ */
+static int keystone_rio_open_inb_mbox(
+	struct rio_mport *mport,
+	void *dev_id,
+	int mbox,
+	int entries
+)
+{
+	struct keystone_rio_data *krio_priv = mport->priv;
+	struct keystone_rio_mbox_info *rx_mbox;
+	struct keystone_rio_rx_chan_info *krx_chan;
+	int res;
+
+	/* Bound-check the mailbox index like the outbound path does */
+	if ((mbox < 0) || (mbox >= KEYSTONE_RIO_MAX_MBOX))
+		return -EINVAL;
+
+	rx_mbox  = &krio_priv->rx_mbox[mbox];
+	krx_chan = &krio_priv->rx_channels[mbox];
+
+	dev_dbg(krio_priv->dev,
+		"open inb mbox: mport = 0x%x, dev_id = 0x%x,"
+		" mbox = %d, entries = %d\n",
+		(u32) mport, (u32) dev_id, mbox, entries);
+
+	/* Check if the port is already registered in this queue */
+	if (rx_mbox->port == mport)
+		return 0;
+
+	/* Lazily set up the Rx DMA channel for this mailbox */
+	if (!(krx_chan->dma_channel)) {
+		res = keystone_rio_mp_inb_init(mbox, krio_priv);
+		if (res)
+			return res;
+	}
+
+	rx_mbox->dev_id  = dev_id;
+	rx_mbox->entries = entries;
+	rx_mbox->port    = mport;
+	rx_mbox->id      = mbox;
+	rx_mbox->running = 1;
+
+	/* Map the mailbox to queue/flow */
+	res = keystone_rio_map_mbox(mbox,
+				    krx_chan->queue_num,
+				    krx_chan->flow_num,
+				    mport->sys_size, krio_priv);
+	if (res) {
+		/*
+		 * Roll back the registration so a failed open does not
+		 * leave a half-opened mailbox that looks running.
+		 */
+		rx_mbox->running = 0;
+		rx_mbox->port = NULL;
+		return res;
+	}
+
+	return 0;
+}
+
+/**
+ * keystone_rio_close_inb_mbox - Shut down KeyStone inbound mailbox
+ * @mport: Master port implementing the inbound message unit
+ * @mbox: Mailbox to close
+ *
+ * Disables the inbound message unit, stop queues and free all resources
+ */
+static void keystone_rio_close_inb_mbox(struct rio_mport *mport, int mbox)
+{
+	struct keystone_rio_data *krio_priv = mport->priv;
+	struct keystone_rio_mbox_info *rx_mbox;
+
+	/* Bound-check the mailbox index like the outbound path does */
+	if ((mbox < 0) || (mbox >= KEYSTONE_RIO_MAX_MBOX))
+		return;
+
+	rx_mbox = &krio_priv->rx_mbox[mbox];
+
+	dev_info(krio_priv->dev, "close inb mbox: mport = 0x%x, mbox = %d\n",
+		 (u32) mport, mbox);
+
+	rx_mbox->running = 0;
+
+	/* Nothing to release if the mailbox was never opened */
+	if (!(rx_mbox->port))
+		return;
+
+	rx_mbox->port = NULL;
+
+	/* Release both RXU map slots (multi- and single-segment entries) */
+	keystone_rio_free_rxu_map(rx_mbox->rxu_map_id[0], krio_priv);
+	keystone_rio_free_rxu_map(rx_mbox->rxu_map_id[1], krio_priv);
+
+	keystone_rio_mp_inb_exit(mbox, krio_priv);
+}
+
+/**
+ * keystone_rio_hw_add_inb_buffer - Add buffer to the KeyStone
+ * inbound message queue
+ * @mport: Master port implementing the inbound message unit
+ * @mbox: Inbound mailbox number
+ * @buffer: Buffer to add to inbound queue
+ *
+ * Hands @buffer to the Rx DMA channel of @mbox so its free-descriptor
+ * queue can be refilled with it. Always returns %0.
+ */
+static int keystone_rio_hw_add_inb_buffer(struct rio_mport *mport,
+					  int mbox, void *buffer)
+{
+	struct keystone_rio_data *krio_priv = mport->priv;
+	struct keystone_rio_rx_chan_info *chan;
+
+	/* Stash the buffer where the rxfree refill path will pick it up */
+	chan = &krio_priv->rx_channels[mbox];
+	chan->buff_temp = buffer;
+	dma_rxfree_refill(chan->dma_channel);
+
+	return 0;
+}
+
+/**
+ * keystone_rio_hw_get_inb_message - Fetch inbound message from
+ * the KeyStone message unit
+ * @mport: Master port implementing the inbound message unit
+ * @mbox: Inbound mailbox number
+ *
+ * Returns the payload of the next pending inbound message, or NULL
+ * when no message is pending.
+ */
+static void *keystone_rio_hw_get_inb_message(struct rio_mport *mport, int mbox)
+{
+	struct keystone_rio_data *krio_priv = mport->priv;
+	struct keystone_rio_mbox_info *rx_mbox = &(krio_priv->rx_mbox[mbox]);
+	struct keystone_rio_packet *pkt = rx_mbox->p_info_temp;
+	void *payload;
+
+	if (!pkt)
+		return NULL;
+
+	/* Detach the packet from the mailbox before releasing it */
+	rx_mbox->p_info_temp = NULL;
+	payload = pkt->buff;
+	pkt->buff = NULL;
+
+	/* The payload was mapped in sg[2]; sg[0]/sg[1] hold EPIB/PS words */
+	dma_unmap_sg(krio_priv->dev, &pkt->sg[2], 1, DMA_FROM_DEVICE);
+	kfree(pkt);
+
+	return payload;
+}
+
+/* Tear down the outbound messaging DMA channel, if one was opened */
+static void keystone_rio_mp_outb_exit(struct keystone_rio_data *krio_priv)
+{
+	struct dma_chan *chan = krio_priv->tx_channel;
+
+	if (chan == NULL)
+		return;
+
+	/* Quiesce the channel before handing it back to dmaengine */
+	dmaengine_pause(chan);
+	dma_release_channel(chan);
+	krio_priv->tx_channel = NULL;
+}
+
+/*
+ * Request and configure the outbound messaging DMA channel.
+ * Returns 0 on success or a negative error code.
+ */
+static int keystone_rio_mp_outb_init(struct keystone_rio_data *krio_priv)
+{
+	struct dma_keystone_info config;
+	dma_cap_mask_t mask;
+	int err;
+	const char *name;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	/* DMA TX channel */
+	name = krio_priv->tx_chan_name;
+	krio_priv->tx_channel = dma_request_channel_by_name(mask, name);
+	if (IS_ERR_OR_NULL(krio_priv->tx_channel)) {
+		/*
+		 * Do not keep an ERR_PTR() in tx_channel: the fail path and
+		 * keystone_rio_mp_outb_exit() check only for NULL and would
+		 * pass the bogus pointer to dma_release_channel(). Also
+		 * report the real error instead of a stale -ENODEV.
+		 */
+		err = krio_priv->tx_channel ?
+			PTR_ERR(krio_priv->tx_channel) : -ENODEV;
+		krio_priv->tx_channel = NULL;
+		dev_err(krio_priv->dev,
+			"Error requesting TX channel, err %d\n", err);
+		goto fail;
+	}
+
+	memset(&config, 0, sizeof(config));
+	config.direction	= DMA_MEM_TO_DEV;
+	config.tx_queue_depth	= krio_priv->tx_queue_depth;
+	err = dma_keystone_config(krio_priv->tx_channel, &config);
+	if (err) {
+		dev_err(krio_priv->dev,
+			"Error configuring TX channel, err %d\n", err);
+		goto fail;
+	}
+
+	dev_info(krio_priv->dev, "Opened tx channel: %p\n",
+		 krio_priv->tx_channel);
+
+	return 0;
+
+fail:
+	if (krio_priv->tx_channel) {
+		dma_release_channel(krio_priv->tx_channel);
+		krio_priv->tx_channel = NULL;
+	}
+
+	return err;
+}
+
+/**
+ * keystone_rio_open_outb_mbox - Initialize KeyStone outbound mailbox
+ * @mport: Master port implementing the outbound message unit
+ * @dev_id: Device specific pointer to pass on event
+ * @mbox: Mailbox to open
+ * @entries: Number of entries in the outbound mailbox ring
+ *
+ * Initializes buffer ring, request the outbound message interrupt,
+ * and enables the outbound message unit. Returns %0 on success and
+ * %-EINVAL or %-ENOMEM on failure.
+ */
+static int keystone_rio_open_outb_mbox (
+	struct rio_mport *mport,
+	void *dev_id,
+	int mbox,
+	int entries
+)
+{
+	struct keystone_rio_data *krio_priv = mport->priv;
+	struct keystone_rio_mbox_info *tx_mbox = &(krio_priv->tx_mbox[mbox]);
+	int res;
+
+	if (mbox >= KEYSTONE_RIO_MAX_MBOX)
+		return -EINVAL;
+
+	dev_dbg(krio_priv->dev,
+		"open_outb_mbox: mport = 0x%x, dev_id = 0x%x,"
+		"mbox = %d, entries = %d\n",
+		(u32) mport, (u32) dev_id, mbox, entries);
+
+	/* Check if already initialized */
+	if (tx_mbox->port == mport)
+		return 0;
+
+	/* Initialization of RapidIO outbound MP.
+	   The single Tx DMA channel is shared by all outbound mailboxes,
+	   so it is created lazily on the first open. */
+	if (!(krio_priv->tx_channel)) {
+		res = keystone_rio_mp_outb_init(krio_priv);
+		if (res)
+			return res;
+	}
+
+	/* Record the mailbox context; slot is a software-only counter used
+	   by the Tx completion callback to fake a ring position. */
+	tx_mbox->dev_id = dev_id;
+	tx_mbox->entries = entries;
+	tx_mbox->port = mport;
+	tx_mbox->id = mbox;
+	tx_mbox->slot = 0;
+	tx_mbox->running = 1;
+
+	return 0;
+}
+
+/**
+ * keystone_rio_close_outb_mbox - Shut down KeyStone outbound mailbox
+ * @mport: Master port implementing the outbound message unit
+ * @mbox: Mailbox to close
+ *
+ * Disables the outbound message unit, stop queues and free all resources
+ */
+static void keystone_rio_close_outb_mbox(struct rio_mport *mport, int mbox)
+{
+	struct keystone_rio_data *krio_priv = mport->priv;
+	struct keystone_rio_mbox_info *mb;
+
+	if (mbox >= KEYSTONE_RIO_MAX_MBOX)
+		return;
+
+	dev_info(krio_priv->dev, "close_outb_mbox: mport = 0x%x, mbox = %d\n",
+		 (u32) mport, mbox);
+
+	/* Mark the mailbox unused before releasing the shared Tx channel */
+	mb = &(krio_priv->tx_mbox[mbox]);
+	mb->port = NULL;
+	mb->running = 0;
+
+	keystone_rio_mp_outb_exit(krio_priv);
+}
+
+/*
+ * DMA completion callback for outbound messages: checks the transfer
+ * status, unmaps the payload and notifies the RapidIO client so it can
+ * free its buffer.
+ */
+static void keystone_rio_tx_complete(void *data)
+{
+	struct keystone_rio_packet *p_info = data;
+	struct keystone_rio_data *krio_priv = p_info->priv;
+	int mbox_id = p_info->mbox;
+	struct keystone_rio_mbox_info *mbox = &(krio_priv->tx_mbox[mbox_id]);
+	struct rio_mport *port = mbox->port;
+	void *dev_id = mbox->dev_id;
+
+	dev_dbg(krio_priv->dev,
+		"tx_complete: psdata[0] = %08x, psdata[1] = %08x\n",
+		p_info->psdata[0], p_info->psdata[1]);
+
+	p_info->status = dma_async_is_tx_complete(krio_priv->tx_channel,
+					p_info->cookie, NULL, NULL);
+	WARN_ON(p_info->status != DMA_SUCCESS && p_info->status != DMA_ERROR);
+
+	/* Only sg[2] (the payload) was DMA-mapped at submit time */
+	dma_unmap_sg(krio_priv->dev, &p_info->sg[2], 1, DMA_TO_DEVICE);
+
+	if (mbox->running) {
+		/*
+		 * Client is in charge of freeing the associated buffers
+		 * Because we do not have explicit hardware ring but queues, we
+		 * do not know where we are in the sw ring, let use fake slot.
+		 * But the semantic hereafter is dangerous in case of re-order:
+		 * bad buffer may be released...
+		 */
+		/* NOTE(review): the wrap test uses '>', so slot takes
+		   entries + 1 distinct values before resetting — confirm
+		   '>=' was not intended. */
+		port->outb_msg[mbox_id].mcback(port, dev_id,
+					       mbox_id, mbox->slot++);
+		if (mbox->slot > mbox->entries)
+			mbox->slot = 0;
+	}
+
+	kfree(p_info);
+}
+
+/**
+ * keystone_rio_hw_add_outb_message - Add a message to the KeyStone
+ * outbound message queue
+ * @mport: Master port with outbound message queue
+ * @rdev: Target of outbound message
+ * @mbox: Outbound mailbox
+ * @buffer: Message to add to outbound queue
+ * @len: Length of message
+ *
+ * Adds the @buffer message to the KeyStone outbound message queue. Returns
+ * %0 on success or a negative error code on failure. The buffer is owned
+ * by the caller and released through the mailbox completion callback.
+ */
+static int keystone_rio_hw_add_outb_message(
+	struct rio_mport *mport, struct rio_dev *rdev,
+	int mbox, void *buffer, const size_t len)
+{
+	struct keystone_rio_data *krio_priv = mport->priv;
+	struct dma_async_tx_descriptor *desc;
+	struct keystone_rio_packet *p_info;
+	int ret = 0;
+	/* Ensure that the number of bytes being transmitted is a multiple
+	   of double-word. This is as per the specification */
+	u32 plen = ((len + 7) & ~0x7);
+
+	p_info = kzalloc(sizeof(*p_info), GFP_ATOMIC);
+	if (!p_info) {
+		dev_warn(krio_priv->dev, "failed to alloc packet info\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	p_info->priv = krio_priv;
+
+	/* Word 1: source id and dest id (common to packet 11 and packet 9) */
+	p_info->psdata[0] = (rdev->destid & 0xffff)
+		| (mport->host_deviceid << 16);
+
+	/* Packet type 11 case (Message) */
+
+	/* Warning - Undocumented HW requirement:
+	   For type9, packet type MUST be set to 30 in
+	   keystone_hw_desc.desc_info[29:25] bits.
+
+	   For type 11, setting packet type to 31 in
+	   those bits is optional.
+	*/
+
+	/* Word 2: ssize = 32 dword, 4 retries, letter = 0, mbox */
+	p_info->psdata[1] = (KEYSTONE_RIO_MSG_SSIZE << 17) | (4 << 21)
+		| (mbox & 0x3f);
+
+	if (rdev->net->hport->sys_size)
+		p_info->psdata[1] |= KEYSTONE_RIO_DESC_FLAG_TT_16; /* tt */
+
+	dev_dbg(krio_priv->dev,
+		"packet type 11: psdata[0] = %08x, psdata[1] = %08x\n",
+		p_info->psdata[0], p_info->psdata[1]);
+
+	/* Use the correct specifiers: len is size_t, plen is u32 */
+	dev_dbg(krio_priv->dev, "buf(len=%zu, plen=%u)\n", len, plen);
+
+	p_info->mbox = mbox;
+	p_info->buff = buffer;
+
+	/* sg[0]/sg[1] carry the EPIB and PS descriptor words, sg[2] the
+	   payload; only the payload needs DMA mapping. */
+	sg_init_table(p_info->sg, KEYSTONE_RIO_SGLIST_SIZE);
+	sg_set_buf(&p_info->sg[0], p_info->epib, sizeof(p_info->epib));
+	sg_set_buf(&p_info->sg[1], p_info->psdata, sizeof(p_info->psdata));
+	sg_set_buf(&p_info->sg[2], buffer, plen);
+
+	p_info->sg_ents = 2 + dma_map_sg(krio_priv->dev, &p_info->sg[2],
+					 1, DMA_TO_DEVICE);
+
+	if (p_info->sg_ents != KEYSTONE_RIO_SGLIST_SIZE) {
+		kfree(p_info);
+		dev_warn(krio_priv->dev, "failed to map transmit packet\n");
+		ret = -ENXIO;
+		goto out;
+	}
+
+	desc = dmaengine_prep_slave_sg(krio_priv->tx_channel, p_info->sg,
+				       p_info->sg_ents, DMA_MEM_TO_DEV,
+				       DMA_HAS_EPIB | DMA_HAS_PSINFO);
+
+	if (IS_ERR_OR_NULL(desc)) {
+		dma_unmap_sg(krio_priv->dev, &p_info->sg[2], 1, DMA_TO_DEVICE);
+		kfree(p_info);
+		dev_warn(krio_priv->dev, "failed to prep slave dma\n");
+		ret = -ENOBUFS;
+		goto out;
+	}
+
+	desc->callback_param = p_info;
+	desc->callback = keystone_rio_tx_complete;
+	p_info->cookie = dmaengine_submit(desc);
+
+out:
+	return ret;
+}
+
+/*------------------------ Main Linux driver functions -----------------------*/
+
+/*
+ * Allocate, initialize and register a RapidIO master port for the given
+ * hardware port. Returns the new mport, or NULL on allocation failure
+ * (all callers already check for a NULL return).
+ */
+struct rio_mport *keystone_rio_register_mport(u32 port_id, u32 size,
+		struct keystone_rio_data *krio_priv)
+{
+	struct rio_ops *ops;
+	struct rio_mport *port;
+
+	/* Check allocations: the original dereferenced them unconditionally */
+	ops = kzalloc(sizeof(struct rio_ops), GFP_KERNEL);
+	if (!ops)
+		return NULL;
+
+	ops->lcread  = keystone_local_config_read;
+	ops->lcwrite = keystone_local_config_write;
+	ops->cread   = keystone_rio_config_read;
+	ops->cwrite  = keystone_rio_config_write;
+
+	ops->open_outb_mbox   = keystone_rio_open_outb_mbox;
+	ops->close_outb_mbox  = keystone_rio_close_outb_mbox;
+	ops->open_inb_mbox    = keystone_rio_open_inb_mbox;
+	ops->close_inb_mbox   = keystone_rio_close_inb_mbox;
+	ops->add_outb_message = keystone_rio_hw_add_outb_message;
+	ops->add_inb_buffer   = keystone_rio_hw_add_inb_buffer;
+	ops->get_inb_message  = keystone_rio_hw_get_inb_message;
+
+	port = kzalloc(sizeof(struct rio_mport), GFP_KERNEL);
+	if (!port) {
+		kfree(ops);
+		return NULL;
+	}
+	port->id = port_id;
+	port->index = port_id;
+	port->priv = krio_priv;
+	INIT_LIST_HEAD(&port->dbells);
+
+	/* Make a dummy per port region as ports are not
+	   really separated on KeyStone */
+	port->iores.start = krio_priv->board_rio_cfg.rio_regs_base +
+		(u32)(krio_priv->serial_port_regs) +
+		offsetof(struct keystone_rio_serial_port_regs,
+			 sp[port_id].link_maint_req);
+
+	port->iores.end = krio_priv->board_rio_cfg.rio_regs_base +
+		(u32)(krio_priv->serial_port_regs) +
+		offsetof(struct keystone_rio_serial_port_regs,
+			 sp[port_id].ctl);
+
+	port->iores.flags = IORESOURCE_MEM;
+
+	rio_init_dbell_res(&port->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
+	rio_init_mbox_res(&port->riores[RIO_INB_MBOX_RESOURCE], 0, 0);
+	rio_init_mbox_res(&port->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0);
+
+	sprintf(port->name, "RIO%d mport", port_id);
+
+	port->ops      = ops;
+	port->sys_size = size;
+	port->phy_type = RIO_PHY_SERIAL;
+	/* Hard coded here because in rio_disc_mport(), it is used in
+	   rio_enum_complete() before it is retrieved in
+	   rio_disc_peer() => rio_setup_device() */
+	port->phys_efptr = 0x100;
+
+	rio_register_mport(port);
+
+	return port;
+}
+
+
+/*
+ * Read the controller configuration from the device tree node and fill
+ * in built-in defaults for any missing optional property. Errors are
+ * logged but not fatal: the corresponding fields keep their zeroed or
+ * fallback values.
+ */
+static void keystone_rio_get_controller_defaults(struct device_node *node,
+		struct keystone_rio_data *krio_priv)
+{
+	struct keystone_rio_board_controller_info *c =
+		&krio_priv->board_rio_cfg;
+	struct keystone_rio_rx_chan_info *krx_chan;
+	u32 temp[15];
+	int i;
+
+	/* "reg" = <rio_regs_base size boot_cfg_regs_base size> */
+	if(of_property_read_u32_array (node, "reg", (u32 *)&(temp[0]), 4)) {
+		dev_err(krio_priv->dev, "Could not get default reg\n");
+	} else {
+		c->rio_regs_base = temp[0];
+		c->rio_regs_size = temp[1];
+		c->boot_cfg_regs_base = temp[2];
+		c->boot_cfg_regs_size = temp[3];
+	}
+
+	if(of_property_read_u32 (node, "dev-id-size", (u32 *)&(c->size))) {
+		dev_err(krio_priv->dev, "Could not get default dev-id-size\n");
+	}
+
+	/* Bitmask of the ports to bring up */
+	if(of_property_read_u32 (node, "ports", (u32 *)&(c->ports))) {
+		dev_err(krio_priv->dev, "Could not get default ports\n");
+	}
+
+	/* Serdes config */
+	c->serdes_config_num = 1; /* total number of serdes_config[] entries */
+	c->mode = 0; /* default serdes_config[] entry to be used */
+
+	/* Mode 0: sRIO config 0: MPY = 5x, div rate = half,
+	   link rate = 3.125 Gbps, mode 1x */
+	c->serdes_config[0].cfg_cntl 	 	= 0x0c053860; /* setting control register config */
+	c->serdes_config[0].serdes_cfg_pll 	= 0x0229; /* SerDes PLL configuration */
+	c->serdes_config[0].prescalar_srv_clk 	= 0x001e; /* prescalar_srv_clk */
+
+	/* serdes rx_chan_config
+	   NOTE(review): magic register values taken from the TI sRIO
+	   configuration for this link rate — confirm against the data
+	   manual before changing. */
+	for (i=0; i<KEYSTONE_RIO_MAX_PORT; i++) {
+		c->serdes_config[0].rx_chan_config[i] = 0x00440495;
+	}
+
+	/* serdes tx_chan_config */
+	for (i=0; i<KEYSTONE_RIO_MAX_PORT; i++) {
+		c->serdes_config[0].tx_chan_config[i] = 0x00180795;
+	}
+
+	/* path_mode */
+	for (i=0; i<KEYSTONE_RIO_MAX_PORT; i++) {
+		c->serdes_config[0].path_mode[i] = 0x00000000;
+	}
+
+	/* DMA tx chan config (falls back to "riotx" / depth 128) */
+	if (of_property_read_string(node, "tx_channel",
+				    &krio_priv->tx_chan_name) < 0){
+		dev_err(krio_priv->dev,
+			"missing \"tx_channel\" parameter\n");
+		krio_priv->tx_chan_name = "riotx";
+	}
+
+	if (of_property_read_u32(node, "tx_queue_depth",
+				 &krio_priv->tx_queue_depth) < 0) {
+		dev_err(krio_priv->dev,
+			"missing \"tx_queue_depth\" parameter\n");
+		krio_priv->tx_queue_depth = 128;
+	}
+
+	/* DMA rx chan config (falls back to "riorx" / depth 128 / 1552B) */
+	krx_chan = &(krio_priv->rx_channels[0]);
+	if (of_property_read_string(node, "rx_channel", &krx_chan->name) < 0){
+		dev_err(krio_priv->dev,
+			"missing \"rx_channel\" parameter\n");
+		krx_chan->name = "riorx";
+	}
+
+	if (of_property_read_u32_array(node, "rx_queue_depth",
+		       krx_chan->queue_depths, KEYSTONE_QUEUES_PER_CHAN) < 0) {
+		dev_err(krio_priv->dev,
+			"missing \"rx_queue_depth\" parameter\n");
+		krx_chan->queue_depths[0] = 128;
+	}
+
+	if (of_property_read_u32_array(node, "rx_buffer_size",
+		       krx_chan->buffer_sizes, KEYSTONE_QUEUES_PER_CHAN) < 0) {
+		dev_err(krio_priv->dev,
+			"missing \"rx_buffer_size\" parameter\n");
+		krx_chan->buffer_sizes[0] = 1552;
+	}
+}
+
+/*
+ * Periodic poll of the not-yet-ready ports: re-schedule the port check
+ * work until every port came up or the retry budget is exhausted.
+ */
+static void keystone_rio_port_status_timer(unsigned long data)
+{
+	struct keystone_rio_data *krio_priv = (struct keystone_rio_data *)data;
+
+	/* Give up after 90 polls and report the ports that never came up */
+	if (krio_priv->port_chk_cnt++ >= 90) {
+		dev_warn(krio_priv->dev,
+			 "RIO port register timeout, ports %08x not ready\n",
+			 krio_priv->ports_registering);
+		return;
+	}
+
+	/* The actual status check needs process context */
+	schedule_work(&krio_priv->port_chk_task);
+}
+
+/*
+ * Deferred port check: register every port that became ready, and re-arm
+ * the poll timer for the ports that are still down.
+ */
+static void keystone_rio_port_chk_task(struct work_struct *work)
+{
+	struct keystone_rio_data *krio_priv =
+		container_of(work, struct keystone_rio_data, port_chk_task);
+
+	u32 ports = krio_priv->ports_registering;
+	u32 size  = krio_priv->board_rio_cfg.size;
+	struct rio_mport *mport;
+#ifdef CONFIG_RIONET
+	int has_port_ready = 0;
+#endif
+
+	krio_priv->ports_registering = 0;
+	while (ports) {
+		int status;
+		u32 port = __ffs(ports);
+		ports &= ~(1 << port);
+
+		status = keystone_rio_port_status(port, krio_priv);
+		if (status == 0) {
+			/* Register this port */
+			mport = keystone_rio_register_mport(port,
+							    size, krio_priv);
+			if (!mport)
+				return;
+
+			/* link is up, clear all errors */
+			__raw_writel(0x00000000,
+				     &krio_priv->err_mgmt_regs->err_det);
+			__raw_writel(0x00000000,
+				     &(krio_priv->err_mgmt_regs->sp_err[port].det));
+			__raw_writel(
+				__raw_readl(&(krio_priv->serial_port_regs->
+					      sp[port].err_stat)),
+				&(krio_priv->serial_port_regs->
+				  sp[port].err_stat));
+
+#ifdef CONFIG_RIONET
+			has_port_ready = 1;
+#endif
+
+			dev_info(krio_priv->dev,
+				 "RIO: port RIO%d host_deviceid %d registered\n",
+				 port, mport->host_deviceid);
+		} else {
+			/*
+			 * Accumulate with |=: plain assignment kept only the
+			 * last not-ready port and silently dropped the others
+			 * from the retry mask.
+			 */
+			krio_priv->ports_registering |= (1 << port);
+
+			dev_dbg(krio_priv->dev, "RIO: port %d not ready\n",
+				port);
+		}
+	}
+
+	if (krio_priv->ports_registering) {
+		/* setup and start a timer to poll status */
+		krio_priv->timer.function = keystone_rio_port_status_timer;
+		krio_priv->timer.data     = (unsigned long)krio_priv;
+		krio_priv->timer.expires  = jiffies +
+			KEYSTONE_RIO_REGISTER_DELAY;
+		add_timer(&krio_priv->timer);
+	}
+#ifdef CONFIG_RIONET
+	else if (has_port_ready) {
+		rionet_init();
+		krio_priv->rionet_started = 1;
+	}
+#endif
+}
+
+/*
+ * Platform configuration setup: initialize the hardware, bring up the
+ * requested ports and register an mport for each ready port. Ports that
+ * are not ready yet are polled from a timer.
+ */
+static int __devinit keystone_rio_setup_controller(struct platform_device *pdev,
+		struct keystone_rio_data *krio_priv)
+{
+	u32 ports;
+	u32 p;
+	u32 mode;
+	u32 size = 0;
+	int res = 0;
+#ifdef CONFIG_RIONET
+	int has_port_ready = 0;
+#endif
+	struct rio_mport *mport;
+
+	size  = krio_priv->board_rio_cfg.size;
+	ports = krio_priv->board_rio_cfg.ports;
+	mode  = krio_priv->board_rio_cfg.mode;
+
+	dev_dbg(&pdev->dev, "size = %d, ports = 0x%x, mode = %d\n",
+		size, ports, mode);
+
+	if (mode >= krio_priv->board_rio_cfg.serdes_config_num) {
+		mode = 0;
+		dev_warn(&pdev->dev,
+			 "RIO: invalid port mode, forcing it to %d\n", mode);
+	}
+
+	/* Hardware set up of the controller */
+	keystone_rio_hw_init(mode, krio_priv);
+
+	/*
+	 * Configure all ports even if we do not use all of them.
+	 * This is needed for 2x and 4x modes.
+	 */
+	for (p = 0; p < KEYSTONE_RIO_MAX_PORT; p++) {
+		res = keystone_rio_port_init(p, mode, krio_priv);
+		if (res < 0) {
+			dev_err(&pdev->dev,
+				"RIO: initialization of port %d failed\n", p);
+			return res;
+		}
+	}
+
+	/* Start the controller */
+	keystone_rio_start(krio_priv);
+
+	/* Use and check ports status (but only the requested ones) */
+	krio_priv->ports_registering = 0;
+	while (ports) {
+		int status;
+		u32 port = __ffs(ports);
+		ports &= ~(1 << port);
+
+		/* Start the port */
+		keystone_rio_port_activate(port, krio_priv);
+
+		/*
+		 * Check the port status here before calling the generic RapidIO
+		 * layer. Port status check is done in rio_mport_is_active() as
+		 * well but we need to do it our way first due to some delays in
+		 * hw initialization.
+		 */
+		status = keystone_rio_port_status(port, krio_priv);
+		if (status == 0) {
+			/* Register this port */
+			mport = keystone_rio_register_mport(port, size,
+							    krio_priv);
+			if (!mport)
+				goto out;
+
+#ifdef CONFIG_RIONET
+			has_port_ready = 1;
+#endif
+
+			dev_info(&pdev->dev,
+				 "RIO: port RIO%d host_deviceid %d registered\n",
+				 port, mport->host_deviceid);
+		} else {
+			dev_warn(&pdev->dev, "RIO: port %d not ready\n", port);
+			/*
+			 * Accumulate with |=: plain assignment kept only the
+			 * last not-ready port in the retry mask, so earlier
+			 * not-ready ports were never polled again.
+			 */
+			krio_priv->ports_registering |= (1 << port);
+		}
+	}
+
+	if (krio_priv->ports_registering) {
+		/* setup and start a timer to poll status */
+		init_timer(&krio_priv->timer);
+		krio_priv->port_chk_cnt = 0;
+		krio_priv->timer.function = keystone_rio_port_status_timer;
+		krio_priv->timer.data = (unsigned long)krio_priv;
+		krio_priv->timer.expires = jiffies +
+			KEYSTONE_RIO_REGISTER_DELAY;
+		add_timer(&krio_priv->timer);
+	}
+#ifdef CONFIG_RIONET
+	else if (has_port_ready) {
+		rionet_init();
+		krio_priv->rionet_started = 1;
+	}
+#endif
+
+out:
+	return res;
+}
+
+/*
+ * Probe the KeyStone sRIO controller.
+ *
+ * Annotated __devinit (not __init): a probe callback can run after init
+ * memory has been discarded, so it must not live in .init.text. Error
+ * paths release everything acquired so far (the original leaked
+ * krio_priv, both ioremap regions and the clock on failure).
+ */
+static int __devinit keystone_rio_probe(struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	struct keystone_rio_data *krio_priv;
+	int res = 0;
+	void __iomem *bootcfg_regs;
+	void __iomem *rio_regs;
+
+	if (!node) {
+		dev_err(&pdev->dev, "could not find device info\n");
+		return -EINVAL;
+	}
+
+	krio_priv = kzalloc(sizeof(struct keystone_rio_data), GFP_KERNEL);
+	if (!krio_priv) {
+		dev_err(&pdev->dev, "memory allocation failed\n");
+		return -ENOMEM;
+	}
+
+	platform_set_drvdata(pdev, krio_priv);
+	krio_priv->dev = &(pdev->dev);
+
+	krio_priv->rionet_started = 0;
+
+	/* Get default config from device tree */
+	keystone_rio_get_controller_defaults(node, krio_priv);
+
+	/* sRIO main driver (global resources) */
+	mutex_init(&krio_priv->lsu_lock);
+	init_completion(&krio_priv->lsu_completion);
+	INIT_WORK(&krio_priv->port_chk_task, keystone_rio_port_chk_task);
+
+	/* Boot config region: JTAG id, SerDes status and SerDes config */
+	bootcfg_regs = ioremap(krio_priv->board_rio_cfg.boot_cfg_regs_base,
+			       krio_priv->board_rio_cfg.boot_cfg_regs_size);
+	if (!bootcfg_regs) {
+		res = -ENOMEM;
+		goto fail_free;
+	}
+	krio_priv->jtagid_reg     = bootcfg_regs + 0x0018;
+	krio_priv->serdes_sts_reg = bootcfg_regs + 0x154;
+	krio_priv->serdes_regs    = bootcfg_regs + 0x360;
+
+	/* sRIO peripheral register regions */
+	rio_regs = ioremap(krio_priv->board_rio_cfg.rio_regs_base,
+			   krio_priv->board_rio_cfg.rio_regs_size);
+	if (!rio_regs) {
+		res = -ENOMEM;
+		goto fail_unmap_bootcfg;
+	}
+	krio_priv->regs              = rio_regs;
+	krio_priv->car_csr_regs      = rio_regs + 0xb000;
+	krio_priv->serial_port_regs  = rio_regs + 0xb100;
+	krio_priv->err_mgmt_regs     = rio_regs + 0xc000;
+	krio_priv->phy_regs          = rio_regs + 0x1b000;
+	krio_priv->transport_regs    = rio_regs + 0x1b300;
+	krio_priv->pkt_buf_regs      = rio_regs + 0x1b600;
+	krio_priv->evt_mgmt_regs     = rio_regs + 0x1b900;
+	krio_priv->port_write_regs   = rio_regs + 0x1ba00;
+	krio_priv->link_regs         = rio_regs + 0x1bd00;
+	krio_priv->fabric_regs       = rio_regs + 0x1be00;
+	krio_priv->car_csr_regs_base = (u32)rio_regs + 0xb000;
+
+	/* Enable srio clock */
+	krio_priv->clk = clk_get(&pdev->dev, "clk_srio");
+	if (IS_ERR(krio_priv->clk)) {
+		dev_err(&pdev->dev, "Unable to get Keystone SRIO clock\n");
+		res = -EBUSY;
+		goto fail_unmap_rio;
+	}
+
+	/* Cycle the clock once to reset the peripheral */
+	clk_prepare_enable(krio_priv->clk);
+	ndelay(100);
+	clk_disable_unprepare(krio_priv->clk);
+	ndelay(100);
+	clk_prepare_enable(krio_priv->clk);
+
+	dev_info(&pdev->dev, "KeyStone RapidIO driver %s\n", DRIVER_VER);
+
+	/* Setup the sRIO controller */
+	res = keystone_rio_setup_controller(pdev, krio_priv);
+	if (res < 0)
+		goto fail_clk;
+
+	return 0;
+
+fail_clk:
+	clk_disable_unprepare(krio_priv->clk);
+	clk_put(krio_priv->clk);
+fail_unmap_rio:
+	iounmap(rio_regs);
+fail_unmap_bootcfg:
+	iounmap(bootcfg_regs);
+fail_free:
+	platform_set_drvdata(pdev, NULL);
+	kfree(krio_priv);
+	return res;
+}
+
+/*
+ * Stop the sRIO peripheral: tear down messaging resources, disable the
+ * hardware blocks, release the clock and free the driver state. Shared
+ * by the .shutdown and .remove paths.
+ */
+static void keystone_rio_shutdown(struct platform_device *pdev)
+{
+	struct keystone_rio_data *krio_priv = platform_get_drvdata(pdev);
+	int i;
+
+#ifdef CONFIG_RIONET
+	if (krio_priv->rionet_started)
+		rionet_exit();
+#endif
+
+	/* Release outbound, then every inbound messaging DMA channel */
+	keystone_rio_mp_outb_exit(krio_priv);
+
+	for (i=0; i<KEYSTONE_RIO_MAX_MBOX; i++)
+		keystone_rio_mp_inb_exit(i, krio_priv);
+
+	/* NOTE(review): one-second busy wait, presumably to let in-flight
+	   traffic drain before the blocks are disabled — confirm whether a
+	   shorter sleep would do. */
+	mdelay(1000);
+
+	/* disable blocks */
+	__raw_writel(0, &krio_priv->regs->gbl_en);
+	/* NOTE(review): inclusive bound disables KEYSTONE_RIO_BLK_NUM + 1
+	   entries — confirm blk[] is sized for this. */
+	for (i = 0; i <= KEYSTONE_RIO_BLK_NUM; i++)
+		__raw_writel(0, &(krio_priv->regs->blk[i].enable));
+
+	if (krio_priv->clk) {
+		clk_disable_unprepare(krio_priv->clk);
+		clk_put(krio_priv->clk);
+	}
+
+	platform_set_drvdata(pdev, NULL);
+
+	kfree(krio_priv);
+}
+
+/* Device removal: all teardown is shared with the shutdown path */
+static int __exit keystone_rio_remove(struct platform_device *pdev)
+{
+	keystone_rio_shutdown(pdev);
+	return 0;
+}
+
+static struct of_device_id __devinitdata of_match[] = {
+	{ .compatible = "ti,keystone-rapidio", },
+	{},
+};
+
+/*
+ * Export the table actually defined above: the original referenced a
+ * non-existent "keystone_hwqueue_of_match" symbol (copy/paste from the
+ * hwqueue driver), which breaks the modular build.
+ */
+MODULE_DEVICE_TABLE(of, of_match);
+
+/* Platform driver glue; remove reuses the shutdown teardown path */
+static struct platform_driver keystone_rio_driver = {
+	.driver = {
+		.name	        = "keystone-rapidio",
+		.owner	        = THIS_MODULE,
+		.of_match_table	= of_match,
+	},
+	.probe	= keystone_rio_probe,
+	.remove = __exit_p(keystone_rio_remove),
+	.shutdown = keystone_rio_shutdown,
+};
+
+/*
+ * module_platform_driver() expands to exactly the register/unregister
+ * module_init()/module_exit() pair that was previously open-coded, with
+ * the proper __init/__exit annotations.
+ */
+module_platform_driver(keystone_rio_driver);
+
+MODULE_AUTHOR("Aurelien Jacquiot");
+MODULE_DESCRIPTION("TI KeyStone RapidIO device driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rapidio/devices/keystone_rio.h b/drivers/rapidio/devices/keystone_rio.h
new file mode 100644
index 00000000000000..898c5de60fc0fa
--- /dev/null
+++ b/drivers/rapidio/devices/keystone_rio.h
@@ -0,0 +1,624 @@
+/*
+ * Copyright (C) 2010, 2011, 2012 Texas Instruments Incorporated
+ * Author: Aurelien Jacquiot <a-jacquiot@ti.com>
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated
+ * WingMan Kwok <w-kwok2@ti.com>
+ * - Updated for support on TI KeyStone platform.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef KEYSTONE_RIO_H
+#define KEYSTONE_RIO_H
+
+#include <asm/setup.h>
+#include <asm/cache.h>
+#include <asm/uaccess.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+
+#define KEYSTONE_RIO_MAP_FLAG_SEGMENT BIT(0)
+#define KEYSTONE_RIO_MAP_FLAG_SRC_PROMISC BIT(1)
+#define KEYSTONE_RIO_MAP_FLAG_TT_16 BIT(13)
+#define KEYSTONE_RIO_MAP_FLAG_DST_PROMISC BIT(15)
+#define KEYSTONE_RIO_DESC_FLAG_TT_16 BIT(9)
+
+#define KEYSTONE_RIO_BOOT_COMPLETE BIT(24)
+#define KEYSTONE_RIO_PER_EN BIT(2)
+#define KEYSTONE_RIO_PER_FREE BIT(0)
+#define KEYSTONE_RIO_PEF_FLOW_CONTROL BIT(7)
+
+/*
+ * Packet types
+ */
+#define KEYSTONE_RIO_PACKET_TYPE_NREAD 0x24
+#define KEYSTONE_RIO_PACKET_TYPE_NWRITE 0x54
+#define KEYSTONE_RIO_PACKET_TYPE_NWRITE_R 0x55
+#define KEYSTONE_RIO_PACKET_TYPE_SWRITE 0x60
+#define KEYSTONE_RIO_PACKET_TYPE_DBELL 0xa0
+#define KEYSTONE_RIO_PACKET_TYPE_MAINT_R 0x80
+#define KEYSTONE_RIO_PACKET_TYPE_MAINT_W 0x81
+#define KEYSTONE_RIO_PACKET_TYPE_MAINT_RR 0x82
+#define KEYSTONE_RIO_PACKET_TYPE_MAINT_WR 0x83
+#define KEYSTONE_RIO_PACKET_TYPE_MAINT_PW 0x84
+
+/*
+ * LSU defines
+ */
+#define KEYSTONE_RIO_LSU_PRIO 0
+
+#define KEYSTONE_RIO_LSU_BUSY_MASK BIT(31)
+#define KEYSTONE_RIO_LSU_FULL_MASK BIT(30)
+
+#define KEYSTONE_RIO_LSU_CC_MASK 0x0f
+#define KEYSTONE_RIO_LSU_CC_TIMEOUT 0x01
+#define KEYSTONE_RIO_LSU_CC_XOFF 0x02
+#define KEYSTONE_RIO_LSU_CC_ERROR 0x03
+#define KEYSTONE_RIO_LSU_CC_INVALID 0x04
+#define KEYSTONE_RIO_LSU_CC_DMA 0x05
+#define KEYSTONE_RIO_LSU_CC_RETRY 0x06
+#define KEYSTONE_RIO_LSU_CC_CANCELED 0x07
+
+/* Mask for receiving both error and good completion LSU interrupts */
+#define KEYSTONE_RIO_ICSR_LSU0(src_id) ((0x10001) << (src_id))
+
+/*
+ * Various RIO defines
+ */
+#define KEYSTONE_RIO_TIMEOUT_CNT 1000
+#define KEYSTONE_RIO_REGISTER_DELAY (2*HZ)
+
+/*
+ * RIO error, reset and special event interrupt defines
+ */
+#define KEYSTONE_RIO_PORT_ERROR_OUT_PKT_DROP BIT(26)
+#define KEYSTONE_RIO_PORT_ERROR_OUT_FAILED BIT(25)
+#define KEYSTONE_RIO_PORT_ERROR_OUT_DEGRADED BIT(24)
+#define KEYSTONE_RIO_PORT_ERROR_OUT_RETRY BIT(20)
+#define KEYSTONE_RIO_PORT_ERROR_OUT_ERROR BIT(17)
+#define KEYSTONE_RIO_PORT_ERROR_IN_ERROR BIT(9)
+#define KEYSTONE_RIO_PORT_ERROR_PW_PENDING BIT(4)
+#define KEYSTONE_RIO_PORT_ERROR_PORT_ERR BIT(2)
+
+#define KEYSTONE_RIO_PORT_ERROR_MASK \
+ (KEYSTONE_RIO_PORT_ERROR_OUT_PKT_DROP |\
+ KEYSTONE_RIO_PORT_ERROR_OUT_FAILED |\
+ KEYSTONE_RIO_PORT_ERROR_OUT_DEGRADED |\
+ KEYSTONE_RIO_PORT_ERROR_OUT_RETRY |\
+ KEYSTONE_RIO_PORT_ERROR_OUT_ERROR |\
+ KEYSTONE_RIO_PORT_ERROR_IN_ERROR |\
+ KEYSTONE_RIO_PORT_ERROR_PW_PENDING |\
+ KEYSTONE_RIO_PORT_ERROR_PORT_ERR)
+
+
+#define KEYSTONE_RIO_SP_HDR_NEXT_BLK_PTR 0x1000
+#define KEYSTONE_RIO_SP_HDR_EP_REC_ID 0x0002
+#define KEYSTONE_RIO_ERR_HDR_NEXT_BLK_PTR 0x3000
+#define KEYSTONE_RIO_ERR_EXT_FEAT_ID 0x0007
+
+/*
+ * RapidIO global definitions
+ */
+#define KEYSTONE_RIO_MAX_PORT 4
+#define KEYSTONE_RIO_BLK_NUM 9
+#define KEYSTONE_RIO_MAX_MBOX 4 /* 4 in multi-segment, 64 in single-segment */
+
+#define KEYSTONE_RIO_MAINT_BUF_SIZE 64
+#define KEYSTONE_RIO_MSG_SSIZE 0xe
+#define KEYSTONE_RIO_SGLIST_SIZE 3
+
+/*
+ * Dev Id and dev revision
+ */
+#define KEYSTONE_RIO_DEV_ID_VAL ((((__raw_readl(krio_priv->jtagid_reg)) << 4) & 0xffff0000) | 0x30)
+#define KEYSTONE_RIO_DEV_INFO_VAL (((__raw_readl(krio_priv->jtagid_reg)) >> 28) & 0xf)
+#define KEYSTONE_RIO_ID_TI (0x00000030)
+#define KEYSTONE_RIO_EXT_FEAT_PTR (0x00000100)
+
+/*
+ * Maximum message size for RIONET
+ */
+#define MACH_RIO_MAX_MSG_SIZE 1552
+
+/*
+ * Definition of the different RapidIO packet types according to the RapidIO
+ * specification 2.0
+ */
+#define RIO_PACKET_TYPE_STREAM 9 /* Data Streaming */
+#define RIO_PACKET_TYPE_MESSAGE 11 /* Message */
+
+/*
+ * SerDes configurations: one set of register values per hardware mode
+ * (see serdes_config[] in struct keystone_rio_board_controller_info).
+ */
+struct keystone_serdes_config {
+ u32 cfg_cntl; /* setting control register config */
+ u16 serdes_cfg_pll; /* SerDes PLL configuration */
+ u16 prescalar_srv_clk; /* prescaler for ip_clk */
+ u32 rx_chan_config[KEYSTONE_RIO_MAX_PORT]; /* SerDes receive channel configuration (per-port) */
+ u32 tx_chan_config[KEYSTONE_RIO_MAX_PORT]; /* SerDes transmit channel configuration (per-port) */
+ u32 path_mode[KEYSTONE_RIO_MAX_PORT]; /* path config for SerDes */
+};
+
+/*
+ * Per board RIO devices controller configuration
+ */
+struct keystone_rio_board_controller_info {
+ u32 rio_regs_base; /* RIO register region base (presumably a physical address from DT -- confirm against probe) */
+ u32 rio_regs_size;
+
+ u32 boot_cfg_regs_base; /* boot-config register region base */
+ u32 boot_cfg_regs_size;
+
+ u16 ports; /* bitfield of port(s) to probe on this controller */
+ u16 mode; /* hw mode (default serdes config). index into serdes_config[] */
+ u16 id; /* host id */
+ u16 size; /* RapidIO common transport system size.
+ * 0 - Small size. 256 devices.
+ * 1 - Large size, 65536 devices. */
+ u16 serdes_config_num; /* valid entries in serdes_config[]; at most ARRAY_SIZE(serdes_config) */
+ struct keystone_serdes_config serdes_config[4];
+};
+
+struct keystone_rio_data;
+
+/* Per-packet state for messages passed through the packet DMA engine. */
+struct keystone_rio_packet {
+ struct scatterlist sg[KEYSTONE_RIO_SGLIST_SIZE];
+ int sg_ents; /* valid entries in sg[] -- TODO confirm against DMA setup */
+ u32 epib[4]; /* NOTE(review): presumably descriptor EPIB words -- verify */
+ u32 psdata[2]; /* NOTE(review): presumably protocol-specific data words -- verify */
+ u32 mbox; /* mailbox this packet is associated with */
+ void *buff;
+ struct keystone_rio_data *priv; /* owning controller instance */
+ enum dma_status status;
+ dma_cookie_t cookie;
+};
+
+/* State for one RapidIO message-passing mailbox. */
+struct keystone_rio_mbox_info {
+ struct rio_mport *port; /* master port the mailbox is opened on */
+ u32 id; /* mbox */
+ u32 running; /* presumably non-zero while open -- confirm in the .c */
+ u32 entries;
+ u32 slot;
+ void *dev_id;
+ int rxu_map_id[2];
+ struct keystone_rio_packet *p_info_temp;
+};
+
+/* Receive-side DMA channel bookkeeping (one per rx_channels[] slot). */
+struct keystone_rio_rx_chan_info {
+ struct keystone_rio_data *priv;
+ struct dma_chan *dma_channel;
+ const char *name;
+ u32 queue_depths[KEYSTONE_QUEUES_PER_CHAN];
+ u32 buffer_sizes[KEYSTONE_QUEUES_PER_CHAN];
+ int chan_num; /* idx in rx_channels[] */
+ int queue_num; /* rx complete queue */
+ int flow_num;
+
+ void *buff_temp; /* temp storage when adding inb buffer */
+};
+
+/*
+ * RapidIO Registers
+ */
+
+/* SerDes register block: one PLL register plus rx/tx config per lane. */
+struct keystone_srio_serdes_regs {
+ u32 pll;
+
+ struct {
+ u32 rx;
+ u32 tx;
+ } channel[4];
+};
+
+/* RIO Registers 0000 - 2fff
+ * Memory-mapped layout; offsets are given per field and the struct must
+ * not be reordered or resized. */
+struct keystone_rio_regs {
+/* Required Peripheral Registers */
+ u32 pid; /* 0000 */
+ u32 pcr; /* 0004 */
+ u32 __rsvd0[3]; /* 0008 - 0010 */
+
+/* Peripheral Setting Control Registers */
+ u32 per_set_cntl; /* 0014 */
+ u32 per_set_cntl1; /* 0018 */
+
+ u32 __rsvd1[2]; /* 001c - 0020 */
+
+ u32 gbl_en; /* 0024 */
+ u32 gbl_en_stat; /* 0028 */
+
+ struct {
+ u32 enable; /* 002c */
+ u32 status; /* 0030 */
+ } blk[10]; /* 002c - 0078 */
+
+ /* ID Registers */
+ u32 __rsvd2[17]; /* 007c - 00bc */
+ u32 multiid_reg[8]; /* 00c0 - 00dc */
+
+/* Hardware Packet Forwarding Registers */
+ struct {
+ u32 pf_16b;
+ u32 pf_8b;
+ } pkt_fwd_cntl[8]; /* 00e0 - 011c */
+
+ u32 __rsvd3[24]; /* 0120 - 017c */
+
+/* Interrupt Registers */
+ struct {
+ u32 status;
+ u32 __rsvd0;
+ u32 clear;
+ u32 __rsvd1;
+ } doorbell_int[4]; /* 0180 - 01bc */
+
+ struct {
+ u32 status;
+ u32 __rsvd0;
+ u32 clear;
+ u32 __rsvd1;
+ } lsu_int[2]; /* 01c0 - 01dc */
+
+ u32 err_rst_evnt_int_stat; /* 01e0 */
+ u32 __rsvd4;
+ u32 err_rst_evnt_int_clear; /* 01e8 */
+ u32 __rsvd5;
+
+ u32 __rsvd6[4]; /* 01f0 - 01fc */
+
+ struct {
+ u32 route; /* 0200 */
+ u32 route2; /* 0204 */
+ u32 __rsvd; /* 0208 */
+ } doorbell_int_route[4]; /* 0200 - 022c */
+
+ u32 lsu0_int_route[4]; /* 0230 - 023c */
+ u32 lsu1_int_route1; /* 0240 */
+
+ u32 __rsvd7[3]; /* 0244 - 024c */
+
+ u32 err_rst_evnt_int_route[3]; /* 0250 - 0258 */
+
+ u32 __rsvd8[2]; /* 025c - 0260 */
+
+ u32 interupt_ctl; /* 0264 (sic: hardware doc spelling) */
+
+ u32 __rsvd9[26]; /* 0268, 026c, 0270 - 02cc */
+
+ u32 intdst_rate_cntl[16]; /* 02d0 - 030c */
+ u32 intdst_rate_disable; /* 0310 */
+
+ u32 __rsvd10[59]; /* 0314 - 03fc */
+
+/* RXU Registers */
+ struct {
+ u32 ltr_mbox_src;
+ u32 dest_prom_seg;
+ u32 flow_qid;
+ } rxu_map[64]; /* 0400 - 06fc */
+
+ struct {
+ u32 cos_src;
+ u32 dest_prom;
+ u32 stream;
+ } rxu_type9_map[64]; /* 0700 - 09fc */
+
+ u32 __rsvd11[192]; /* 0a00 - 0cfc */
+
+/* LSU/MAU Registers */
+ struct {
+ u32 addr_msb; /* 0d00 */
+ u32 addr_lsb_cfg_ofs; /* 0d04 */
+ u32 dsp_addr; /* 0d08 */
+ u32 dbell_val_byte_cnt; /* 0d0c */
+ u32 destid; /* 0d10 */
+ u32 dbell_info_fttype; /* 0d14 */
+ u32 busy_full; /* 0d18 */
+ } lsu_reg[8]; /* 0d00 - 0ddc */
+
+ u32 lsu_setup_reg[2]; /* 0de0 - 0de4 */
+ u32 lsu_stat_reg[6]; /* 0de8 - 0dfc */
+ u32 lsu_flow_masks[4]; /* 0e00 - 0e0c */
+
+ u32 __rsvd12[16]; /* 0e10 - 0e4c */
+
+/* Flow Control Registers */
+ u32 flow_cntl[16]; /* 0e50 - 0e8c */
+ u32 __rsvd13[8]; /* 0e90 - 0eac */
+
+/* TXU Registers 0eb0 - 0efc */
+ u32 tx_cppi_flow_masks[8]; /* 0eb0 - 0ecc */
+ u32 tx_queue_sch_info[4]; /* 0ed0 - 0edc */
+ u32 garbage_coll_qid[3]; /* 0ee0 - 0ee8 */
+
+ u32 __rsvd14[69]; /* 0eec, 0ef0 - 0ffc */
+
+};
+
+/* CDMAHP Registers 1000 - 2ffc (entire range carried as reserved padding) */
+struct keystone_rio_pktdma_regs {
+ u32 __rsvd[2048]; /* 1000 - 2ffc */
+};
+
+/* CSR/CAR Registers b000+
+ * Field names follow the standard RapidIO capability/CSR register set. */
+struct keystone_rio_car_csr_regs {
+ u32 dev_id; /* b000 */
+ u32 dev_info; /* b004 */
+ u32 assembly_id; /* b008 */
+ u32 assembly_info; /* b00c */
+ u32 pe_feature; /* b010 */
+
+ u32 sw_port; /* b014 */
+
+ u32 src_op; /* b018 */
+ u32 dest_op; /* b01c */
+
+ u32 __rsvd1[7]; /* b020 - b038 */
+
+ u32 data_stm_info; /* b03c */
+
+ u32 __rsvd2[2]; /* b040 - b044 */
+
+ u32 data_stm_logical_ctl; /* b048 */
+ u32 pe_logical_ctl; /* b04c */
+
+ u32 __rsvd3[2]; /* b050 - b054 */
+
+ u32 local_cfg_hbar; /* b058 */
+ u32 local_cfg_bar; /* b05c */
+
+ u32 base_dev_id; /* b060 */
+ u32 __rsvd4;
+ u32 host_base_id_lock; /* b068 */
+ u32 component_tag; /* b06c */
+ /* b070 - b0fc */
+};
+
+/* Serial port maintenance block: per-port link maintenance, ackID,
+ * control and error/status registers. */
+struct keystone_rio_serial_port_regs {
+ u32 sp_maint_blk_hdr; /* b100 */
+ u32 __rsvd6[7]; /* b104 - b11c */
+
+ u32 sp_link_timeout_ctl; /* b120 */
+ u32 sp_rsp_timeout_ctl; /* b124 */
+ u32 __rsvd7[5]; /* b128 - b138 */
+ u32 sp_gen_ctl; /* b13c */
+
+ struct {
+ u32 link_maint_req; /* b140 */
+ u32 link_maint_resp;/* b144 */
+ u32 ackid_stat; /* b148 */
+ u32 __rsvd[2]; /* b14c - b150 */
+ u32 ctl2; /* b154 */
+ u32 err_stat; /* b158 */
+ u32 ctl; /* b15c */
+ } sp[4]; /* b140 - b1bc */
+
+ /* b1c0 - bffc */
+};
+
+/* Error reporting block plus per-port error detect/capture and
+ * per-lane status registers. */
+struct keystone_rio_err_mgmt_regs {
+ u32 err_report_blk_hdr; /* c000 */
+ u32 __rsvd9;
+ u32 err_det; /* c008 */
+ u32 err_en; /* c00c */
+ u32 h_addr_capt; /* c010 */
+ u32 addr_capt; /* c014 */
+ u32 id_capt; /* c018 */
+ u32 ctrl_capt; /* c01c */
+ u32 __rsvd10[2]; /* c020 - c024 */
+ u32 port_write_tgt_id; /* c028 */
+ u32 __rsvd11[5]; /* c02c - c03c */
+
+ struct {
+ u32 det; /* c040 */
+ u32 rate_en; /* c044 */
+ u32 attr_capt_dbg0; /* c048 */
+ u32 capt_0_dbg1; /* c04c */
+ u32 capt_1_dbg2; /* c050 */
+ u32 capt_2_dbg3; /* c054 */
+ u32 capt_3_dbg4; /* c058 */
+ u32 __rsvd0[3]; /* c05c - c064 */
+ u32 rate; /* c068 */
+ u32 thresh; /* c06c */
+ u32 __rsvd1[4]; /* c070 - c07c */
+ } sp_err[4]; /* c040 - c13c */
+
+ u32 __rsvd12[1972]; /* c140 - e00c */
+
+ struct {
+ u32 stat0; /* e010 */
+ u32 stat1; /* e014 */
+ u32 __rsvd[6]; /* e018 - e02c */
+ } lane_stat[4]; /* e010 - e08c */
+
+ /* e090 - 1affc */
+};
+
+/* PHY layer block: per-port implementation-specific control, status,
+ * timers and MECS registers. */
+struct keystone_rio_phy_layer_regs {
+ u32 phy_blk_hdr; /* 1b000 */
+ u32 __rsvd14[31]; /* 1b004 - 1b07c */
+ struct {
+ u32 imp_spec_ctl; /* 1b080 */
+ u32 pwdn_ctl; /* 1b084 */
+ u32 __rsvd0[2];
+
+ u32 status; /* 1b090 */
+ u32 int_enable; /* 1b094 */
+ u32 port_wr_enable; /* 1b098 */
+ u32 event_gen; /* 1b09c */
+
+ u32 all_int_en; /* 1b0a0 */
+ u32 all_port_wr_en; /* 1b0a4 */
+ u32 __rsvd1[2];
+
+ u32 path_ctl; /* 1b0b0 */
+ u32 discovery_timer;/* 1b0b4 */
+ u32 silence_timer; /* 1b0b8 */
+ u32 vmin_exp; /* 1b0bc */
+
+ u32 pol_ctl; /* 1b0c0 */
+ u32 __rsvd2;
+ u32 denial_ctl; /* 1b0c8 */
+ u32 __rsvd3;
+
+ u32 rcvd_mecs; /* 1b0d0 */
+ u32 __rsvd4;
+ u32 mecs_fwd; /* 1b0d8 */
+ u32 __rsvd5;
+
+ u32 long_cs_tx1; /* 1b0e0 */
+ u32 long_cs_tx2; /* 1b0e4 */
+ u32 __rsvd[6]; /* 1b0e8, 1b0ec, 1b0f0 - 1b0fc */
+ } phy_sp[4]; /* 1b080 - 1b27c */
+
+ /* 1b280 - 1b2fc */
+};
+
+/* Transport layer block: per-port control/status and base routing
+ * (pattern-match) registers. */
+struct keystone_rio_transport_layer_regs {
+ u32 transport_blk_hdr; /* 1b300 */
+ u32 __rsvd16[31]; /* 1b304 - 1b37c */
+
+ struct {
+ u32 control; /* 1b380 */
+ u32 __rsvd0[3];
+
+ u32 status; /* 1b390 */
+ u32 int_enable; /* 1b394 */
+ u32 port_wr_enable; /* 1b398 */
+ u32 event_gen; /* 1b39c */
+
+ struct {
+ u32 ctl; /* 1b3a0 */
+ u32 pattern_match; /* 1b3a4 */
+ u32 __rsvd[2]; /* 1b3a8 - 1b3ac */
+ } base_route[4]; /* 1b3a0 - 1b3dc */
+
+ u32 __rsvd1[8]; /* 1b3e0 - 1b3fc */
+
+ } transport_sp[4]; /* 1b380 - 1b57c */
+
+ /* 1b580 - 1b5fc */
+};
+
+/* Packet buffer block: per-port buffer control/status, ingress/egress
+ * resources and watermark registers. */
+struct keystone_rio_pkt_buf_regs {
+ u32 pkt_buf_blk_hdr; /* 1b600 */
+ u32 __rsvd18[31]; /* 1b604 - 1b67c */
+
+ struct {
+ u32 control; /* 1b680 */
+ u32 __rsvd0[3];
+
+ u32 status; /* 1b690 */
+ u32 int_enable; /* 1b694 */
+ u32 port_wr_enable; /* 1b698 */
+ u32 event_gen; /* 1b69c */
+
+ u32 ingress_rsc; /* 1b6a0 */
+ u32 egress_rsc; /* 1b6a4 */
+ u32 __rsvd1[2];
+
+ u32 ingress_watermark[4]; /* 1b6b0 - 1b6bc */
+ u32 __rsvd2[16]; /* 1b6c0 - 1b6fc */
+
+ } pkt_buf_sp[4]; /* 1b680 - 1b87c */
+
+ /* 1b880 - 1b8fc */
+};
+
+/* Event management block: interrupt, port-write, MECS and reset
+ * status/enable registers. */
+struct keystone_rio_evt_mgmt_regs {
+ u32 evt_mgmt_blk_hdr; /* 1b900 */
+ u32 __rsvd20[3];
+
+ u32 evt_mgmt_int_stat; /* 1b910 */
+ u32 evt_mgmt_int_enable; /* 1b914 */
+ u32 evt_mgmt_int_port_stat; /* 1b918 */
+ u32 __rsvd21;
+
+ u32 evt_mgmt_port_wr_stat; /* 1b920 */
+ u32 evt_mgmt_port_wr_enable;/* 1b924 */
+ u32 evt_mgmt_port_wr_port_stat; /* 1b928 */
+ u32 __rsvd22;
+
+ u32 evt_mgmt_dev_int_en; /* 1b930 */
+ u32 evt_mgmt_dev_port_wr_en; /* 1b934 */
+ u32 __rsvd23;
+ u32 evt_mgmt_mecs_stat; /* 1b93c */
+
+ u32 evt_mgmt_mecs_int_en; /* 1b940 */
+ u32 evt_mgmt_mecs_cap_en; /* 1b944 */
+ u32 evt_mgmt_mecs_trig_en; /* 1b948 */
+ u32 evt_mgmt_mecs_req; /* 1b94c */
+
+ u32 evt_mgmt_mecs_port_stat;/* 1b950 */
+ u32 __rsvd24[2];
+ u32 evt_mgmt_mecs_event_gen;/* 1b95c */
+
+ u32 evt_mgmt_rst_port_stat; /* 1b960 */
+ u32 __rsvd25;
+ u32 evt_mgmt_rst_int_en; /* 1b968 */
+ u32 __rsvd26;
+
+ u32 evt_mgmt_rst_port_wr_en;/* 1b970 */
+ /* 1b974 - 1b9fc */
+};
+
+/* Port-write block: reception control/status and capture registers. */
+struct keystone_rio_port_write_regs {
+ u32 port_wr_blk_hdr; /* 1ba00 */
+ u32 port_wr_ctl; /* 1ba04 */
+ u32 port_wr_route; /* 1ba08 */
+ u32 __rsvd28;
+
+ u32 port_wr_rx_stat; /* 1ba10 */
+ u32 port_wr_rx_event_gen; /* 1ba14 */
+ u32 __rsvd29[2];
+
+ u32 port_wr_rx_capt[4]; /* 1ba20 - 1ba2c */
+ /* 1ba30 - 1bcfc */
+};
+
+/* Link layer block: whiteboard, port number, reset control and local
+ * error detect/capture registers. */
+struct keystone_rio_link_layer_regs {
+ u32 link_blk_hdr; /* 1bd00 */
+ u32 __rsvd31[8]; /* 1bd04 - 1bd20 */
+ u32 whiteboard; /* 1bd24 */
+ u32 port_number; /* 1bd28 */
+
+ u32 __rsvd32; /* 1bd2c */
+
+ u32 prescalar_srv_clk; /* 1bd30 */
+ u32 reg_rst_ctl; /* 1bd34 */
+ u32 __rsvd33[4]; /* 1bd38, 1bd3c, 1bd40, 1bd44 */
+ u32 local_err_det; /* 1bd48 */
+ u32 local_err_en; /* 1bd4c */
+
+ u32 local_h_addr_capt; /* 1bd50 */
+ u32 local_addr_capt; /* 1bd54 */
+ u32 local_id_capt; /* 1bd58 */
+ u32 local_ctrl_capt; /* 1bd5c */
+
+ /* 1bd60 - 1bdfc */
+};
+
+/* Fabric block: control/status plus per-port fabric status registers. */
+struct keystone_rio_fabric_regs {
+ u32 fabric_hdr; /* 1be00 */
+ u32 __rsvd35[3]; /* 1be04 - 1be0c */
+
+ u32 fabric_csr; /* 1be10 */
+ u32 __rsvd36[11]; /* 1be14 - 1be3c */
+
+ u32 sp_fabric_status[4]; /* 1be40 - 1be4c */
+};
+
+#ifdef CONFIG_RIONET
+/* Built-in rionet (Ethernet-over-RapidIO) entry points, called from this
+ * driver's shutdown path (presumably provided by drivers/net/rionet.c --
+ * confirm). */
+extern int rionet_init(void);
+extern void rionet_exit(void);
+#endif
+
+#endif /* KEYSTONE_RIO_H */
+