Diffstat (limited to 'drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c')
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c | 127
1 file changed, 103 insertions(+), 24 deletions(-)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
index 8d48b64485c69e..46a41cfff5751b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell RPM CN10K driver
+/* Marvell RPM CN10K driver
*
* Copyright (C) 2020 Marvell.
*/
@@ -49,6 +49,7 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
return 0;
}
+#define LMT_MAP_TBL_W1_OFF 8
static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc)
{
return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) +
@@ -82,10 +83,10 @@ static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
dev_err(rvu->dev, "%s LMTLINE iova transulation failed err:%llx\n", __func__, val);
return -EIO;
}
- /* PA[51:12] = RVU_AF_SMMU_TLN_FLIT1[60:21]
+ /* PA[51:12] = RVU_AF_SMMU_TLN_FLIT0[57:18]
* PA[11:0] = IOVA[11:0]
*/
- pa = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TLN_FLIT1) >> 21;
+ pa = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TLN_FLIT0) >> 18;
pa &= GENMASK_ULL(39, 0);
*lmt_addr = (pa << 12) | (iova & 0xFFF);
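The PA reconstruction above is pure shift-and-mask arithmetic and can be sanity-checked outside the kernel. A minimal user-space sketch, assuming a made-up FLIT0 snapshot and IOVA (in the driver, the real value is read from RVU_AF_SMMU_TLN_FLIT0):

#include <stdint.h>
#include <stdio.h>

/* PA[51:12] is carried in FLIT0 bits [57:18]; PA[11:0] comes from the IOVA. */
static uint64_t lmt_pa(uint64_t flit0, uint64_t iova)
{
	uint64_t pfn = (flit0 >> 18) & ((1ULL << 40) - 1); /* 40-bit frame number */

	return (pfn << 12) | (iova & 0xFFF);
}

int main(void)
{
	uint64_t flit0 = 0x123456789ULL << 18; /* hypothetical register value */
	uint64_t iova = 0xdeadb2a8ULL;         /* hypothetical LMTLINE IOVA */

	printf("lmt_addr = 0x%llx\n", (unsigned long long)lmt_pa(flit0, iova));
	return 0;
}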
@@ -131,9 +132,11 @@ int rvu_mbox_handler_lmtst_tbl_setup(struct rvu *rvu,
struct lmtst_tbl_setup_req *req,
struct msg_rsp *rsp)
{
- u64 lmt_addr, val;
- u32 pri_tbl_idx;
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+ u32 pri_tbl_idx, tbl_idx;
+ u64 lmt_addr;
int err = 0;
+ u64 val;
/* Check if PF_FUNC wants to use its own local memory as LMTLINE
* region, if so, convert that IOVA to physical address and
@@ -170,7 +173,7 @@ int rvu_mbox_handler_lmtst_tbl_setup(struct rvu *rvu,
dev_err(rvu->dev,
"Failed to read LMT map table: index 0x%x err %d\n",
pri_tbl_idx, err);
- return err;
+ goto error;
}
/* Update the base lmt addr of secondary with primary's base
@@ -181,7 +184,53 @@ int rvu_mbox_handler_lmtst_tbl_setup(struct rvu *rvu,
return err;
}
- return 0;
+ /* This mailbox can also be used to update word1 of APR_LMT_MAP_ENTRY_S,
+ * e.g. to enable scheduled LMTST, disable LMTLINE prefetch, or disable
+ * early completion for ordered LMTST.
+ */
+ if (req->sch_ena || req->dis_sched_early_comp || req->dis_line_pref) {
+ tbl_idx = rvu_get_lmtst_tbl_index(rvu, req->hdr.pcifunc);
+ err = lmtst_map_table_ops(rvu, tbl_idx + LMT_MAP_TBL_W1_OFF,
+ &val, LMT_TBL_OP_READ);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to read LMT map table: index 0x%x err %d\n",
+ tbl_idx + LMT_MAP_TBL_W1_OFF, err);
+ goto error;
+ }
+
+ /* Storing lmt map table entry word1 default value as this needs
+ * to be reverted in FLR. Also making sure this default value
+ * doesn't get overwritten on multiple calls to this mailbox.
+ */
+ if (!pfvf->lmt_map_ent_w1)
+ pfvf->lmt_map_ent_w1 = val;
+
+ /* Disable early completion for Ordered LMTSTs. */
+ if (req->dis_sched_early_comp)
+ val |= (req->dis_sched_early_comp <<
+ APR_LMT_MAP_ENT_DIS_SCH_CMP_SHIFT);
+ /* Enable scheduled LMTST */
+ if (req->sch_ena)
+ val |= (req->sch_ena << APR_LMT_MAP_ENT_SCH_ENA_SHIFT) |
+ req->ssow_pf_func;
+ /* Disable LMTLINE prefetch before receiving store data. */
+ if (req->dis_line_pref)
+ val |= (req->dis_line_pref <<
+ APR_LMT_MAP_ENT_DIS_LINE_PREF_SHIFT);
+
+ err = lmtst_map_table_ops(rvu, tbl_idx + LMT_MAP_TBL_W1_OFF,
+ &val, LMT_TBL_OP_WRITE);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to update LMT map table: index 0x%x err %d\n",
+ tbl_idx + LMT_MAP_TBL_W1_OFF, err);
+ goto error;
+ }
+ }
+
+error:
+ return err;
}
/* Resetting the lmtst map table to original base addresses */
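For context on the LMT_MAP_TBL_W1_OFF arithmetic used in the setup path above and the reset path below: each APR_LMT_MAP_ENTRY_S appears to span two 64-bit words, with word0 holding the base address and word1 the control flags, so word1 sits 8 bytes past the entry's table index. A hedged sketch of the word1 flag composition follows; the bit positions here are illustrative placeholders, the driver uses the APR_LMT_MAP_ENT_*_SHIFT constants from the rvu headers:

#include <stdint.h>
#include <stdbool.h>

/* Placeholder bit positions, not the hardware-defined ones. */
#define ENT_SCH_ENA_SHIFT	35
#define ENT_DIS_SCH_CMP_SHIFT	36
#define ENT_DIS_LINE_PREF_SHIFT	37

static uint64_t lmt_map_build_w1(uint64_t w1, bool sch_ena,
				 bool dis_sched_early_comp,
				 bool dis_line_pref, uint16_t ssow_pf_func)
{
	if (dis_sched_early_comp)
		w1 |= 1ULL << ENT_DIS_SCH_CMP_SHIFT;
	/* Scheduled LMTST also carries the SSOW PF_FUNC in the low bits. */
	if (sch_ena)
		w1 |= (1ULL << ENT_SCH_ENA_SHIFT) | ssow_pf_func;
	if (dis_line_pref)
		w1 |= 1ULL << ENT_DIS_LINE_PREF_SHIFT;
	return w1;
}

Because the handler saves the pristine word1 into pfvf->lmt_map_ent_w1 only on the first call, repeated mailbox requests OR flags on top of the same saved default, which is what the FLR reset below writes back.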
@@ -194,27 +243,45 @@ void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc)
if (is_rvu_otx2(rvu))
return;
- if (pfvf->lmt_base_addr) {
+ if (pfvf->lmt_base_addr || pfvf->lmt_map_ent_w1) {
/* This corresponds to lmt map table index */
tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc);
/* Restore the original lmt base addr for the respective
* pcifunc.
*/
- err = lmtst_map_table_ops(rvu, tbl_idx, &pfvf->lmt_base_addr,
- LMT_TBL_OP_WRITE);
- if (err)
- dev_err(rvu->dev,
- "Failed to update LMT map table: index 0x%x err %d\n",
- tbl_idx, err);
- pfvf->lmt_base_addr = 0;
+ if (pfvf->lmt_base_addr) {
+ err = lmtst_map_table_ops(rvu, tbl_idx,
+ &pfvf->lmt_base_addr,
+ LMT_TBL_OP_WRITE);
+ if (err)
+ dev_err(rvu->dev,
+ "Failed to update LMT map table: index 0x%x err %d\n",
+ tbl_idx, err);
+ pfvf->lmt_base_addr = 0;
+ }
+ /* Revert to the original word1 value of the lmtst map table entry
+ * which underwent changes.
+ */
+ if (pfvf->lmt_map_ent_w1) {
+ err = lmtst_map_table_ops(rvu,
+ tbl_idx + LMT_MAP_TBL_W1_OFF,
+ &pfvf->lmt_map_ent_w1,
+ LMT_TBL_OP_WRITE);
+ if (err)
+ dev_err(rvu->dev,
+ "Failed to update LMT map table: index 0x%x err %d\n",
+ tbl_idx + LMT_MAP_TBL_W1_OFF, err);
+ pfvf->lmt_map_ent_w1 = 0;
+ }
}
}
int rvu_set_channels_base(struct rvu *rvu)
{
+ u16 nr_lbk_chans, nr_sdp_chans, nr_cgx_chans, nr_cpt_chans;
+ u16 sdp_chan_base, cgx_chan_base, cpt_chan_base;
struct rvu_hwinfo *hw = rvu->hw;
- u16 cpt_chan_base;
- u64 nix_const;
+ u64 nix_const, nix_const1;
int blkaddr;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
@@ -222,6 +289,7 @@ int rvu_set_channels_base(struct rvu *rvu)
return blkaddr;
nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
+ nix_const1 = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
hw->cgx = (nix_const >> 12) & 0xFULL;
hw->lmac_per_cgx = (nix_const >> 8) & 0xFULL;
@@ -244,14 +312,24 @@ int rvu_set_channels_base(struct rvu *rvu)
* channels such that all channel numbers are contiguous
* leaving no holes. This way the new CPT channels can be
* accommodated. The order of channel numbers assigned is
- * LBK, SDP, CGX and CPT.
+ * LBK, SDP, CGX and CPT. Also, the base channel number
+ * of a block must be a multiple of the block's number
+ * of channels.
*/
- hw->sdp_chan_base = hw->lbk_chan_base + hw->lbk_links *
- ((nix_const >> 16) & 0xFFULL);
- hw->cgx_chan_base = hw->sdp_chan_base + hw->sdp_links * SDP_CHANNELS;
+ nr_lbk_chans = (nix_const >> 16) & 0xFFULL;
+ nr_sdp_chans = nix_const1 & 0xFFFULL;
+ nr_cgx_chans = nix_const & 0xFFULL;
+ nr_cpt_chans = (nix_const >> 32) & 0xFFFULL;
+
+ sdp_chan_base = hw->lbk_chan_base + hw->lbk_links * nr_lbk_chans;
+ /* Round up base channel to multiple of number of channels */
+ hw->sdp_chan_base = ALIGN(sdp_chan_base, nr_sdp_chans);
+
+ cgx_chan_base = hw->sdp_chan_base + hw->sdp_links * nr_sdp_chans;
+ hw->cgx_chan_base = ALIGN(cgx_chan_base, nr_cgx_chans);
- cpt_chan_base = hw->cgx_chan_base + hw->cgx_links *
- (nix_const & 0xFFULL);
+ cpt_chan_base = hw->cgx_chan_base + hw->cgx_links * nr_cgx_chans;
+ hw->cpt_chan_base = ALIGN(cpt_chan_base, nr_cpt_chans);
/* Out of 4096 channels, start CPT from 2048 so
* that the MSB for CPT channels is always set
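The rounding in this hunk relies on the kernel's ALIGN() macro, which rounds x up to the next multiple of a power-of-two alignment; the per-link channel counts read from NIX_AF_CONST/NIX_AF_CONST1 are power-of-two sized, which is what makes the "base must be a multiple of the count" property reachable this way. A standalone sketch of the same rounding, with hypothetical link and channel counts:

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as the kernel's ALIGN(); valid only for power-of-two a. */
#define ALIGN_POW2(x, a)	(((x) + ((uint64_t)(a) - 1)) & ~((uint64_t)(a) - 1))

int main(void)
{
	/* Hypothetical layout: LBK base 0x100, one LBK link with 64
	 * channels, followed by SDP with 256 channels per link.
	 */
	uint64_t sdp_chan_base = 0x100 + 1 * 64;	/* 0x140 */

	printf("sdp base 0x%llx -> aligned 0x%llx\n",
	       (unsigned long long)sdp_chan_base,
	       (unsigned long long)ALIGN_POW2(sdp_chan_base, 256));
	return 0;
}

Here 0x140 is not a multiple of 256, so the SDP base is bumped to 0x200 before the CGX base is computed on top of it.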
@@ -355,6 +433,7 @@ err_put:
static void __rvu_nix_set_channels(struct rvu *rvu, int blkaddr)
{
+ u64 nix_const1 = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
u64 nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
u16 cgx_chans, lbk_chans, sdp_chans, cpt_chans;
struct rvu_hwinfo *hw = rvu->hw;
@@ -364,7 +443,7 @@ static void __rvu_nix_set_channels(struct rvu *rvu, int blkaddr)
cgx_chans = nix_const & 0xFFULL;
lbk_chans = (nix_const >> 16) & 0xFFULL;
- sdp_chans = SDP_CHANNELS;
+ sdp_chans = nix_const1 & 0xFFFULL;
cpt_chans = (nix_const >> 32) & 0xFFFULL;
start = hw->cgx_chan_base;
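For reference, the bitfield positions used by both rvu_set_channels_base() and __rvu_nix_set_channels() (taken from the masks in the code above, not from the hardware manual) can be collected into a single decode helper. A sketch:

#include <stdint.h>

/* Channel counts per link, as extracted by the hunks above. */
struct nix_chan_counts {
	uint16_t cgx;	/* NIX_AF_CONST[7:0]   */
	uint16_t lbk;	/* NIX_AF_CONST[23:16] */
	uint16_t cpt;	/* NIX_AF_CONST[43:32] */
	uint16_t sdp;	/* NIX_AF_CONST1[11:0] */
};

static struct nix_chan_counts nix_chan_decode(uint64_t nix_const,
					      uint64_t nix_const1)
{
	struct nix_chan_counts c = {
		.cgx = nix_const & 0xFFULL,
		.lbk = (nix_const >> 16) & 0xFFULL,
		.cpt = (nix_const >> 32) & 0xFFFULL,
		.sdp = nix_const1 & 0xFFFULL,
	};
	return c;
}

Reading the SDP count from NIX_AF_CONST1 instead of the old SDP_CHANNELS constant is the substantive change in the last hunk: the count is now taken from hardware rather than hard-coded.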