-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.c            43
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.h              6
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c       17
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.c              31
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.h               2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c          20
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c      2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h      2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c        119
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c         71
10 files changed, 225 insertions, 88 deletions
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
index b92264d0a77e71..1e5aa539750404 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
@@ -214,11 +214,12 @@ int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid)
}
EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp);
-void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
+static void otx2_mbox_msg_send_data(struct otx2_mbox *mbox, int devid, u64 data)
{
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
struct mbox_hdr *tx_hdr, *rx_hdr;
void *hw_mbase = mdev->hwbase;
+ u64 intr_val;
tx_hdr = hw_mbase + mbox->tx_start;
rx_hdr = hw_mbase + mbox->rx_start;
@@ -254,14 +255,52 @@ void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
spin_unlock(&mdev->mbox_lock);
+ /* Check if interrupt pending */
+ intr_val = readq((void __iomem *)mbox->reg_base +
+ (mbox->trigger | (devid << mbox->tr_shift)));
+
+ intr_val |= data;
/* The interrupt should be fired after num_msgs is written
* to the shared memory
*/
- writeq(1, (void __iomem *)mbox->reg_base +
+ writeq(intr_val, (void __iomem *)mbox->reg_base +
(mbox->trigger | (devid << mbox->tr_shift)));
}
+
+void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
+{
+ otx2_mbox_msg_send_data(mbox, devid, MBOX_DOWN_MSG);
+}
EXPORT_SYMBOL(otx2_mbox_msg_send);
+void otx2_mbox_msg_send_up(struct otx2_mbox *mbox, int devid)
+{
+ otx2_mbox_msg_send_data(mbox, devid, MBOX_UP_MSG);
+}
+EXPORT_SYMBOL(otx2_mbox_msg_send_up);
+
+bool otx2_mbox_wait_for_zero(struct otx2_mbox *mbox, int devid)
+{
+ u64 data;
+
+ data = readq((void __iomem *)mbox->reg_base +
+ (mbox->trigger | (devid << mbox->tr_shift)));
+
+ /* If data is non-zero wait for ~1ms and return to caller
+ * whether data has changed to zero or not after the wait.
+ */
+ if (!data)
+ return true;
+
+ usleep_range(950, 1000);
+
+ data = readq((void __iomem *)mbox->reg_base +
+ (mbox->trigger | (devid << mbox->tr_shift)));
+
+ return data == 0;
+}
+EXPORT_SYMBOL(otx2_mbox_wait_for_zero);
+
struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
int size, int size_rsp)
{
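The two exported helpers above give senders separate doorbells for down (request/response) and up (notification) traffic. Below is an illustrative sketch of the AF-side calling convention, not part of the patch, condensed from the mcs_rvu_if.c and rvu_cgx.c hunks later in this series; the allocator name example_alloc_notify() is hypothetical and stands in for a generated otx2_mbox_alloc_msg_*() helper, and rvu->mbox_lock is the mutex added to struct rvu further down.

/* Illustrative only -- mirrors the notification pattern this patch adopts. */
static void example_notify_pf(struct rvu *rvu, int pf)
{
	struct msg_req *req;

	mutex_lock(&rvu->mbox_lock);		/* serialize up and down msgs */

	req = example_alloc_notify(rvu, pf);	/* hypothetical otx2_mbox_alloc_msg_*() */
	if (!req) {
		mutex_unlock(&rvu->mbox_lock);
		return;
	}

	/* Give the PF up to ~1ms to consume any previous up-message ... */
	otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pf);

	/* ... then ring the doorbell with the MBOX_UP_MSG bit set */
	otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf);

	mutex_unlock(&rvu->mbox_lock);
}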
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 61ab7f66f053cc..eb2a20b5a0d0c6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -16,6 +16,9 @@
#define MBOX_SIZE SZ_64K
+#define MBOX_DOWN_MSG 1
+#define MBOX_UP_MSG 2
+
/* AF/PF: PF initiated, PF/VF VF initiated */
#define MBOX_DOWN_RX_START 0
#define MBOX_DOWN_RX_SIZE (46 * SZ_1K)
@@ -101,6 +104,7 @@ int otx2_mbox_regions_init(struct otx2_mbox *mbox, void __force **hwbase,
struct pci_dev *pdev, void __force *reg_base,
int direction, int ndevs, unsigned long *bmap);
void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid);
+void otx2_mbox_msg_send_up(struct otx2_mbox *mbox, int devid);
int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid);
int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid);
struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
@@ -118,6 +122,8 @@ static inline struct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox,
return otx2_mbox_alloc_msg_rsp(mbox, devid, size, 0);
}
+bool otx2_mbox_wait_for_zero(struct otx2_mbox *mbox, int devid);
+
/* Mailbox message types */
#define MBOX_MSG_MASK 0xFFFF
#define MBOX_MSG_INVALID 0xFFFE
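With MBOX_DOWN_MSG and MBOX_UP_MSG the mailbox trigger register acts as a small event mask rather than a plain doorbell: the send helpers OR their bit into the current value, and the receiver clears only the bit it is servicing. A minimal receive-side sketch, condensed from the otx2_pf.c interrupt-handler hunk later in this patch; it is illustrative only, with the bounce-buffer sync, hdr->num_msgs checks and tracing of the real handler omitted.

/* Illustrative fragment of a PF-side mbox IRQ handler (simplified). */
static irqreturn_t example_pfaf_mbox_intr(int irq, void *pf_irq)
{
	struct otx2_nic *pf = pf_irq;
	u64 mbox_data;

	otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));	/* clear the IRQ */
	mbox_data = otx2_read64(pf, RVU_PF_PFAF_MBOX0);

	if (mbox_data & MBOX_UP_MSG) {
		mbox_data &= ~MBOX_UP_MSG;
		otx2_write64(pf, RVU_PF_PFAF_MBOX0, mbox_data);
		queue_work(pf->mbox_wq, &pf->mbox.mbox_up_wrk);	/* notifications */
	}

	if (mbox_data & MBOX_DOWN_MSG) {
		mbox_data &= ~MBOX_DOWN_MSG;
		otx2_write64(pf, RVU_PF_PFAF_MBOX0, mbox_data);
		queue_work(pf->mbox_wq, &pf->mbox.mbox_wrk);	/* replies */
	}

	return IRQ_HANDLED;
}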
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
index dfd23580e3b8e1..d39d86e694ccf7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
@@ -121,13 +121,17 @@ int mcs_add_intr_wq_entry(struct mcs *mcs, struct mcs_intr_event *event)
static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu)
{
struct mcs_intr_info *req;
- int err, pf;
+ int pf;
pf = rvu_get_pf(event->pcifunc);
+ mutex_lock(&rvu->mbox_lock);
+
req = otx2_mbox_alloc_msg_mcs_intr_notify(rvu, pf);
- if (!req)
+ if (!req) {
+ mutex_unlock(&rvu->mbox_lock);
return -ENOMEM;
+ }
req->mcs_id = event->mcs_id;
req->intr_mask = event->intr_mask;
@@ -135,10 +139,11 @@ static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu)
req->hdr.pcifunc = event->pcifunc;
req->lmac_id = event->lmac_id;
- otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pf);
- err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf);
- if (err)
- dev_warn(rvu->dev, "MCS notification to pf %d failed\n", pf);
+ otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pf);
+
+ otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf);
+
+ mutex_unlock(&rvu->mbox_lock);
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 07d4859de53ade..ff78251f92d448 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -2119,7 +2119,7 @@ bad_message:
}
}
-static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
+static void __rvu_mbox_handler(struct rvu_work *mwork, int type, bool poll)
{
struct rvu *rvu = mwork->rvu;
int offset, err, id, devid;
@@ -2186,6 +2186,9 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
}
mw->mbox_wrk[devid].num_msgs = 0;
+ if (poll)
+ otx2_mbox_wait_for_zero(mbox, devid);
+
/* Send mbox responses to VF/PF */
otx2_mbox_msg_send(mbox, devid);
}
@@ -2193,15 +2196,18 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
static inline void rvu_afpf_mbox_handler(struct work_struct *work)
{
struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+ struct rvu *rvu = mwork->rvu;
- __rvu_mbox_handler(mwork, TYPE_AFPF);
+ mutex_lock(&rvu->mbox_lock);
+ __rvu_mbox_handler(mwork, TYPE_AFPF, true);
+ mutex_unlock(&rvu->mbox_lock);
}
static inline void rvu_afvf_mbox_handler(struct work_struct *work)
{
struct rvu_work *mwork = container_of(work, struct rvu_work, work);
- __rvu_mbox_handler(mwork, TYPE_AFVF);
+ __rvu_mbox_handler(mwork, TYPE_AFVF, false);
}
static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type)
@@ -2376,6 +2382,8 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
}
}
+ mutex_init(&rvu->mbox_lock);
+
mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
if (!mbox_regions) {
err = -ENOMEM;
@@ -2525,10 +2533,9 @@ static void rvu_queue_work(struct mbox_wq_info *mw, int first,
}
}
-static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
+static irqreturn_t rvu_mbox_pf_intr_handler(int irq, void *rvu_irq)
{
struct rvu *rvu = (struct rvu *)rvu_irq;
- int vfs = rvu->vfs;
u64 intr;
intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
@@ -2542,6 +2549,18 @@ static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu *rvu = (struct rvu *)rvu_irq;
+ int vfs = rvu->vfs;
+ u64 intr;
+
+ /* Sync with mbox memory region */
+ rmb();
+
/* Handle VF interrupts */
if (vfs > 64) {
intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1));
@@ -2886,7 +2905,7 @@ static int rvu_register_interrupts(struct rvu *rvu)
/* Register mailbox interrupt handler */
sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox");
ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX),
- rvu_mbox_intr_handler, 0,
+ rvu_mbox_pf_intr_handler, 0,
&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu);
if (ret) {
dev_err(rvu->dev,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index f390525a621772..35834687e40fe9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -591,6 +591,8 @@ struct rvu {
spinlock_t mcs_intrq_lock;
/* CPT interrupt lock */
spinlock_t cpt_intr_lock;
+
+ struct mutex mbox_lock; /* Serialize mbox up and down msgs */
};
static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 38acdc7a73bbec..72e060cf6b6181 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -232,7 +232,7 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
struct cgx_link_user_info *linfo;
struct cgx_link_info_msg *msg;
unsigned long pfmap;
- int err, pfid;
+ int pfid;
linfo = &event->link_uinfo;
pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);
@@ -255,16 +255,22 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
continue;
}
+ mutex_lock(&rvu->mbox_lock);
+
/* Send mbox message to PF */
msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid);
- if (!msg)
+ if (!msg) {
+ mutex_unlock(&rvu->mbox_lock);
continue;
+ }
+
msg->link_info = *linfo;
- otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pfid);
- err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid);
- if (err)
- dev_warn(rvu->dev, "notification to pf %d failed\n",
- pfid);
+
+ otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pfid);
+
+ otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pfid);
+
+ mutex_unlock(&rvu->mbox_lock);
} while (pfmap);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 02d0b707aea5bd..a85ac039d779be 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -1592,7 +1592,7 @@ int otx2_detach_resources(struct mbox *mbox)
detach->partial = false;
/* Send detach request to AF */
- otx2_mbox_msg_send(&mbox->mbox, 0);
+ otx2_sync_mbox_msg(mbox);
mutex_unlock(&mbox->lock);
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 06910307085efa..7e16a341ec588f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -815,7 +815,7 @@ static inline int otx2_sync_mbox_up_msg(struct mbox *mbox, int devid)
if (!otx2_mbox_nonempty(&mbox->mbox_up, devid))
return 0;
- otx2_mbox_msg_send(&mbox->mbox_up, devid);
+ otx2_mbox_msg_send_up(&mbox->mbox_up, devid);
err = otx2_mbox_wait_for_rsp(&mbox->mbox_up, devid);
if (err)
return err;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index e5fe67e7386551..b40bd0e4675148 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -292,8 +292,8 @@ static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs)
return 0;
}
-static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
- int first, int mdevs, u64 intr, int type)
+static void otx2_queue_vf_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
+ int first, int mdevs, u64 intr)
{
struct otx2_mbox_dev *mdev;
struct otx2_mbox *mbox;
@@ -307,40 +307,26 @@ static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
mbox = &mw->mbox;
mdev = &mbox->dev[i];
- if (type == TYPE_PFAF)
- otx2_sync_mbox_bbuf(mbox, i);
hdr = mdev->mbase + mbox->rx_start;
/* The hdr->num_msgs is set to zero immediately in the interrupt
- * handler to ensure that it holds a correct value next time
- * when the interrupt handler is called.
- * pf->mbox.num_msgs holds the data for use in pfaf_mbox_handler
- * pf>mbox.up_num_msgs holds the data for use in
- * pfaf_mbox_up_handler.
+ * handler to ensure that it holds a correct value next time
+ * when the interrupt handler is called. pf->mw[i].num_msgs
+ * holds the data for use in otx2_pfvf_mbox_handler and
+ * pf->mw[i].up_num_msgs holds the data for use in
+ * otx2_pfvf_mbox_up_handler.
*/
if (hdr->num_msgs) {
mw[i].num_msgs = hdr->num_msgs;
hdr->num_msgs = 0;
- if (type == TYPE_PFAF)
- memset(mbox->hwbase + mbox->rx_start, 0,
- ALIGN(sizeof(struct mbox_hdr),
- sizeof(u64)));
-
queue_work(mbox_wq, &mw[i].mbox_wrk);
}
mbox = &mw->mbox_up;
mdev = &mbox->dev[i];
- if (type == TYPE_PFAF)
- otx2_sync_mbox_bbuf(mbox, i);
hdr = mdev->mbase + mbox->rx_start;
if (hdr->num_msgs) {
mw[i].up_num_msgs = hdr->num_msgs;
hdr->num_msgs = 0;
- if (type == TYPE_PFAF)
- memset(mbox->hwbase + mbox->rx_start, 0,
- ALIGN(sizeof(struct mbox_hdr),
- sizeof(u64)));
-
queue_work(mbox_wq, &mw[i].mbox_up_wrk);
}
}
@@ -356,8 +342,10 @@ static void otx2_forward_msg_pfvf(struct otx2_mbox_dev *mdev,
/* Msgs are already copied, trigger VF's mbox irq */
smp_wmb();
+ otx2_mbox_wait_for_zero(pfvf_mbox, devid);
+
offset = pfvf_mbox->trigger | (devid << pfvf_mbox->tr_shift);
- writeq(1, (void __iomem *)pfvf_mbox->reg_base + offset);
+ writeq(MBOX_DOWN_MSG, (void __iomem *)pfvf_mbox->reg_base + offset);
/* Restore VF's mbox bounce buffer region address */
src_mdev->mbase = bbuf_base;
@@ -547,7 +535,7 @@ static void otx2_pfvf_mbox_up_handler(struct work_struct *work)
end:
offset = mbox->rx_start + msg->next_msgoff;
if (mdev->msgs_acked == (vf_mbox->up_num_msgs - 1))
- __otx2_mbox_reset(mbox, 0);
+ __otx2_mbox_reset(mbox, vf_idx);
mdev->msgs_acked++;
}
}
@@ -564,8 +552,7 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
if (vfs > 64) {
intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(1));
otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr);
- otx2_queue_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr,
- TYPE_PFVF);
+ otx2_queue_vf_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr);
if (intr)
trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
vfs = 64;
@@ -574,7 +561,7 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0));
otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), intr);
- otx2_queue_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF);
+ otx2_queue_vf_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr);
if (intr)
trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
@@ -597,8 +584,9 @@ static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs)
if (!pf->mbox_pfvf)
return -ENOMEM;
- pf->mbox_pfvf_wq = alloc_ordered_workqueue("otx2_pfvf_mailbox",
- WQ_HIGHPRI | WQ_MEM_RECLAIM);
+ pf->mbox_pfvf_wq = alloc_workqueue("otx2_pfvf_mailbox",
+ WQ_UNBOUND | WQ_HIGHPRI |
+ WQ_MEM_RECLAIM, 0);
if (!pf->mbox_pfvf_wq)
return -ENOMEM;
@@ -821,20 +809,22 @@ static void otx2_pfaf_mbox_handler(struct work_struct *work)
struct mbox *af_mbox;
struct otx2_nic *pf;
int offset, id;
+ u16 num_msgs;
af_mbox = container_of(work, struct mbox, mbox_wrk);
mbox = &af_mbox->mbox;
mdev = &mbox->dev[0];
rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ num_msgs = rsp_hdr->num_msgs;
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
pf = af_mbox->pfvf;
- for (id = 0; id < af_mbox->num_msgs; id++) {
+ for (id = 0; id < num_msgs; id++) {
msg = (struct mbox_msghdr *)(mdev->mbase + offset);
otx2_process_pfaf_mbox_msg(pf, msg);
offset = mbox->rx_start + msg->next_msgoff;
- if (mdev->msgs_acked == (af_mbox->num_msgs - 1))
+ if (mdev->msgs_acked == (num_msgs - 1))
__otx2_mbox_reset(mbox, 0);
mdev->msgs_acked++;
}
@@ -945,12 +935,14 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
int offset, id, devid = 0;
struct mbox_hdr *rsp_hdr;
struct mbox_msghdr *msg;
+ u16 num_msgs;
rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ num_msgs = rsp_hdr->num_msgs;
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
- for (id = 0; id < af_mbox->up_num_msgs; id++) {
+ for (id = 0; id < num_msgs; id++) {
msg = (struct mbox_msghdr *)(mdev->mbase + offset);
devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;
@@ -959,10 +951,11 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
otx2_process_mbox_msg_up(pf, msg);
offset = mbox->rx_start + msg->next_msgoff;
}
- if (devid) {
+ /* Forward to VF iff VFs are really present */
+ if (devid && pci_num_vf(pf->pdev)) {
otx2_forward_vf_mbox_msgs(pf, &pf->mbox.mbox_up,
MBOX_DIR_PFVF_UP, devid - 1,
- af_mbox->up_num_msgs);
+ num_msgs);
return;
}
@@ -972,16 +965,49 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
{
struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
- struct mbox *mbox;
+ struct mbox *mw = &pf->mbox;
+ struct otx2_mbox_dev *mdev;
+ struct otx2_mbox *mbox;
+ struct mbox_hdr *hdr;
+ u64 mbox_data;
/* Clear the IRQ */
otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
- mbox = &pf->mbox;
- trace_otx2_msg_interrupt(mbox->mbox.pdev, "AF to PF", BIT_ULL(0));
+ mbox_data = otx2_read64(pf, RVU_PF_PFAF_MBOX0);
+
+ if (mbox_data & MBOX_UP_MSG) {
+ mbox_data &= ~MBOX_UP_MSG;
+ otx2_write64(pf, RVU_PF_PFAF_MBOX0, mbox_data);
+
+ mbox = &mw->mbox_up;
+ mdev = &mbox->dev[0];
+ otx2_sync_mbox_bbuf(mbox, 0);
+
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (hdr->num_msgs)
+ queue_work(pf->mbox_wq, &mw->mbox_up_wrk);
+
+ trace_otx2_msg_interrupt(pf->pdev, "UP message from AF to PF",
+ BIT_ULL(0));
+ }
+
+ if (mbox_data & MBOX_DOWN_MSG) {
+ mbox_data &= ~MBOX_DOWN_MSG;
+ otx2_write64(pf, RVU_PF_PFAF_MBOX0, mbox_data);
+
+ mbox = &mw->mbox;
+ mdev = &mbox->dev[0];
+ otx2_sync_mbox_bbuf(mbox, 0);
+
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (hdr->num_msgs)
+ queue_work(pf->mbox_wq, &mw->mbox_wrk);
- otx2_queue_work(mbox, pf->mbox_wq, 0, 1, 1, TYPE_PFAF);
+ trace_otx2_msg_interrupt(pf->pdev, "DOWN reply from AF to PF",
+ BIT_ULL(0));
+ }
return IRQ_HANDLED;
}
@@ -3087,6 +3113,7 @@ static void otx2_vf_link_event_task(struct work_struct *work)
struct otx2_vf_config *config;
struct cgx_link_info_msg *req;
struct mbox_msghdr *msghdr;
+ struct delayed_work *dwork;
struct otx2_nic *pf;
int vf_idx;
@@ -3095,10 +3122,24 @@ static void otx2_vf_link_event_task(struct work_struct *work)
vf_idx = config - config->pf->vf_configs;
pf = config->pf;
+ if (config->intf_down)
+ return;
+
+ mutex_lock(&pf->mbox.lock);
+
+ dwork = &config->link_event_work;
+
+ if (!otx2_mbox_wait_for_zero(&pf->mbox_pfvf[0].mbox_up, vf_idx)) {
+ schedule_delayed_work(dwork, msecs_to_jiffies(100));
+ mutex_unlock(&pf->mbox.lock);
+ return;
+ }
+
msghdr = otx2_mbox_alloc_msg_rsp(&pf->mbox_pfvf[0].mbox_up, vf_idx,
sizeof(*req), sizeof(struct msg_rsp));
if (!msghdr) {
dev_err(pf->dev, "Failed to create VF%d link event\n", vf_idx);
+ mutex_unlock(&pf->mbox.lock);
return;
}
@@ -3107,7 +3148,11 @@ static void otx2_vf_link_event_task(struct work_struct *work)
req->hdr.sig = OTX2_MBOX_REQ_SIG;
memcpy(&req->link_info, &pf->linfo, sizeof(req->link_info));
+ otx2_mbox_wait_for_zero(&pf->mbox_pfvf[0].mbox_up, vf_idx);
+
otx2_sync_mbox_up_msg(&pf->mbox_pfvf[0], vf_idx);
+
+ mutex_unlock(&pf->mbox.lock);
}
static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 35e06048356f4d..cf0aa16d754070 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -89,16 +89,20 @@ static void otx2vf_vfaf_mbox_handler(struct work_struct *work)
struct otx2_mbox *mbox;
struct mbox *af_mbox;
int offset, id;
+ u16 num_msgs;
af_mbox = container_of(work, struct mbox, mbox_wrk);
mbox = &af_mbox->mbox;
mdev = &mbox->dev[0];
rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
- if (af_mbox->num_msgs == 0)
+ num_msgs = rsp_hdr->num_msgs;
+
+ if (num_msgs == 0)
return;
+
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
- for (id = 0; id < af_mbox->num_msgs; id++) {
+ for (id = 0; id < num_msgs; id++) {
msg = (struct mbox_msghdr *)(mdev->mbase + offset);
otx2vf_process_vfaf_mbox_msg(af_mbox->pfvf, msg);
offset = mbox->rx_start + msg->next_msgoff;
@@ -151,6 +155,7 @@ static void otx2vf_vfaf_mbox_up_handler(struct work_struct *work)
struct mbox *vf_mbox;
struct otx2_nic *vf;
int offset, id;
+ u16 num_msgs;
vf_mbox = container_of(work, struct mbox, mbox_up_wrk);
vf = vf_mbox->pfvf;
@@ -158,12 +163,14 @@ static void otx2vf_vfaf_mbox_up_handler(struct work_struct *work)
mdev = &mbox->dev[0];
rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
- if (vf_mbox->up_num_msgs == 0)
+ num_msgs = rsp_hdr->num_msgs;
+
+ if (num_msgs == 0)
return;
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
- for (id = 0; id < vf_mbox->up_num_msgs; id++) {
+ for (id = 0; id < num_msgs; id++) {
msg = (struct mbox_msghdr *)(mdev->mbase + offset);
otx2vf_process_mbox_msg_up(vf, msg);
offset = mbox->rx_start + msg->next_msgoff;
@@ -178,40 +185,48 @@ static irqreturn_t otx2vf_vfaf_mbox_intr_handler(int irq, void *vf_irq)
struct otx2_mbox_dev *mdev;
struct otx2_mbox *mbox;
struct mbox_hdr *hdr;
+ u64 mbox_data;
/* Clear the IRQ */
otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));
+ mbox_data = otx2_read64(vf, RVU_VF_VFPF_MBOX0);
+
/* Read latest mbox data */
smp_rmb();
- /* Check for PF => VF response messages */
- mbox = &vf->mbox.mbox;
- mdev = &mbox->dev[0];
- otx2_sync_mbox_bbuf(mbox, 0);
+ if (mbox_data & MBOX_DOWN_MSG) {
+ mbox_data &= ~MBOX_DOWN_MSG;
+ otx2_write64(vf, RVU_VF_VFPF_MBOX0, mbox_data);
+
+ /* Check for PF => VF response messages */
+ mbox = &vf->mbox.mbox;
+ mdev = &mbox->dev[0];
+ otx2_sync_mbox_bbuf(mbox, 0);
- trace_otx2_msg_interrupt(mbox->pdev, "PF to VF", BIT_ULL(0));
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (hdr->num_msgs)
+ queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk);
- hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
- if (hdr->num_msgs) {
- vf->mbox.num_msgs = hdr->num_msgs;
- hdr->num_msgs = 0;
- memset(mbox->hwbase + mbox->rx_start, 0,
- ALIGN(sizeof(struct mbox_hdr), sizeof(u64)));
- queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk);
+ trace_otx2_msg_interrupt(mbox->pdev, "DOWN reply from PF to VF",
+ BIT_ULL(0));
}
- /* Check for PF => VF notification messages */
- mbox = &vf->mbox.mbox_up;
- mdev = &mbox->dev[0];
- otx2_sync_mbox_bbuf(mbox, 0);
- hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
- if (hdr->num_msgs) {
- vf->mbox.up_num_msgs = hdr->num_msgs;
- hdr->num_msgs = 0;
- memset(mbox->hwbase + mbox->rx_start, 0,
- ALIGN(sizeof(struct mbox_hdr), sizeof(u64)));
- queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk);
+ if (mbox_data & MBOX_UP_MSG) {
+ mbox_data &= ~MBOX_UP_MSG;
+ otx2_write64(vf, RVU_VF_VFPF_MBOX0, mbox_data);
+
+ /* Check for PF => VF notification messages */
+ mbox = &vf->mbox.mbox_up;
+ mdev = &mbox->dev[0];
+ otx2_sync_mbox_bbuf(mbox, 0);
+
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (hdr->num_msgs)
+ queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk);
+
+ trace_otx2_msg_interrupt(mbox->pdev, "UP message from PF to VF",
+ BIT_ULL(0));
}
return IRQ_HANDLED;
@@ -760,8 +775,8 @@ static void otx2vf_remove(struct pci_dev *pdev)
otx2_mcam_flow_del(vf);
otx2_shutdown_tc(vf);
otx2_shutdown_qos(vf);
- otx2vf_disable_mbox_intr(vf);
otx2_detach_resources(&vf->mbox);
+ otx2vf_disable_mbox_intr(vf);
free_percpu(vf->hw.lmt_info);
if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
qmem_free(vf->dev, vf->dync_lmt);