author     Or Gerlitz <ogerlitz@mellanox.com>       2011-12-11 16:42:22 +0200
committer  Roland Dreier <roland@purestorage.com>   2012-03-23 14:40:11 -0700
commit     8145c59c81b8d829e26f98dcc007431a7590ba71 (patch)
tree       de20809032da94c456bac3a4137aa9e31f665ee4
parent     ed13892a51d2ba852bf4fe7ac8b149c34134334e (diff)
download   libmlx4-8145c59c81b8d829e26f98dcc007431a7590ba71.tar.gz
Use Ethernet 802.1Q VLAN-related fields in CQE
When running IBoE traffic, the hardware places Ethernet fields into the CQE.
This patch adds the ones related to VLANs.

Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
-rw-r--r--    src/cq.c    25
1 file changed, 14 insertions(+), 11 deletions(-)
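
A hedged sketch for orientation (not part of the commit): the helpers below show how a consumer of the CQE might use the fields this patch introduces. The mask and field names (vlan_my_qpn, sl_vid, MLX4_CQE_VLAN_PRESENT_MASK, MLX4_CQE_QPN_MASK) come from the hunks that follow; the helper names are hypothetical, and extracting the 802.1Q VLAN ID from the low 12 bits of sl_vid is an assumption based on the field name -- the patch itself only consumes the service level (sl_vid >> 12).

/* Illustrative sketch only -- not part of this patch. Helper names are
 * hypothetical; masks and field layout follow the hunks below, and the
 * 12-bit VID extraction assumes the standard 802.1Q packing of sl_vid. */
#include <stdint.h>
#include <arpa/inet.h>          /* ntohl(), ntohs() */

static inline int cqe_vlan_present(uint32_t vlan_my_qpn_be)
{
        /* bit 29 (MLX4_CQE_VLAN_PRESENT_MASK) flags a VLAN-tagged packet */
        return !!(ntohl(vlan_my_qpn_be) & (1u << 29));
}

static inline uint32_t cqe_qpn(uint32_t vlan_my_qpn_be)
{
        /* the low 24 bits still carry the QP number (MLX4_CQE_QPN_MASK) */
        return ntohl(vlan_my_qpn_be) & 0xffffff;
}

static inline uint8_t cqe_sl(uint16_t sl_vid_be)
{
        /* service level moves to the top 4 bits of the combined sl_vid word */
        return ntohs(sl_vid_be) >> 12;
}

static inline uint16_t cqe_vid(uint16_t sl_vid_be)
{
        /* assumed: 802.1Q VLAN ID in the low 12 bits of sl_vid */
        return ntohs(sl_vid_be) & 0xfff;
}
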
diff --git a/src/cq.c b/src/cq.c
index 0e24fea..873c1a2 100644
--- a/src/cq.c
+++ b/src/cq.c
@@ -61,6 +61,11 @@ enum {
 #define MLX4_CQ_DB_REQ_NOT (2 << 24)
 enum {
+        MLX4_CQE_VLAN_PRESENT_MASK = 1 << 29,
+        MLX4_CQE_QPN_MASK = 0xffffff,
+};
+
+enum {
         MLX4_CQE_OWNER_MASK = 0x80,
         MLX4_CQE_IS_SEND_MASK = 0x40,
         MLX4_CQE_OPCODE_MASK = 0x1f
@@ -83,11 +88,10 @@ enum {
 };
 struct mlx4_cqe {
-        uint32_t my_qpn;
+        uint32_t vlan_my_qpn;
         uint32_t immed_rss_invalid;
         uint32_t g_mlpath_rqpn;
-        uint8_t sl;
-        uint8_t reserved1;
+        uint16_t sl_vid;
         uint16_t rlid;
         uint32_t reserved2;
         uint32_t byte_cnt;
@@ -98,7 +102,7 @@ struct mlx4_cqe {
 };
 struct mlx4_err_cqe {
-        uint32_t my_qpn;
+        uint32_t vlan_my_qpn;
         uint32_t reserved1[5];
         uint16_t wqe_index;
         uint8_t vendor_err;
@@ -136,7 +140,7 @@ static void mlx4_handle_error_cqe(struct mlx4_err_cqe *cqe, struct ibv_wc *wc)
                 printf(PFX "local QP operation err "
                        "(QPN %06x, WQE index %x, vendor syndrome %02x, "
                        "opcode = %02x)\n",
-                       htonl(cqe->my_qpn), htonl(cqe->wqe_index),
+                       htonl(cqe->vlan_my_qpn), htonl(cqe->wqe_index),
                        cqe->vendor_err,
                        cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
@@ -215,21 +219,20 @@ static int mlx4_poll_one(struct mlx4_cq *cq,
          */
         rmb();
-        qpn = ntohl(cqe->my_qpn);
+        qpn = ntohl(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK;
         is_send = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
         is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
                 MLX4_CQE_OPCODE_ERROR;
         if (!*cur_qp ||
-            (ntohl(cqe->my_qpn) & 0xffffff) != (*cur_qp)->ibv_qp.qp_num) {
+            (qpn != (*cur_qp)->ibv_qp.qp_num)) {
                 /*
                  * We do not have to take the QP table lock here,
                  * because CQs will be locked while QPs are removed
                  * from the table.
                  */
-                *cur_qp = mlx4_find_qp(to_mctx(cq->ibv_cq.context),
-                                       ntohl(cqe->my_qpn) & 0xffffff);
+                *cur_qp = mlx4_find_qp(to_mctx(cq->ibv_cq.context), qpn);
                 if (!*cur_qp)
                         return CQ_POLL_ERR;
         }
@@ -314,7 +317,7 @@ static int mlx4_poll_one(struct mlx4_cq *cq,
                 }
                 wc->slid = ntohs(cqe->rlid);
-                wc->sl = cqe->sl >> 4;
+                wc->sl = ntohs(cqe->sl_vid) >> 12;
                 g_mlpath_rqpn = ntohl(cqe->g_mlpath_rqpn);
                 wc->src_qp = g_mlpath_rqpn & 0xffffff;
                 wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
@@ -405,7 +408,7 @@ void __mlx4_cq_clean(struct mlx4_cq *cq, uint32_t qpn, struct mlx4_srq *srq)
          */
         while ((int) --prod_index - (int) cq->cons_index >= 0) {
                 cqe = get_cqe(cq, prod_index & cq->ibv_cq.cqe);
-                if ((ntohl(cqe->my_qpn) & 0xffffff) == qpn) {
+                if ((ntohl(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) {
                         if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
                                 mlx4_free_srq_wqe(srq, ntohs(cqe->wqe_index));
                         ++nfreed;
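
For completeness, a hypothetical usage sketch (not part of the patch): at the verbs level nothing changes for applications, since mlx4_poll_one() still fills the standard work completion; wc->sl is simply derived from sl_vid >> 12 instead of the old sl byte, and the QP is matched via vlan_my_qpn & MLX4_CQE_QPN_MASK.

/* Hypothetical usage sketch, not part of the patch: drain completions and
 * print the fields that mlx4_poll_one() fills from the CQE shown above. */
#include <stdio.h>
#include <infiniband/verbs.h>

static void drain_cq(struct ibv_cq *cq)
{
        struct ibv_wc wc;

        while (ibv_poll_cq(cq, 1, &wc) > 0) {
                if (wc.status != IBV_WC_SUCCESS)
                        continue;
                /* wc.sl comes from sl_vid >> 12; the QP lookup used
                 * vlan_my_qpn & MLX4_CQE_QPN_MASK */
                printf("QPN 0x%x: sl %u, slid 0x%x\n",
                       wc.qp_num, (unsigned) wc.sl, (unsigned) wc.slid);
        }
}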