about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorRoland Dreier <rolandd@cisco.com>2007-06-07 14:11:02 -0700
committerRoland Dreier <rolandd@cisco.com>2007-06-07 16:21:47 -0700
commite7d065192e10851ce6a6506dbc5227f4dbaf8fcd (patch)
treeaccecb050d58bcd53ec7c8d1e34f9f8afe190c3c
parent5de6edbaf6a34967d79f1d7082ad8371e79fb766 (diff)
downloadlibmlx4-e7d065192e10851ce6a6506dbc5227f4dbaf8fcd.tar.gz
Make sure RQ allocation is always valid
QPs attached to an SRQ must never have their own RQ, and QPs not attached to SRQs must have an RQ with at least 1 entry. Enforce all of this in set_rq_size().

Also simplify how we round up queue sizes: there's no need to pass the context into align_queue_size(), since that parameter is completely unused, and we don't really need two functions for rounding up to the next power of two.

Signed-off-by: Roland Dreier <rolandd@cisco.com>
-rw-r--r--src/qp.c10
-rw-r--r--src/verbs.c38
2 files changed, 19 insertions(+), 29 deletions(-)
diff --git a/src/qp.c b/src/qp.c
index 8e2a3d3..92edec6 100644
--- a/src/qp.c
+++ b/src/qp.c
@@ -399,10 +399,12 @@ int mlx4_alloc_qp_buf(struct ibv_pd *pd, struct ibv_qp_cap *cap,
if (!qp->sq.wrid)
return -1;
- qp->rq.wrid = malloc(qp->rq.max * sizeof (uint64_t));
- if (!qp->rq.wrid) {
- free(qp->sq.wrid);
- return -1;
+ if (qp->rq.max) {
+ qp->rq.wrid = malloc(qp->rq.max * sizeof (uint64_t));
+ if (!qp->rq.wrid) {
+ free(qp->sq.wrid);
+ return -1;
+ }
}
size = qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg);
diff --git a/src/verbs.c b/src/verbs.c
index 1feae9d..a3420cc 100644
--- a/src/verbs.c
+++ b/src/verbs.c
@@ -150,11 +150,11 @@ int mlx4_dereg_mr(struct ibv_mr *mr)
return 0;
}
-static int align_cq_size(int cqe)
+static int align_queue_size(int req)
{
int nent;
- for (nent = 1; nent <= cqe; nent <<= 1)
+ for (nent = 1; nent < req; nent <<= 1)
; /* nothing */
return nent;
@@ -182,7 +182,7 @@ struct ibv_cq *mlx4_create_cq(struct ibv_context *context, int cqe,
if (pthread_spin_init(&cq->lock, PTHREAD_PROCESS_PRIVATE))
goto err;
- cqe = align_cq_size(cqe);
+ cqe = align_queue_size(cqe + 1);
if (mlx4_alloc_buf(&cq->buf, cqe * MLX4_CQ_ENTRY_SIZE,
to_mdev(context->device)->page_size))
@@ -245,23 +245,6 @@ int mlx4_destroy_cq(struct ibv_cq *cq)
return 0;
}
-static int align_queue_size(struct ibv_context *context, int size, int spare)
-{
- int ret;
-
- /*
- * If someone asks for a 0-sized queue, presumably they're not
- * going to use it. So don't mess with their size.
- */
- if (!size)
- return 0;
-
- for (ret = 1; ret < size + spare; ret <<= 1)
- ; /* nothing */
-
- return ret;
-}
-
struct ibv_srq *mlx4_create_srq(struct ibv_pd *pd,
struct ibv_srq_init_attr *attr)
{
@@ -281,7 +264,7 @@ struct ibv_srq *mlx4_create_srq(struct ibv_pd *pd,
if (pthread_spin_init(&srq->lock, PTHREAD_PROCESS_PRIVATE))
goto err;
- srq->max = align_queue_size(pd->context, attr->attr.max_wr, 1);
+ srq->max = align_queue_size(attr->attr.max_wr + 1);
srq->max_gs = attr->attr.max_sge;
srq->counter = 0;
@@ -372,8 +355,11 @@ struct ibv_qp *mlx4_create_qp(struct ibv_pd *pd, struct ibv_qp_init_attr *attr)
if (!qp)
return NULL;
- qp->sq.max = align_queue_size(pd->context, attr->cap.max_send_wr, 0);
- qp->rq.max = align_queue_size(pd->context, attr->cap.max_recv_wr, 0);
+ qp->sq.max = align_queue_size(attr->cap.max_send_wr);
+ qp->rq.max = align_queue_size(attr->cap.max_recv_wr);
+
+ if (attr->srq)
+ attr->cap.max_recv_wr = qp->rq.max = 0;
if (mlx4_alloc_qp_buf(pd, &attr->cap, attr->qp_type, qp))
goto err;
@@ -434,7 +420,8 @@ err_rq_db:
err_free:
free(qp->sq.wrid);
- free(qp->rq.wrid);
+ if (qp->rq.max)
+ free(qp->rq.wrid);
mlx4_free_buf(&qp->buf);
err:
@@ -538,7 +525,8 @@ int mlx4_destroy_qp(struct ibv_qp *ibqp)
if (!ibqp->srq)
mlx4_free_db(to_mctx(ibqp->context), MLX4_DB_TYPE_RQ, qp->db);
free(qp->sq.wrid);
- free(qp->rq.wrid);
+ if (qp->rq.max)
+ free(qp->rq.wrid);
mlx4_free_buf(&qp->buf);
free(qp);