diff options
author | Roland Dreier <rolandd@cisco.com> | 2007-04-10 23:16:59 -0700 |
---|---|---|
committer | Roland Dreier <rolandd@cisco.com> | 2007-04-10 23:16:59 -0700 |
commit | 396d1788b8b72f018c4445961e4e7985e085d4d0 (patch) | |
tree | 9203fbb0ff5a1e09bf39524efed1f1b5669266b7 | |
parent | 8bf98ba7ef38c057a89c806140a24dacfec4bdc5 (diff) | |
download | libmlx4-396d1788b8b72f018c4445961e4e7985e085d4d0.tar.gz |
Multiple SRQ fixes
Several one-liner fixes to SRQ support:
- Scatter entry address is 64 bits, so use htonll() instead of
htonl() when filling in WQE.
- Minimum SRQ WQE size is 32 bytes, so use 5 as a minimum value of
wqe_shift.
- When initializing next_wqe_index values, use htons() to put indices
into big-endian byte order.
Signed-off-by: Roland Dreier <rolandd@cisco.com>
-rw-r--r-- | src/srq.c | 7 |
1 file changed, 3 insertions(+), 4 deletions(-)
@@ -90,7 +90,7 @@ int mlx4_post_srq_recv(struct ibv_srq *ibsrq,
 		for (i = 0; i < wr->num_sge; ++i) {
 			scat[i].byte_count = htonl(wr->sg_list[i].length);
 			scat[i].lkey       = htonl(wr->sg_list[i].lkey);
-			scat[i].addr       = htonl(wr->sg_list[i].addr);
+			scat[i].addr       = htonll(wr->sg_list[i].addr);
 		}

 		if (i < srq->max_gs) {
@@ -132,7 +132,7 @@ int mlx4_alloc_srq_buf(struct ibv_pd *pd, struct ibv_srq_attr *attr,
 	size = sizeof (struct mlx4_wqe_srq_next_seg) +
 		srq->max_gs * sizeof (struct mlx4_wqe_data_seg);

-	for (srq->wqe_shift = 6; 1 << srq->wqe_shift < size; ++srq->wqe_shift)
+	for (srq->wqe_shift = 5; 1 << srq->wqe_shift < size; ++srq->wqe_shift)
 		; /* nothing */

 	buf_size = srq->max << srq->wqe_shift;
@@ -152,8 +152,7 @@ int mlx4_alloc_srq_buf(struct ibv_pd *pd, struct ibv_srq_attr *attr,

 	for (i = 0; i < srq->max; ++i) {
 		next = get_wqe(srq, i);
-
-		next->next_wqe_index = (i + 1) & (srq->max - 1);
+		next->next_wqe_index = htons((i + 1) & (srq->max - 1));
 	}

 	srq->head = 0;