providers/irdma: Allow accurate reporting on QP max send/recv WR
Currently, the cap.max_send_wr and cap.max_recv_wr attributes sent from
user space during create QP are the provider-computed SQ/RQ depths, as
opposed to the raw values passed by the application. This prevents the
kernel from computing accurate max_send_wr and max_recv_wr values for
this QP that match the values returned by create QP in user space.
These capabilities also need to be reported by the driver in query QP.

Add support by extending the ABI to allow the raw cap.max_send_wr and
cap.max_recv_wr to be passed from user space, while keeping
compatibility with the older scheme.

Add new helpers to assist with this: irdma_uk_calc_depth_shift_sq() and
irdma_uk_calc_depth_shift_rq().
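
For illustration, here is how an application observes the reported
capabilities through the standard verbs API. This is a minimal sketch,
assuming a previously created pd and cq; the requested values are
arbitrary and not part of this commit:

#include <stdio.h>
#include <infiniband/verbs.h>

/* Request a QP with raw WR counts and print what the provider granted.
 * ibv_create_qp() updates init_attr.cap with the actual values; with
 * this change the kernel's query-QP view can match them.
 */
static struct ibv_qp *create_qp_and_report(struct ibv_pd *pd, struct ibv_cq *cq)
{
        struct ibv_qp_init_attr init_attr = {
                .send_cq = cq,
                .recv_cq = cq,
                .qp_type = IBV_QPT_RC,
                .cap = {
                        .max_send_wr = 32,      /* raw application request */
                        .max_recv_wr = 32,
                        .max_send_sge = 1,
                        .max_recv_sge = 1,
                },
        };
        struct ibv_qp *qp = ibv_create_qp(pd, &init_attr);

        if (!qp)
                return NULL;
        printf("granted max_send_wr=%u max_recv_wr=%u\n",
               init_attr.cap.max_send_wr, init_attr.cap.max_recv_wr);
        return qp;
}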

Signed-off-by: Shiraz, Saleem <[email protected]>
Signed-off-by: Sagar, Youvaraj <[email protected]>
Signed-off-by: Tatyana Nikolova <[email protected]>
Signed-off-by: Leon Romanovsky <[email protected]>
shirazsaleem authored and rleon committed Sep 18, 2023
1 parent 08d6e71 commit 6711023
Showing 5 changed files with 97 additions and 56 deletions.
64 changes: 50 additions & 14 deletions providers/irdma/uk.c
@@ -1450,6 +1450,53 @@ static void irdma_setup_connection_wqes(struct irdma_qp_uk *qp,
IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->initial_ring, move_cnt);
}

/**
* irdma_uk_calc_depth_shift_sq - calculate depth and shift for SQ size.
* @ukinfo: qp initialization info
* @sq_depth: Returns depth of SQ
* @sq_shift: Returns shift of SQ
*/
int irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
__u32 *sq_depth, __u8 *sq_shift)
{
bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2 ? true : false;
int status;

irdma_get_wqe_shift(ukinfo->uk_attrs,
imm_support ? ukinfo->max_sq_frag_cnt + 1 :
ukinfo->max_sq_frag_cnt,
ukinfo->max_inline_data, sq_shift);
status = irdma_get_sqdepth(ukinfo->uk_attrs, ukinfo->sq_size,
*sq_shift, sq_depth);

return status;
}

/**
* irdma_uk_calc_depth_shift_rq - calculate depth and shift for RQ size.
* @ukinfo: qp initialization info
* @rq_depth: Returns depth of RQ
* @rq_shift: Returns shift of RQ
*/
int irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo,
__u32 *rq_depth, __u8 *rq_shift)
{
int status;

irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0,
rq_shift);

if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) {
if (ukinfo->abi_ver > 4)
*rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
}

status = irdma_get_rqdepth(ukinfo->uk_attrs, ukinfo->rq_size,
*rq_shift, rq_depth);

return status;
}

/**
* irdma_uk_qp_init - initialize shared qp
* @qp: hw qp (user and kernel)
@@ -1465,23 +1512,12 @@ enum irdma_status_code irdma_uk_qp_init(struct irdma_qp_uk *qp,
{
enum irdma_status_code ret_code = 0;
__u32 sq_ring_size;
__u8 sqshift, rqshift;

qp->uk_attrs = info->uk_attrs;
if (info->max_sq_frag_cnt > qp->uk_attrs->max_hw_wq_frags ||
info->max_rq_frag_cnt > qp->uk_attrs->max_hw_wq_frags)
return IRDMA_ERR_INVALID_FRAG_COUNT;

irdma_get_wqe_shift(qp->uk_attrs, info->max_rq_frag_cnt, 0, &rqshift);
if (qp->uk_attrs->hw_rev == IRDMA_GEN_1) {
irdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt,
info->max_inline_data, &sqshift);
if (info->abi_ver > 4)
rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
} else {
irdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt + 1,
info->max_inline_data, &sqshift);
}
qp->qp_caps = info->qp_caps;
qp->sq_base = info->sq;
qp->rq_base = info->rq;
@@ -1495,7 +1531,7 @@ enum irdma_status_code irdma_uk_qp_init(struct irdma_qp_uk *qp,
qp->sq_size = info->sq_size;
qp->push_mode = false;
qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
sq_ring_size = qp->sq_size << sqshift;
sq_ring_size = qp->sq_size << info->sq_shift;
IRDMA_RING_INIT(qp->sq_ring, sq_ring_size);
IRDMA_RING_INIT(qp->initial_ring, sq_ring_size);
if (info->first_sq_wq) {
@@ -1510,9 +1546,9 @@ enum irdma_status_code irdma_uk_qp_init(struct irdma_qp_uk *qp,
qp->rq_size = info->rq_size;
qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
qp->max_inline_data = info->max_inline_data;
qp->rq_wqe_size = rqshift;
qp->rq_wqe_size = info->rq_shift;
IRDMA_RING_INIT(qp->rq_ring, qp->rq_size);
qp->rq_wqe_size_multiplier = 1 << rqshift;
qp->rq_wqe_size_multiplier = 1 << info->rq_shift;
if (qp->uk_attrs->hw_rev == IRDMA_GEN_1)
qp->wqe_ops = iw_wqe_uk_ops_gen_1;
else
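
The two helpers above centralize the shift/depth math that
irdma_uk_qp_init() previously open-coded. The following toy model shows
only the basic relationship; the rounding and reserved-slot handling
live in irdma_get_sqdepth()/irdma_get_rqdepth(), which this diff does
not show, so they are omitted here:

#include <stdint.h>
#include <stdio.h>

/* Toy model: the shift encodes the WQE size as a power-of-two number
 * of quanta. A shift of 2 means each WQE occupies 4 quanta, so a
 * request for 32 WRs needs at least 32 << 2 = 128 quanta of ring
 * space. The real helpers also round up and validate against the
 * hardware limits in uk_attrs.
 */
int main(void)
{
        uint32_t sq_size = 32;  /* WRs requested by the application */
        uint8_t sq_shift = 2;   /* as computed by irdma_get_wqe_shift() */
        uint32_t sq_depth = sq_size << sq_shift;

        printf("depth=%u quanta, WRs back=%u\n", sq_depth, sq_depth >> sq_shift);
        return 0;
}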
5 changes: 4 additions & 1 deletion providers/irdma/umain.c
@@ -141,7 +141,7 @@ static struct verbs_context *irdma_ualloc_context(struct ibv_device *ibdev,
{
struct ibv_pd *ibv_pd;
struct irdma_uvcontext *iwvctx;
struct irdma_get_context cmd;
struct irdma_get_context cmd = {};
struct irdma_get_context_resp resp = {};
__u64 mmap_key;
__u8 user_ver = IRDMA_ABI_VER;
@@ -151,6 +151,7 @@ static struct verbs_context *irdma_ualloc_context(struct ibv_device *ibdev,
if (!iwvctx)
return NULL;

cmd.comp_mask |= IRDMA_ALLOC_UCTX_USE_RAW_ATTR;
cmd.userspace_ver = user_ver;
if (ibv_cmd_get_context(&iwvctx->ibv_ctx,
(struct ibv_get_context *)&cmd, sizeof(cmd),
@@ -183,6 +184,8 @@ static struct verbs_context *irdma_ualloc_context(struct ibv_device *ibdev,
iwvctx->uk_attrs.max_hw_cq_size = resp.max_hw_cq_size;
iwvctx->uk_attrs.min_hw_cq_size = resp.min_hw_cq_size;
iwvctx->abi_ver = user_ver;
if (resp.comp_mask & IRDMA_ALLOC_UCTX_USE_RAW_ATTR)
iwvctx->use_raw_attrs = true;
mmap_key = resp.db_mmap_key;
}

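
The comp_mask exchange above is the usual extensible-ABI handshake:
user space advertises IRDMA_ALLOC_UCTX_USE_RAW_ATTR in the request and
trusts the new semantics only if the kernel echoes the bit back. A
generic sketch of the pattern, using hypothetical names rather than the
irdma ABI structs:

#include <stdbool.h>
#include <stdint.h>

#define EX_USE_RAW_ATTR (1u << 0)       /* hypothetical capability bit */

struct ex_req { uint32_t comp_mask; };
struct ex_resp { uint32_t comp_mask; };

/* Old kernels leave unknown comp_mask bits clear in the response, so a
 * clear bit on return means "fall back to the older scheme".
 */
static bool negotiate_raw_attrs(struct ex_req *req, struct ex_resp *resp,
                                int (*submit)(struct ex_req *, struct ex_resp *))
{
        req->comp_mask |= EX_USE_RAW_ATTR;      /* advertise support */
        if (submit(req, resp))
                return false;                   /* command failed */
        return resp->comp_mask & EX_USE_RAW_ATTR;
}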
3 changes: 2 additions & 1 deletion providers/irdma/umain.h
@@ -56,7 +56,8 @@ struct irdma_uvcontext {
struct irdma_uk_attrs uk_attrs;
void *db;
int abi_ver;
bool legacy_mode;
bool legacy_mode:1;
bool use_raw_attrs:1;
};

struct irdma_uqp;
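
Converting legacy_mode to a one-bit bitfield lets use_raw_attrs share
the same storage unit instead of growing the context struct. A
standalone sketch of the same packing, with illustrative names:

#include <stdbool.h>
#include <stdio.h>

struct ex_flags {
        bool legacy_mode:1;
        bool use_raw_attrs:1;   /* packs into the same unit as legacy_mode */
};

int main(void)
{
        struct ex_flags f = { .use_raw_attrs = true };

        /* Both flags typically share a single byte. */
        printf("sizeof=%zu use_raw_attrs=%d\n", sizeof(f), (int)f.use_raw_attrs);
        return 0;
}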
9 changes: 9 additions & 0 deletions providers/irdma/user.h
@@ -311,6 +311,11 @@ enum irdma_status_code irdma_uk_cq_init(struct irdma_cq_uk *cq,
struct irdma_cq_uk_init_info *info);
enum irdma_status_code irdma_uk_qp_init(struct irdma_qp_uk *qp,
struct irdma_qp_uk_init_info *info);
int irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
__u32 *sq_depth, __u8 *sq_shift);
int irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo,
__u32 *rq_depth, __u8 *rq_shift);

struct irdma_sq_uk_wr_trk_info {
__u64 wrid;
__u32 wr_len;
@@ -391,8 +396,12 @@ struct irdma_qp_uk_init_info {
__u32 max_sq_frag_cnt;
__u32 max_rq_frag_cnt;
__u32 max_inline_data;
__u32 sq_depth;
__u32 rq_depth;
__u8 first_sq_wq;
__u8 type;
__u8 sq_shift;
__u8 rq_shift;
int abi_ver;
bool legacy_mode;
};
72 changes: 32 additions & 40 deletions providers/irdma/uverbs.c
@@ -1228,14 +1228,12 @@ static int irdma_destroy_vmapped_qp(struct irdma_uqp *iwuqp)
* @pd: pd for the qp
* @attr: attributes of qp passed
* @resp: response back from create qp
* @sqdepth: depth of sq
* @rqdepth: depth of rq
* @info: info for initializing user level qp
* @abi_ver: abi version of the create qp command
*/
static int irdma_vmapped_qp(struct irdma_uqp *iwuqp, struct ibv_pd *pd,
struct ibv_qp_init_attr *attr, int sqdepth,
int rqdepth, struct irdma_qp_uk_init_info *info,
struct ibv_qp_init_attr *attr,
struct irdma_qp_uk_init_info *info,
bool legacy_mode)
{
struct irdma_ucreate_qp cmd = {};
@@ -1245,8 +1243,8 @@ static int irdma_vmapped_qp(struct irdma_uqp *iwuqp, struct ibv_pd *pd,
struct ib_uverbs_reg_mr_resp reg_mr_resp = {};
int ret;

sqsize = roundup(sqdepth * IRDMA_QP_WQE_MIN_SIZE, IRDMA_HW_PAGE_SIZE);
rqsize = roundup(rqdepth * IRDMA_QP_WQE_MIN_SIZE, IRDMA_HW_PAGE_SIZE);
sqsize = roundup(info->sq_depth * IRDMA_QP_WQE_MIN_SIZE, IRDMA_HW_PAGE_SIZE);
rqsize = roundup(info->rq_depth * IRDMA_QP_WQE_MIN_SIZE, IRDMA_HW_PAGE_SIZE);
totalqpsize = rqsize + sqsize + IRDMA_DB_SHADOW_AREA_SIZE;
info->sq = irdma_alloc_hw_buf(totalqpsize);
iwuqp->buf_size = totalqpsize;
@@ -1313,8 +1311,6 @@ struct ibv_qp *irdma_ucreate_qp(struct ibv_pd *pd,
struct irdma_uk_attrs *uk_attrs;
struct irdma_uvcontext *iwvctx;
struct irdma_uqp *iwuqp;
__u32 sqdepth, rqdepth;
__u8 sqshift, rqshift;
int status;

if (attr->qp_type != IBV_QPT_RC && attr->qp_type != IBV_QPT_UD) {
@@ -1335,27 +1331,23 @@ struct ibv_qp *irdma_ucreate_qp(struct ibv_pd *pd,
return NULL;
}

irdma_get_wqe_shift(uk_attrs,
uk_attrs->hw_rev > IRDMA_GEN_1 ? attr->cap.max_send_sge + 1 :
attr->cap.max_send_sge,
attr->cap.max_inline_data, &sqshift);
status = irdma_get_sqdepth(uk_attrs, attr->cap.max_send_wr, sqshift,
&sqdepth);
info.uk_attrs = uk_attrs;
info.sq_size = attr->cap.max_send_wr;
info.rq_size = attr->cap.max_recv_wr;
info.max_sq_frag_cnt = attr->cap.max_send_sge;
info.max_rq_frag_cnt = attr->cap.max_recv_sge;
info.max_inline_data = attr->cap.max_inline_data;
info.abi_ver = iwvctx->abi_ver;

status = irdma_uk_calc_depth_shift_sq(&info, &info.sq_depth, &info.sq_shift);
if (status) {
errno = EINVAL;
errno = status;
return NULL;
}

if (uk_attrs->hw_rev == IRDMA_GEN_1 && iwvctx->abi_ver > 4)
rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
else
irdma_get_wqe_shift(uk_attrs, attr->cap.max_recv_sge, 0,
&rqshift);

status = irdma_get_rqdepth(uk_attrs, attr->cap.max_recv_wr, rqshift,
&rqdepth);
status = irdma_uk_calc_depth_shift_rq(&info, &info.rq_depth, &info.rq_shift);
if (status) {
errno = EINVAL;
errno = status;
return NULL;
}

@@ -1368,32 +1360,34 @@ struct ibv_qp *irdma_ucreate_qp(struct ibv_pd *pd,
if (pthread_spin_init(&iwuqp->lock, PTHREAD_PROCESS_PRIVATE))
goto err_free_qp;

info.sq_size = sqdepth >> sqshift;
info.rq_size = rqdepth >> rqshift;
attr->cap.max_send_wr = info.sq_size;
attr->cap.max_recv_wr = info.rq_size;
info.sq_size = info.sq_depth >> info.sq_shift;
info.rq_size = info.rq_depth >> info.rq_shift;
/**
* Maintain backward compatibility with the older ABI which passes sq
* and rq depth (in quanta) in cap.max_send_wr and cap.max_recv_wr
*/
if (!iwvctx->use_raw_attrs) {
attr->cap.max_send_wr = info.sq_size;
attr->cap.max_recv_wr = info.rq_size;
}

info.uk_attrs = uk_attrs;
info.max_sq_frag_cnt = attr->cap.max_send_sge;
info.max_rq_frag_cnt = attr->cap.max_recv_sge;
iwuqp->recv_sges = calloc(attr->cap.max_recv_sge, sizeof(*iwuqp->recv_sges));
if (!iwuqp->recv_sges)
goto err_destroy_lock;

info.wqe_alloc_db = (__u32 *)iwvctx->db;
info.abi_ver = iwvctx->abi_ver;
info.legacy_mode = iwvctx->legacy_mode;
info.sq_wrtrk_array = calloc(sqdepth, sizeof(*info.sq_wrtrk_array));
info.sq_wrtrk_array = calloc(info.sq_depth, sizeof(*info.sq_wrtrk_array));
if (!info.sq_wrtrk_array)
goto err_free_rsges;

info.rq_wrid_array = calloc(rqdepth, sizeof(*info.rq_wrid_array));
info.rq_wrid_array = calloc(info.rq_depth, sizeof(*info.rq_wrid_array));
if (!info.rq_wrid_array)
goto err_free_sq_wrtrk;

iwuqp->sq_sig_all = attr->sq_sig_all;
iwuqp->qp_type = attr->qp_type;
status = irdma_vmapped_qp(iwuqp, pd, attr, sqdepth, rqdepth, &info, iwvctx->legacy_mode);
status = irdma_vmapped_qp(iwuqp, pd, attr, &info, iwvctx->legacy_mode);
if (status) {
errno = status;
goto err_free_rq_wrid;
@@ -1402,17 +1396,15 @@ struct ibv_qp *irdma_ucreate_qp(struct ibv_pd *pd,
iwuqp->qp.back_qp = iwuqp;
iwuqp->qp.lock = &iwuqp->lock;

info.max_sq_frag_cnt = attr->cap.max_send_sge;
info.max_rq_frag_cnt = attr->cap.max_recv_sge;
info.max_inline_data = attr->cap.max_inline_data;
status = irdma_uk_qp_init(&iwuqp->qp, &info);
if (status) {
errno = EINVAL;
goto err_free_vmap_qp;
}

attr->cap.max_send_wr = (sqdepth - IRDMA_SQ_RSVD) >> sqshift;
attr->cap.max_recv_wr = (rqdepth - IRDMA_RQ_RSVD) >> rqshift;
attr->cap.max_send_wr = (info.sq_depth - IRDMA_SQ_RSVD) >> info.sq_shift;
attr->cap.max_recv_wr = (info.rq_depth - IRDMA_RQ_RSVD) >> info.rq_shift;

return &iwuqp->ibv_qp;

err_free_vmap_qp:
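
The final reporting step in irdma_ucreate_qp() converts the ring depth
back to usable WRs after subtracting reserved slots. A worked check of
that arithmetic; the actual value of IRDMA_SQ_RSVD is defined in the
irdma headers, and the 1 used here is only an assumption:

#include <stdint.h>
#include <stdio.h>

#define EX_SQ_RSVD 1    /* assumed value; stands in for IRDMA_SQ_RSVD */

int main(void)
{
        uint32_t sq_depth = 128;        /* quanta, from irdma_uk_calc_depth_shift_sq() */
        uint8_t sq_shift = 2;           /* each WQE = 4 quanta */

        /* Mirrors: attr->cap.max_send_wr = (sq_depth - IRDMA_SQ_RSVD) >> sq_shift */
        printf("reported max_send_wr=%u\n", (sq_depth - EX_SQ_RSVD) >> sq_shift);
        return 0;
}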
