diff --git a/providers/hns/hns_roce_u.c b/providers/hns/hns_roce_u.c
index 0b254fb36..69f7d3f1d 100644
--- a/providers/hns/hns_roce_u.c
+++ b/providers/hns/hns_roce_u.c
@@ -97,50 +97,33 @@ static uint32_t calc_table_shift(uint32_t entry_count, uint32_t size_shift)
 	return count_shift > size_shift ? count_shift - size_shift : 0;
 }
 
-static struct verbs_context *hns_roce_alloc_context(struct ibv_device *ibdev,
-						     int cmd_fd,
-						     void *private_data)
+static int set_context_attr(struct hns_roce_device *hr_dev,
+			    struct hns_roce_context *context,
+			    struct hns_roce_alloc_ucontext_resp *resp)
 {
-	struct hns_roce_device *hr_dev = to_hr_dev(ibdev);
-	struct hns_roce_alloc_ucontext_resp resp = {};
-	struct hns_roce_alloc_ucontext cmd = {};
 	struct ibv_device_attr dev_attrs;
-	struct hns_roce_context *context;
 	int i;
 
-	context = verbs_init_and_alloc_context(ibdev, cmd_fd, context, ibv_ctx,
-					       RDMA_DRIVER_HNS);
-	if (!context)
-		return NULL;
-
-	cmd.config |= HNS_ROCE_EXSGE_FLAGS | HNS_ROCE_RQ_INLINE_FLAGS |
-		      HNS_ROCE_CQE_INLINE_FLAGS;
-	if (ibv_cmd_get_context(&context->ibv_ctx, &cmd.ibv_cmd, sizeof(cmd),
-				&resp.ibv_resp, sizeof(resp)))
-		goto err_free;
-
-	if (!resp.cqe_size)
+	if (!resp->cqe_size)
 		context->cqe_size = HNS_ROCE_CQE_SIZE;
-	else if (resp.cqe_size <= HNS_ROCE_V3_CQE_SIZE)
-		context->cqe_size = resp.cqe_size;
+	else if (resp->cqe_size <= HNS_ROCE_V3_CQE_SIZE)
+		context->cqe_size = resp->cqe_size;
 	else
 		context->cqe_size = HNS_ROCE_V3_CQE_SIZE;
 
-	context->config = resp.config;
-	if (resp.config & HNS_ROCE_RSP_EXSGE_FLAGS)
-		context->max_inline_data = resp.max_inline_data;
+	context->config = resp->config;
+	if (resp->config & HNS_ROCE_RSP_EXSGE_FLAGS)
+		context->max_inline_data = resp->max_inline_data;
 
-	context->qp_table_shift = calc_table_shift(resp.qp_tab_size,
+	context->qp_table_shift = calc_table_shift(resp->qp_tab_size,
 						   HNS_ROCE_QP_TABLE_BITS);
 	context->qp_table_mask = (1 << context->qp_table_shift) - 1;
-	pthread_mutex_init(&context->qp_table_mutex, NULL);
 	for (i = 0; i < HNS_ROCE_QP_TABLE_SIZE; ++i)
 		context->qp_table[i].refcnt = 0;
 
-	context->srq_table_shift = calc_table_shift(resp.srq_tab_size,
+	context->srq_table_shift = calc_table_shift(resp->srq_tab_size,
 						    HNS_ROCE_SRQ_TABLE_BITS);
 	context->srq_table_mask = (1 << context->srq_table_shift) - 1;
-	pthread_mutex_init(&context->srq_table_mutex, NULL);
 	for (i = 0; i < HNS_ROCE_SRQ_TABLE_SIZE; ++i)
 		context->srq_table[i].refcnt = 0;
 
@@ -149,7 +132,7 @@ static struct verbs_context *hns_roce_alloc_context(struct ibv_device *ibdev,
 						 struct ibv_device_attr_ex,
 						 orig_attr),
 				    sizeof(dev_attrs)))
-		goto err_free;
+		return EIO;
 
 	hr_dev->hw_version = dev_attrs.hw_ver;
 	context->max_qp_wr = dev_attrs.max_qp_wr;
@@ -158,11 +141,39 @@ static struct verbs_context *hns_roce_alloc_context(struct ibv_device *ibdev,
 	context->max_srq_wr = dev_attrs.max_srq_wr;
 	context->max_srq_sge = dev_attrs.max_srq_sge;
 
+	return 0;
+}
+
+static struct verbs_context *hns_roce_alloc_context(struct ibv_device *ibdev,
+						     int cmd_fd,
+						     void *private_data)
+{
+	struct hns_roce_device *hr_dev = to_hr_dev(ibdev);
+	struct hns_roce_alloc_ucontext_resp resp = {};
+	struct hns_roce_alloc_ucontext cmd = {};
+	struct hns_roce_context *context;
+
+	context = verbs_init_and_alloc_context(ibdev, cmd_fd, context, ibv_ctx,
+					       RDMA_DRIVER_HNS);
+	if (!context)
+		return NULL;
+
+	cmd.config |= HNS_ROCE_EXSGE_FLAGS | HNS_ROCE_RQ_INLINE_FLAGS |
+		      HNS_ROCE_CQE_INLINE_FLAGS;
+	if (ibv_cmd_get_context(&context->ibv_ctx, &cmd.ibv_cmd, sizeof(cmd),
+				&resp.ibv_resp, sizeof(resp)))
+		goto err_free;
+
+	if (set_context_attr(hr_dev, context, &resp))
+		goto err_free;
+
 	context->uar = mmap(NULL, hr_dev->page_size, PROT_READ | PROT_WRITE,
 			    MAP_SHARED, cmd_fd, 0);
 	if (context->uar == MAP_FAILED)
 		goto err_free;
 
+	pthread_mutex_init(&context->qp_table_mutex, NULL);
+	pthread_mutex_init(&context->srq_table_mutex, NULL);
 	pthread_spin_init(&context->uar_lock, PTHREAD_PROCESS_PRIVATE);
 
 	verbs_set_ops(&context->ibv_ctx, &hns_common_ops);