Diffstat (limited to 'drivers/infiniband/hw')
37 files changed, 728 insertions, 423 deletions
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h index e55a1666c0cd..c2805384e832 100644 --- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h +++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h @@ -104,10 +104,19 @@ struct bnxt_re_sqp_entries { struct bnxt_re_qp *qp1_qp; }; +#define BNXT_RE_MAX_GSI_SQP_ENTRIES 1024 +struct bnxt_re_gsi_context { + struct bnxt_re_qp *gsi_qp; + struct bnxt_re_qp *gsi_sqp; + struct bnxt_re_ah *gsi_sah; + struct bnxt_re_sqp_entries *sqp_tbl; +}; + #define BNXT_RE_MIN_MSIX 2 #define BNXT_RE_MAX_MSIX 9 #define BNXT_RE_AEQ_IDX 0 #define BNXT_RE_NQ_IDX 1 +#define BNXT_RE_GEN_P5_MAX_VF 64 struct bnxt_re_dev { struct ib_device ibdev; @@ -164,10 +173,7 @@ struct bnxt_re_dev { u16 cosq[2]; /* QP for for handling QP1 packets */ - u32 sqp_id; - struct bnxt_re_qp *qp1_sqp; - struct bnxt_re_ah *sqp_ah; - struct bnxt_re_sqp_entries sqp_tbl[1024]; + struct bnxt_re_gsi_context gsi_ctx; atomic_t nq_alloc_cnt; u32 is_virtfn; u32 num_vfs; diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index dd006b177b54..c9a7c03403ac 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -330,7 +330,7 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context) */ if (ctx->idx == 0 && rdma_link_local_addr((struct in6_addr *)gid_to_del) && - ctx->refcnt == 1 && rdev->qp1_sqp) { + ctx->refcnt == 1 && rdev->gsi_ctx.gsi_sqp) { dev_dbg(rdev_to_dev(rdev), "Trying to delete GID0 while QP1 is alive\n"); return -EFAULT; @@ -760,6 +760,49 @@ void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, spin_unlock_irqrestore(&qp->scq->cq_lock, flags); } +static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp) +{ + struct bnxt_re_qp *gsi_sqp; + struct bnxt_re_ah *gsi_sah; + struct bnxt_re_dev *rdev; + int rc = 0; + + rdev = qp->rdev; + gsi_sqp = rdev->gsi_ctx.gsi_sqp; + gsi_sah = rdev->gsi_ctx.gsi_sah; + + dev_dbg(rdev_to_dev(rdev), "Destroy the shadow AH\n"); + bnxt_qplib_destroy_ah(&rdev->qplib_res, + &gsi_sah->qplib_ah, + true); + bnxt_qplib_clean_qp(&qp->qplib_qp); + + dev_dbg(rdev_to_dev(rdev), "Destroy the shadow QP\n"); + rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &gsi_sqp->qplib_qp); + if (rc) { + dev_err(rdev_to_dev(rdev), "Destroy Shadow QP failed"); + goto fail; + } + bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp); + + /* remove from active qp list */ + mutex_lock(&rdev->qp_lock); + list_del(&gsi_sqp->list); + mutex_unlock(&rdev->qp_lock); + atomic_dec(&rdev->qp_count); + + kfree(rdev->gsi_ctx.sqp_tbl); + kfree(gsi_sah); + kfree(gsi_sqp); + rdev->gsi_ctx.gsi_sqp = NULL; + rdev->gsi_ctx.gsi_sah = NULL; + rdev->gsi_ctx.sqp_tbl = NULL; + + return 0; +fail: + return rc; +} + /* Queue Pairs */ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata) { @@ -769,6 +812,7 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata) int rc; bnxt_qplib_flush_cqn_wq(&qp->qplib_qp); + rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp); if (rc) { dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP"); @@ -783,40 +827,24 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata) bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp); - if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) { - bnxt_qplib_destroy_ah(&rdev->qplib_res, &rdev->sqp_ah->qplib_ah, - false); - - bnxt_qplib_clean_qp(&qp->qplib_qp); - rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, - &rdev->qp1_sqp->qplib_qp); - if (rc) { - dev_err(rdev_to_dev(rdev), - "Failed 
to destroy Shadow QP"); - return rc; - } - bnxt_qplib_free_qp_res(&rdev->qplib_res, - &rdev->qp1_sqp->qplib_qp); - mutex_lock(&rdev->qp_lock); - list_del(&rdev->qp1_sqp->list); - atomic_dec(&rdev->qp_count); - mutex_unlock(&rdev->qp_lock); - - kfree(rdev->sqp_ah); - kfree(rdev->qp1_sqp); - rdev->qp1_sqp = NULL; - rdev->sqp_ah = NULL; + if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) { + rc = bnxt_re_destroy_gsi_sqp(qp); + if (rc) + goto sh_fail; } - ib_umem_release(qp->rumem); - ib_umem_release(qp->sumem); - mutex_lock(&rdev->qp_lock); list_del(&qp->list); - atomic_dec(&rdev->qp_count); mutex_unlock(&rdev->qp_lock); + atomic_dec(&rdev->qp_count); + + ib_umem_release(qp->rumem); + ib_umem_release(qp->sumem); + kfree(qp); return 0; +sh_fail: + return rc; } static u8 __from_ib_qp_type(enum ib_qp_type type) @@ -984,8 +1012,6 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp if (rc) goto fail; - rdev->sqp_id = qp->qplib_qp.id; - spin_lock_init(&qp->sq_lock); INIT_LIST_HEAD(&qp->list); mutex_lock(&rdev->qp_lock); @@ -998,205 +1024,375 @@ fail: return NULL; } -struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, - struct ib_qp_init_attr *qp_init_attr, - struct ib_udata *udata) +static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp, + struct ib_qp_init_attr *init_attr) { - struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); - struct bnxt_re_dev *rdev = pd->rdev; - struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr; - struct bnxt_re_qp *qp; - struct bnxt_re_cq *cq; - struct bnxt_re_srq *srq; - int rc, entries; + struct bnxt_qplib_dev_attr *dev_attr; + struct bnxt_qplib_qp *qplqp; + struct bnxt_re_dev *rdev; + int entries; - if ((qp_init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) || - (qp_init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes) || - (qp_init_attr->cap.max_send_sge > dev_attr->max_qp_sges) || - (qp_init_attr->cap.max_recv_sge > dev_attr->max_qp_sges) || - (qp_init_attr->cap.max_inline_data > dev_attr->max_inline_data)) - return ERR_PTR(-EINVAL); + rdev = qp->rdev; + qplqp = &qp->qplib_qp; + dev_attr = &rdev->dev_attr; - qp = kzalloc(sizeof(*qp), GFP_KERNEL); - if (!qp) - return ERR_PTR(-ENOMEM); + if (init_attr->srq) { + struct bnxt_re_srq *srq; - qp->rdev = rdev; - ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr); - qp->qplib_qp.pd = &pd->qplib_pd; - qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp); - qp->qplib_qp.type = __from_ib_qp_type(qp_init_attr->qp_type); + srq = container_of(init_attr->srq, struct bnxt_re_srq, ib_srq); + if (!srq) { + dev_err(rdev_to_dev(rdev), "SRQ not found"); + return -EINVAL; + } + qplqp->srq = &srq->qplib_srq; + qplqp->rq.max_wqe = 0; + } else { + /* Allocate 1 more than what's provided so posting max doesn't + * mean empty. 
+ */ + entries = roundup_pow_of_two(init_attr->cap.max_recv_wr + 1); + qplqp->rq.max_wqe = min_t(u32, entries, + dev_attr->max_qp_wqes + 1); - if (qp_init_attr->qp_type == IB_QPT_GSI && - bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx)) - qp->qplib_qp.type = CMDQ_CREATE_QP_TYPE_GSI; - if (qp->qplib_qp.type == IB_QPT_MAX) { + qplqp->rq.q_full_delta = qplqp->rq.max_wqe - + init_attr->cap.max_recv_wr; + qplqp->rq.max_sge = init_attr->cap.max_recv_sge; + if (qplqp->rq.max_sge > dev_attr->max_qp_sges) + qplqp->rq.max_sge = dev_attr->max_qp_sges; + } + + return 0; +} + +static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp) +{ + struct bnxt_qplib_dev_attr *dev_attr; + struct bnxt_qplib_qp *qplqp; + struct bnxt_re_dev *rdev; + + rdev = qp->rdev; + qplqp = &qp->qplib_qp; + dev_attr = &rdev->dev_attr; + + qplqp->rq.max_sge = dev_attr->max_qp_sges; + if (qplqp->rq.max_sge > dev_attr->max_qp_sges) + qplqp->rq.max_sge = dev_attr->max_qp_sges; +} + +static void bnxt_re_init_sq_attr(struct bnxt_re_qp *qp, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata) +{ + struct bnxt_qplib_dev_attr *dev_attr; + struct bnxt_qplib_qp *qplqp; + struct bnxt_re_dev *rdev; + int entries; + + rdev = qp->rdev; + qplqp = &qp->qplib_qp; + dev_attr = &rdev->dev_attr; + + qplqp->sq.max_sge = init_attr->cap.max_send_sge; + if (qplqp->sq.max_sge > dev_attr->max_qp_sges) + qplqp->sq.max_sge = dev_attr->max_qp_sges; + /* + * Change the SQ depth if user has requested minimum using + * configfs. Only supported for kernel consumers + */ + entries = init_attr->cap.max_send_wr; + /* Allocate 128 + 1 more than what's provided */ + entries = roundup_pow_of_two(entries + BNXT_QPLIB_RESERVED_QP_WRS + 1); + qplqp->sq.max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + + BNXT_QPLIB_RESERVED_QP_WRS + 1); + qplqp->sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1; + /* + * Reserving one slot for Phantom WQE. Application can + * post one extra entry in this case. But allowing this to avoid + * unexpected Queue full condition + */ + qplqp->sq.q_full_delta -= 1; +} + +static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp, + struct ib_qp_init_attr *init_attr) +{ + struct bnxt_qplib_dev_attr *dev_attr; + struct bnxt_qplib_qp *qplqp; + struct bnxt_re_dev *rdev; + int entries; + + rdev = qp->rdev; + qplqp = &qp->qplib_qp; + dev_attr = &rdev->dev_attr; + + entries = roundup_pow_of_two(init_attr->cap.max_send_wr + 1); + qplqp->sq.max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1); + qplqp->sq.q_full_delta = qplqp->sq.max_wqe - + init_attr->cap.max_send_wr; + qplqp->sq.max_sge++; /* Need one extra sge to put UD header */ + if (qplqp->sq.max_sge > dev_attr->max_qp_sges) + qplqp->sq.max_sge = dev_attr->max_qp_sges; +} + +static int bnxt_re_init_qp_type(struct bnxt_re_dev *rdev, + struct ib_qp_init_attr *init_attr) +{ + struct bnxt_qplib_chip_ctx *chip_ctx; + int qptype; + + chip_ctx = &rdev->chip_ctx; + + qptype = __from_ib_qp_type(init_attr->qp_type); + if (qptype == IB_QPT_MAX) { dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported", - qp->qplib_qp.type); - rc = -EINVAL; - goto fail; + qptype); + qptype = -EINVAL; + goto out; } - qp->qplib_qp.max_inline_data = qp_init_attr->cap.max_inline_data; - qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type == - IB_SIGNAL_ALL_WR) ? 
true : false); + if (bnxt_qplib_is_chip_gen_p5(chip_ctx) && + init_attr->qp_type == IB_QPT_GSI) + qptype = CMDQ_CREATE_QP_TYPE_GSI; +out: + return qptype; +} - qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge; - if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges) - qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges; +static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata) +{ + struct bnxt_qplib_dev_attr *dev_attr; + struct bnxt_qplib_qp *qplqp; + struct bnxt_re_dev *rdev; + struct bnxt_re_cq *cq; + int rc = 0, qptype; - if (qp_init_attr->send_cq) { - cq = container_of(qp_init_attr->send_cq, struct bnxt_re_cq, - ib_cq); + rdev = qp->rdev; + qplqp = &qp->qplib_qp; + dev_attr = &rdev->dev_attr; + + /* Setup misc params */ + ether_addr_copy(qplqp->smac, rdev->netdev->dev_addr); + qplqp->pd = &pd->qplib_pd; + qplqp->qp_handle = (u64)qplqp; + qplqp->max_inline_data = init_attr->cap.max_inline_data; + qplqp->sig_type = ((init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? + true : false); + qptype = bnxt_re_init_qp_type(rdev, init_attr); + if (qptype < 0) { + rc = qptype; + goto out; + } + qplqp->type = (u8)qptype; + + if (init_attr->qp_type == IB_QPT_RC) { + qplqp->max_rd_atomic = dev_attr->max_qp_rd_atom; + qplqp->max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom; + } + qplqp->mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu)); + qplqp->dpi = &rdev->dpi_privileged; /* Doorbell page */ + if (init_attr->create_flags) + dev_dbg(rdev_to_dev(rdev), + "QP create flags 0x%x not supported", + init_attr->create_flags); + + /* Setup CQs */ + if (init_attr->send_cq) { + cq = container_of(init_attr->send_cq, struct bnxt_re_cq, ib_cq); if (!cq) { dev_err(rdev_to_dev(rdev), "Send CQ not found"); rc = -EINVAL; - goto fail; + goto out; } - qp->qplib_qp.scq = &cq->qplib_cq; + qplqp->scq = &cq->qplib_cq; qp->scq = cq; } - if (qp_init_attr->recv_cq) { - cq = container_of(qp_init_attr->recv_cq, struct bnxt_re_cq, - ib_cq); + if (init_attr->recv_cq) { + cq = container_of(init_attr->recv_cq, struct bnxt_re_cq, ib_cq); if (!cq) { dev_err(rdev_to_dev(rdev), "Receive CQ not found"); rc = -EINVAL; - goto fail; + goto out; } - qp->qplib_qp.rcq = &cq->qplib_cq; + qplqp->rcq = &cq->qplib_cq; qp->rcq = cq; } - if (qp_init_attr->srq) { - srq = container_of(qp_init_attr->srq, struct bnxt_re_srq, - ib_srq); - if (!srq) { - dev_err(rdev_to_dev(rdev), "SRQ not found"); - rc = -EINVAL; - goto fail; - } - qp->qplib_qp.srq = &srq->qplib_srq; - qp->qplib_qp.rq.max_wqe = 0; - } else { - /* Allocate 1 more than what's provided so posting max doesn't - * mean empty - */ - entries = roundup_pow_of_two(qp_init_attr->cap.max_recv_wr + 1); - qp->qplib_qp.rq.max_wqe = min_t(u32, entries, - dev_attr->max_qp_wqes + 1); + /* Setup RQ/SRQ */ + rc = bnxt_re_init_rq_attr(qp, init_attr); + if (rc) + goto out; + if (init_attr->qp_type == IB_QPT_GSI) + bnxt_re_adjust_gsi_rq_attr(qp); + + /* Setup SQ */ + bnxt_re_init_sq_attr(qp, init_attr, udata); + if (init_attr->qp_type == IB_QPT_GSI) + bnxt_re_adjust_gsi_sq_attr(qp, init_attr); + + if (udata) /* This will update DPI and qp_handle */ + rc = bnxt_re_init_user_qp(rdev, pd, qp, udata); +out: + return rc; +} - qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe - - qp_init_attr->cap.max_recv_wr; +static int bnxt_re_create_shadow_gsi(struct bnxt_re_qp *qp, + struct bnxt_re_pd *pd) +{ + struct bnxt_re_sqp_entries *sqp_tbl = NULL; + struct bnxt_re_dev *rdev; + struct bnxt_re_qp *sqp; + struct bnxt_re_ah *sah; 
+ int rc = 0; + + rdev = qp->rdev; + /* Create a shadow QP to handle the QP1 traffic */ + sqp_tbl = kzalloc(sizeof(*sqp_tbl) * BNXT_RE_MAX_GSI_SQP_ENTRIES, + GFP_KERNEL); + if (!sqp_tbl) + return -ENOMEM; + rdev->gsi_ctx.sqp_tbl = sqp_tbl; - qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge; - if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges) - qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; + sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res, &qp->qplib_qp); + if (!sqp) { + rc = -ENODEV; + dev_err(rdev_to_dev(rdev), + "Failed to create Shadow QP for QP1"); + goto out; } + rdev->gsi_ctx.gsi_sqp = sqp; - qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu)); + sqp->rcq = qp->rcq; + sqp->scq = qp->scq; + sah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res, + &qp->qplib_qp); + if (!sah) { + bnxt_qplib_destroy_qp(&rdev->qplib_res, + &sqp->qplib_qp); + rc = -ENODEV; + dev_err(rdev_to_dev(rdev), + "Failed to create AH entry for ShadowQP"); + goto out; + } + rdev->gsi_ctx.gsi_sah = sah; - if (qp_init_attr->qp_type == IB_QPT_GSI && - !(bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx))) { - /* Allocate 1 more than what's provided */ - entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1); - qp->qplib_qp.sq.max_wqe = min_t(u32, entries, - dev_attr->max_qp_wqes + 1); - qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe - - qp_init_attr->cap.max_send_wr; - qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; - if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges) - qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; - qp->qplib_qp.sq.max_sge++; - if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges) - qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges; - - qp->qplib_qp.rq_hdr_buf_size = - BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2; - - qp->qplib_qp.sq_hdr_buf_size = - BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2; - qp->qplib_qp.dpi = &rdev->dpi_privileged; - rc = bnxt_qplib_create_qp1(&rdev->qplib_res, &qp->qplib_qp); - if (rc) { - dev_err(rdev_to_dev(rdev), "Failed to create HW QP1"); - goto fail; - } - /* Create a shadow QP to handle the QP1 traffic */ - rdev->qp1_sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res, - &qp->qplib_qp); - if (!rdev->qp1_sqp) { - rc = -EINVAL; - dev_err(rdev_to_dev(rdev), - "Failed to create Shadow QP for QP1"); - goto qp_destroy; - } - rdev->sqp_ah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res, - &qp->qplib_qp); - if (!rdev->sqp_ah) { - bnxt_qplib_destroy_qp(&rdev->qplib_res, - &rdev->qp1_sqp->qplib_qp); - rc = -EINVAL; - dev_err(rdev_to_dev(rdev), - "Failed to create AH entry for ShadowQP"); - goto qp_destroy; - } + return 0; +out: + kfree(sqp_tbl); + return rc; +} - } else { - /* Allocate 128 + 1 more than what's provided */ - entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + - BNXT_QPLIB_RESERVED_QP_WRS + 1); - qp->qplib_qp.sq.max_wqe = min_t(u32, entries, - dev_attr->max_qp_wqes + - BNXT_QPLIB_RESERVED_QP_WRS + 1); - qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1; +static int bnxt_re_create_gsi_qp(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd, + struct ib_qp_init_attr *init_attr) +{ + struct bnxt_re_dev *rdev; + struct bnxt_qplib_qp *qplqp; + int rc = 0; - /* - * Reserving one slot for Phantom WQE. Application can - * post one extra entry in this case. 
But allowing this to avoid - * unexpected Queue full condition - */ + rdev = qp->rdev; + qplqp = &qp->qplib_qp; - qp->qplib_qp.sq.q_full_delta -= 1; + qplqp->rq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2; + qplqp->sq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2; - qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom; - qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom; - if (udata) { - rc = bnxt_re_init_user_qp(rdev, pd, qp, udata); - if (rc) - goto fail; - } else { - qp->qplib_qp.dpi = &rdev->dpi_privileged; - } + rc = bnxt_qplib_create_qp1(&rdev->qplib_res, qplqp); + if (rc) { + dev_err(rdev_to_dev(rdev), "create HW QP1 failed!"); + goto out; + } + + rc = bnxt_re_create_shadow_gsi(qp, pd); +out: + return rc; +} + +static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev, + struct ib_qp_init_attr *init_attr, + struct bnxt_qplib_dev_attr *dev_attr) +{ + bool rc = true; + if (init_attr->cap.max_send_wr > dev_attr->max_qp_wqes || + init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes || + init_attr->cap.max_send_sge > dev_attr->max_qp_sges || + init_attr->cap.max_recv_sge > dev_attr->max_qp_sges || + init_attr->cap.max_inline_data > dev_attr->max_inline_data) { + dev_err(rdev_to_dev(rdev), + "Create QP failed - max exceeded! 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x", + init_attr->cap.max_send_wr, dev_attr->max_qp_wqes, + init_attr->cap.max_recv_wr, dev_attr->max_qp_wqes, + init_attr->cap.max_send_sge, dev_attr->max_qp_sges, + init_attr->cap.max_recv_sge, dev_attr->max_qp_sges, + init_attr->cap.max_inline_data, + dev_attr->max_inline_data); + rc = false; + } + return rc; +} + +struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, + struct ib_qp_init_attr *qp_init_attr, + struct ib_udata *udata) +{ + struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); + struct bnxt_re_dev *rdev = pd->rdev; + struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr; + struct bnxt_re_qp *qp; + int rc; + + rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr); + if (!rc) { + rc = -EINVAL; + goto exit; + } + + qp = kzalloc(sizeof(*qp), GFP_KERNEL); + if (!qp) { + rc = -ENOMEM; + goto exit; + } + qp->rdev = rdev; + rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, udata); + if (rc) + goto fail; + + if (qp_init_attr->qp_type == IB_QPT_GSI && + !(bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx))) { + rc = bnxt_re_create_gsi_qp(qp, pd, qp_init_attr); + if (rc == -ENODEV) + goto qp_destroy; + if (rc) + goto fail; + } else { rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp); if (rc) { dev_err(rdev_to_dev(rdev), "Failed to create HW QP"); goto free_umem; } + if (udata) { + struct bnxt_re_qp_resp resp; + + resp.qpid = qp->qplib_qp.id; + resp.rsvd = 0; + rc = ib_copy_to_udata(udata, &resp, sizeof(resp)); + if (rc) { + dev_err(rdev_to_dev(rdev), "Failed to copy QP udata"); + goto qp_destroy; + } + } } qp->ib_qp.qp_num = qp->qplib_qp.id; + if (qp_init_attr->qp_type == IB_QPT_GSI) + rdev->gsi_ctx.gsi_qp = qp; spin_lock_init(&qp->sq_lock); spin_lock_init(&qp->rq_lock); - - if (udata) { - struct bnxt_re_qp_resp resp; - - resp.qpid = qp->ib_qp.qp_num; - resp.rsvd = 0; - rc = ib_copy_to_udata(udata, &resp, sizeof(resp)); - if (rc) { - dev_err(rdev_to_dev(rdev), "Failed to copy QP udata"); - goto qp_destroy; - } - } INIT_LIST_HEAD(&qp->list); mutex_lock(&rdev->qp_lock); list_add_tail(&qp->list, &rdev->qp_list); - atomic_inc(&rdev->qp_count); mutex_unlock(&rdev->qp_lock); + atomic_inc(&rdev->qp_count); return &qp->ib_qp; qp_destroy: @@ -1206,6 +1402,7 @@ free_umem: 
ib_umem_release(qp->sumem); fail: kfree(qp); +exit: return ERR_PTR(rc); } @@ -1426,7 +1623,7 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr, switch (srq_attr_mask) { case IB_SRQ_MAX_WR: /* SRQ resize is not supported */ - break; + return -EINVAL; case IB_SRQ_LIMIT: /* Change the SRQ threshold */ if (srq_attr->srq_limit > srq->qplib_srq.max_wqe) @@ -1441,13 +1638,12 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr, /* On success, update the shadow */ srq->srq_limit = srq_attr->srq_limit; /* No need to Build and send response back to udata */ - break; + return 0; default: dev_err(rdev_to_dev(rdev), "Unsupported srq_attr_mask 0x%x", srq_attr_mask); return -EINVAL; } - return 0; } int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr) @@ -1504,7 +1700,7 @@ static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp1_qp, int qp_attr_mask) { - struct bnxt_re_qp *qp = rdev->qp1_sqp; + struct bnxt_re_qp *qp = rdev->gsi_ctx.gsi_sqp; int rc = 0; if (qp_attr_mask & IB_QP_STATE) { @@ -1768,7 +1964,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, dev_err(rdev_to_dev(rdev), "Failed to modify HW QP"); return rc; } - if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) + if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask); return rc; } @@ -2013,9 +2209,12 @@ static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp, struct bnxt_qplib_swqe *wqe, int payload_size) { + struct bnxt_re_sqp_entries *sqp_entry; struct bnxt_qplib_sge ref, sge; + struct bnxt_re_dev *rdev; u32 rq_prod_index; - struct bnxt_re_sqp_entries *sqp_entry; + + rdev = qp->rdev; rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp); @@ -2030,7 +2229,7 @@ static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp, ref.lkey = wqe->sg_list[0].lkey; ref.size = wqe->sg_list[0].size; - sqp_entry = &qp->rdev->sqp_tbl[rq_prod_index]; + sqp_entry = &rdev->gsi_ctx.sqp_tbl[rq_prod_index]; /* SGE 1 */ wqe->sg_list[0].addr = sge.addr; @@ -2850,12 +3049,13 @@ static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev, return rc; } -static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp, +static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *gsi_qp, struct bnxt_qplib_cqe *cqe) { - struct bnxt_re_dev *rdev = qp1_qp->rdev; + struct bnxt_re_dev *rdev = gsi_qp->rdev; struct bnxt_re_sqp_entries *sqp_entry = NULL; - struct bnxt_re_qp *qp = rdev->qp1_sqp; + struct bnxt_re_qp *gsi_sqp = rdev->gsi_ctx.gsi_sqp; + struct bnxt_re_ah *gsi_sah; struct ib_send_wr *swr; struct ib_ud_wr udwr; struct ib_recv_wr rwr; @@ -2878,19 +3078,19 @@ static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp, swr = &udwr.wr; tbl_idx = cqe->wr_id; - rq_hdr_buf = qp1_qp->qplib_qp.rq_hdr_buf + - (tbl_idx * qp1_qp->qplib_qp.rq_hdr_buf_size); - rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp1_qp->qplib_qp, + rq_hdr_buf = gsi_qp->qplib_qp.rq_hdr_buf + + (tbl_idx * gsi_qp->qplib_qp.rq_hdr_buf_size); + rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp, tbl_idx); /* Shadow QP header buffer */ - shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp->qplib_qp, + shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp, tbl_idx); - sqp_entry = &rdev->sqp_tbl[tbl_idx]; + sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx]; /* Store this cqe */ memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe)); - sqp_entry->qp1_qp = 
qp1_qp; + sqp_entry->qp1_qp = gsi_qp; /* Find packet type from the cqe */ @@ -2944,7 +3144,7 @@ static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp, rwr.wr_id = tbl_idx; rwr.next = NULL; - rc = bnxt_re_post_recv_shadow_qp(rdev, qp, &rwr); + rc = bnxt_re_post_recv_shadow_qp(rdev, gsi_sqp, &rwr); if (rc) { dev_err(rdev_to_dev(rdev), "Failed to post Rx buffers to shadow QP"); @@ -2956,15 +3156,13 @@ static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp, swr->wr_id = tbl_idx; swr->opcode = IB_WR_SEND; swr->next = NULL; - - udwr.ah = &rdev->sqp_ah->ib_ah; - udwr.remote_qpn = rdev->qp1_sqp->qplib_qp.id; - udwr.remote_qkey = rdev->qp1_sqp->qplib_qp.qkey; + gsi_sah = rdev->gsi_ctx.gsi_sah; + udwr.ah = &gsi_sah->ib_ah; + udwr.remote_qpn = gsi_sqp->qplib_qp.id; + udwr.remote_qkey = gsi_sqp->qplib_qp.qkey; /* post data received in the send queue */ - rc = bnxt_re_post_send_shadow_qp(rdev, qp, swr); - - return 0; + return bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr); } static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc, @@ -3029,12 +3227,12 @@ static void bnxt_re_process_res_rc_wc(struct ib_wc *wc, wc->opcode = IB_WC_RECV_RDMA_WITH_IMM; } -static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp, +static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp, struct ib_wc *wc, struct bnxt_qplib_cqe *cqe) { - struct bnxt_re_dev *rdev = qp->rdev; - struct bnxt_re_qp *qp1_qp = NULL; + struct bnxt_re_dev *rdev = gsi_sqp->rdev; + struct bnxt_re_qp *gsi_qp = NULL; struct bnxt_qplib_cqe *orig_cqe = NULL; struct bnxt_re_sqp_entries *sqp_entry = NULL; int nw_type; @@ -3044,13 +3242,13 @@ static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp, tbl_idx = cqe->wr_id; - sqp_entry = &rdev->sqp_tbl[tbl_idx]; - qp1_qp = sqp_entry->qp1_qp; + sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx]; + gsi_qp = sqp_entry->qp1_qp; orig_cqe = &sqp_entry->cqe; wc->wr_id = sqp_entry->wrid; wc->byte_len = orig_cqe->length; - wc->qp = &qp1_qp->ib_qp; + wc->qp = &gsi_qp->ib_qp; wc->ex.imm_data = orig_cqe->immdata; wc->src_qp = orig_cqe->src_qp; @@ -3137,7 +3335,7 @@ static int send_phantom_wqe(struct bnxt_re_qp *qp) int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc) { struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq); - struct bnxt_re_qp *qp; + struct bnxt_re_qp *qp, *sh_qp; struct bnxt_qplib_cqe *cqe; int i, ncqe, budget; struct bnxt_qplib_q *sq; @@ -3201,8 +3399,9 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc) switch (cqe->opcode) { case CQ_BASE_CQE_TYPE_REQ: - if (qp->rdev->qp1_sqp && qp->qplib_qp.id == - qp->rdev->qp1_sqp->qplib_qp.id) { + sh_qp = qp->rdev->gsi_ctx.gsi_sqp; + if (sh_qp && + qp->qplib_qp.id == sh_qp->qplib_qp.id) { /* Handle this completion with * the stored completion */ @@ -3228,7 +3427,7 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc) * stored in the table */ tbl_idx = cqe->wr_id; - sqp_entry = &cq->rdev->sqp_tbl[tbl_idx]; + sqp_entry = &cq->rdev->gsi_ctx.sqp_tbl[tbl_idx]; wc->wr_id = sqp_entry->wrid; bnxt_re_process_res_rawqp1_wc(wc, cqe); break; @@ -3236,8 +3435,9 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc) bnxt_re_process_res_rc_wc(wc, cqe); break; case CQ_BASE_CQE_TYPE_RES_UD: - if (qp->rdev->qp1_sqp && qp->qplib_qp.id == - qp->rdev->qp1_sqp->qplib_qp.id) { + sh_qp = qp->rdev->gsi_ctx.gsi_sqp; + if (sh_qp && + qp->qplib_qp.id == sh_qp->qplib_qp.id) { /* Handle this completion with * the stored 
completion */ diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index cfe5f47d9890..34d1c4264602 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -70,7 +70,7 @@ static char version[] = BNXT_RE_DESC "\n"; MODULE_AUTHOR("Eddie Wai <eddie.wai@broadcom.com>"); -MODULE_DESCRIPTION(BNXT_RE_DESC " Driver"); +MODULE_DESCRIPTION(BNXT_RE_DESC); MODULE_LICENSE("Dual BSD/GPL"); /* globals */ @@ -119,61 +119,76 @@ static void bnxt_re_get_sriov_func_type(struct bnxt_re_dev *rdev) * reserved for the function. The driver may choose to allocate fewer * resources than the firmware maximum. */ -static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev) +static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev) { - u32 vf_qps = 0, vf_srqs = 0, vf_cqs = 0, vf_mrws = 0, vf_gids = 0; - u32 i; - u32 vf_pct; - u32 num_vfs; - struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr; + struct bnxt_qplib_dev_attr *attr; + struct bnxt_qplib_ctx *ctx; + int i; - rdev->qplib_ctx.qpc_count = min_t(u32, BNXT_RE_MAX_QPC_COUNT, - dev_attr->max_qp); + attr = &rdev->dev_attr; + ctx = &rdev->qplib_ctx; - rdev->qplib_ctx.mrw_count = BNXT_RE_MAX_MRW_COUNT_256K; + ctx->qpc_count = min_t(u32, BNXT_RE_MAX_QPC_COUNT, + attr->max_qp); + ctx->mrw_count = BNXT_RE_MAX_MRW_COUNT_256K; /* Use max_mr from fw since max_mrw does not get set */ - rdev->qplib_ctx.mrw_count = min_t(u32, rdev->qplib_ctx.mrw_count, - dev_attr->max_mr); - rdev->qplib_ctx.srqc_count = min_t(u32, BNXT_RE_MAX_SRQC_COUNT, - dev_attr->max_srq); - rdev->qplib_ctx.cq_count = min_t(u32, BNXT_RE_MAX_CQ_COUNT, - dev_attr->max_cq); - - for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) - rdev->qplib_ctx.tqm_count[i] = - rdev->dev_attr.tqm_alloc_reqs[i]; - - if (rdev->num_vfs) { - /* - * Reserve a set of resources for the PF. Divide the remaining - * resources among the VFs - */ - vf_pct = 100 - BNXT_RE_PCT_RSVD_FOR_PF; - num_vfs = 100 * rdev->num_vfs; - vf_qps = (rdev->qplib_ctx.qpc_count * vf_pct) / num_vfs; - vf_srqs = (rdev->qplib_ctx.srqc_count * vf_pct) / num_vfs; - vf_cqs = (rdev->qplib_ctx.cq_count * vf_pct) / num_vfs; - /* - * The driver allows many more MRs than other resources. If the - * firmware does also, then reserve a fixed amount for the PF - * and divide the rest among VFs. VFs may use many MRs for NFS - * mounts, ISER, NVME applications, etc. If the firmware - * severely restricts the number of MRs, then let PF have - * half and divide the rest among VFs, as for the other - * resource types. - */ - if (rdev->qplib_ctx.mrw_count < BNXT_RE_MAX_MRW_COUNT_64K) - vf_mrws = rdev->qplib_ctx.mrw_count * vf_pct / num_vfs; - else - vf_mrws = (rdev->qplib_ctx.mrw_count - - BNXT_RE_RESVD_MR_FOR_PF) / rdev->num_vfs; - vf_gids = BNXT_RE_MAX_GID_PER_VF; + ctx->mrw_count = min_t(u32, ctx->mrw_count, attr->max_mr); + ctx->srqc_count = min_t(u32, BNXT_RE_MAX_SRQC_COUNT, + attr->max_srq); + ctx->cq_count = min_t(u32, BNXT_RE_MAX_CQ_COUNT, attr->max_cq); + if (!bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx)) + for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) + rdev->qplib_ctx.tqm_count[i] = + rdev->dev_attr.tqm_alloc_reqs[i]; +} + +static void bnxt_re_limit_vf_res(struct bnxt_qplib_ctx *qplib_ctx, u32 num_vf) +{ + struct bnxt_qplib_vf_res *vf_res; + u32 mrws = 0; + u32 vf_pct; + u32 nvfs; + + vf_res = &qplib_ctx->vf_res; + /* + * Reserve a set of resources for the PF. 
Divide the remaining + * resources among the VFs + */ + vf_pct = 100 - BNXT_RE_PCT_RSVD_FOR_PF; + nvfs = num_vf; + num_vf = 100 * num_vf; + vf_res->max_qp_per_vf = (qplib_ctx->qpc_count * vf_pct) / num_vf; + vf_res->max_srq_per_vf = (qplib_ctx->srqc_count * vf_pct) / num_vf; + vf_res->max_cq_per_vf = (qplib_ctx->cq_count * vf_pct) / num_vf; + /* + * The driver allows many more MRs than other resources. If the + * firmware does also, then reserve a fixed amount for the PF and + * divide the rest among VFs. VFs may use many MRs for NFS + * mounts, ISER, NVME applications, etc. If the firmware severely + * restricts the number of MRs, then let PF have half and divide + * the rest among VFs, as for the other resource types. + */ + if (qplib_ctx->mrw_count < BNXT_RE_MAX_MRW_COUNT_64K) { + mrws = qplib_ctx->mrw_count * vf_pct; + nvfs = num_vf; + } else { + mrws = qplib_ctx->mrw_count - BNXT_RE_RESVD_MR_FOR_PF; } - rdev->qplib_ctx.vf_res.max_mrw_per_vf = vf_mrws; - rdev->qplib_ctx.vf_res.max_gid_per_vf = vf_gids; - rdev->qplib_ctx.vf_res.max_qp_per_vf = vf_qps; - rdev->qplib_ctx.vf_res.max_srq_per_vf = vf_srqs; - rdev->qplib_ctx.vf_res.max_cq_per_vf = vf_cqs; + vf_res->max_mrw_per_vf = (mrws / nvfs); + vf_res->max_gid_per_vf = BNXT_RE_MAX_GID_PER_VF; +} + +static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev) +{ + u32 num_vfs; + + memset(&rdev->qplib_ctx.vf_res, 0, sizeof(struct bnxt_qplib_vf_res)); + bnxt_re_limit_pf_res(rdev); + + num_vfs = bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx) ? + BNXT_RE_GEN_P5_MAX_VF : rdev->num_vfs; + if (num_vfs) + bnxt_re_limit_vf_res(&rdev->qplib_ctx, num_vfs); } /* for handling bnxt_en callbacks later */ @@ -193,9 +208,11 @@ static void bnxt_re_sriov_config(void *p, int num_vfs) return; rdev->num_vfs = num_vfs; - bnxt_re_set_resource_limits(rdev); - bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw, - &rdev->qplib_ctx); + if (!bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx)) { + bnxt_re_set_resource_limits(rdev); + bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw, + &rdev->qplib_ctx); + } } static void bnxt_re_shutdown(void *p) @@ -897,10 +914,14 @@ static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq, return 0; } +#define BNXT_RE_GEN_P5_PF_NQ_DB 0x10000 +#define BNXT_RE_GEN_P5_VF_NQ_DB 0x4000 static u32 bnxt_re_get_nqdb_offset(struct bnxt_re_dev *rdev, u16 indx) { return bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx) ? - 0x10000 : rdev->msix_entries[indx].db_offset; + (rdev->is_virtfn ? 
BNXT_RE_GEN_P5_VF_NQ_DB : + BNXT_RE_GEN_P5_PF_NQ_DB) : + rdev->msix_entries[indx].db_offset; } static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev) @@ -1106,7 +1127,8 @@ static int bnxt_re_query_hwrm_pri2cos(struct bnxt_re_dev *rdev, u8 dir, static bool bnxt_re_is_qp1_or_shadow_qp(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp) { - return (qp->ib_qp.qp_type == IB_QPT_GSI) || (qp == rdev->qp1_sqp); + return (qp->ib_qp.qp_type == IB_QPT_GSI) || + (qp == rdev->gsi_ctx.gsi_sqp); } static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev) @@ -1410,8 +1432,8 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) rdev->is_virtfn); if (rc) goto disable_rcfw; - if (!rdev->is_virtfn) - bnxt_re_set_resource_limits(rdev); + + bnxt_re_set_resource_limits(rdev); rc = bnxt_qplib_alloc_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx, 0, bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx)); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index 5fc5ab7813c0..18b579c8a8c5 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -2606,11 +2606,8 @@ static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq, qp = (struct bnxt_qplib_qp *)((unsigned long) le64_to_cpu(hwcqe->qp_handle)); - if (!qp) { - dev_err(&cq->hwq.pdev->dev, - "FP: CQ Process terminal qp is NULL\n"); + if (!qp) return -EINVAL; - } /* Must block new posting of SQ and RQ */ qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c index 60c8f76aab33..5cdfa84faf85 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c @@ -494,8 +494,10 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, * shall setup this area for VF. 
Skipping the * HW programming */ - if (is_virtfn || bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx)) + if (is_virtfn) goto skip_ctx_setup; + if (bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx)) + goto config_vf_res; level = ctx->qpc_tbl.level; req.qpc_pg_size_qpc_lvl = (level << CMDQ_INITIALIZE_FW_QPC_LVL_SFT) | @@ -540,6 +542,7 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, req.number_of_srq = cpu_to_le32(ctx->srqc_tbl.max_elements); req.number_of_cq = cpu_to_le32(ctx->cq_tbl.max_elements); +config_vf_res: req.max_qp_per_vf = cpu_to_le32(ctx->vf_res.max_qp_per_vf); req.max_mrw_per_vf = cpu_to_le32(ctx->vf_res.max_mrw_per_vf); req.max_srq_per_vf = cpu_to_le32(ctx->vf_res.max_srq_per_vf); diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 535ee41ee421..c7214c49f202 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -1965,6 +1965,9 @@ static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid) int win; skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); + if (!skb) + return -ENOMEM; + req = __skb_put_zero(skb, sizeof(*req)); req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR)); req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16))); @@ -3282,7 +3285,7 @@ static int get_lladdr(struct net_device *dev, struct in6_addr *addr, static int pick_local_ip6addrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id) { - struct in6_addr uninitialized_var(addr); + struct in6_addr addr; struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->m_local_addr; struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr; diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index 16b74591a68d..a3595b8a4bcf 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c @@ -754,7 +754,7 @@ skip_cqe: static int __c4iw_poll_cq_one(struct c4iw_cq *chp, struct c4iw_qp *qhp, struct ib_wc *wc, struct c4iw_srq *srq) { - struct t4_cqe uninitialized_var(cqe); + struct t4_cqe cqe; struct t4_wq *wq = qhp ? 
&qhp->wq : NULL; u32 credit = 0; u8 cqe_flushed; diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c index 17f1e59ab12e..559b24ca5205 100644 --- a/drivers/infiniband/hw/efa/efa_verbs.c +++ b/drivers/infiniband/hw/efa/efa_verbs.c @@ -1231,7 +1231,7 @@ static int pbl_continuous_initialize(struct efa_dev *dev, */ static int pbl_indirect_initialize(struct efa_dev *dev, struct pbl_context *pbl) { - u32 size_in_pages = DIV_ROUND_UP(pbl->pbl_buf_size_in_bytes, PAGE_SIZE); + u32 size_in_pages = DIV_ROUND_UP(pbl->pbl_buf_size_in_bytes, EFA_CHUNK_PAYLOAD_SIZE); struct scatterlist *sgl; int sg_dma_cnt, err; diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c index 1aeea5d65c01..832b878fa67e 100644 --- a/drivers/infiniband/hw/hfi1/affinity.c +++ b/drivers/infiniband/hw/hfi1/affinity.c @@ -218,6 +218,8 @@ out: for (node = 0; node < node_affinity.num_possible_nodes; node++) hfi1_per_node_cntr[node] = 1; + pci_dev_put(dev); + return 0; } diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 10924f122072..65d6bf34614c 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -12191,6 +12191,7 @@ static void free_cntrs(struct hfi1_devdata *dd) if (dd->synth_stats_timer.function) del_timer_sync(&dd->synth_stats_timer); + cancel_work_sync(&dd->update_cntr_work); ppd = (struct hfi1_pportdata *)(dd + 1); for (i = 0; i < dd->num_pports; i++, ppd++) { kfree(ppd->cntrs); diff --git a/drivers/infiniband/hw/hfi1/efivar.c b/drivers/infiniband/hw/hfi1/efivar.c index d106d23016ba..75e39e403a58 100644 --- a/drivers/infiniband/hw/hfi1/efivar.c +++ b/drivers/infiniband/hw/hfi1/efivar.c @@ -152,7 +152,7 @@ int read_hfi1_efi_var(struct hfi1_devdata *dd, const char *kind, unsigned long *size, void **return_data) { char prefix_name[64]; - char name[64]; + char name[128]; int result; int i; diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c index 8c7ba7bad42b..607e2636a6d1 100644 --- a/drivers/infiniband/hw/hfi1/file_ops.c +++ b/drivers/infiniband/hw/hfi1/file_ops.c @@ -1224,8 +1224,10 @@ static int setup_base_ctxt(struct hfi1_filedata *fd, goto done; ret = init_user_ctxt(fd, uctxt); - if (ret) + if (ret) { + hfi1_free_ctxt_rcv_groups(uctxt); goto done; + } user_init(uctxt); @@ -1361,12 +1363,15 @@ static int user_exp_rcv_setup(struct hfi1_filedata *fd, unsigned long arg, addr = arg + offsetof(struct hfi1_tid_info, tidcnt); if (copy_to_user((void __user *)addr, &tinfo.tidcnt, sizeof(tinfo.tidcnt))) - return -EFAULT; + ret = -EFAULT; addr = arg + offsetof(struct hfi1_tid_info, length); - if (copy_to_user((void __user *)addr, &tinfo.length, + if (!ret && copy_to_user((void __user *)addr, &tinfo.length, sizeof(tinfo.length))) ret = -EFAULT; + + if (ret) + hfi1_user_exp_rcv_invalid(fd, &tinfo); } return ret; diff --git a/drivers/infiniband/hw/hfi1/firmware.c b/drivers/infiniband/hw/hfi1/firmware.c index c09080712485..747ec08dec0d 100644 --- a/drivers/infiniband/hw/hfi1/firmware.c +++ b/drivers/infiniband/hw/hfi1/firmware.c @@ -1786,6 +1786,7 @@ int parse_platform_config(struct hfi1_devdata *dd) if (!dd->platform_config.data) { dd_dev_err(dd, "%s: Missing config file\n", __func__); + ret = -EINVAL; goto bail; } ptr = (u32 *)dd->platform_config.data; @@ -1794,6 +1795,7 @@ int parse_platform_config(struct hfi1_devdata *dd) ptr++; if (magic_num != PLATFORM_CONFIG_MAGIC_NUM) { dd_dev_err(dd, "%s: Bad config file\n", __func__); + ret = -EINVAL; goto 
bail; } @@ -1817,6 +1819,7 @@ int parse_platform_config(struct hfi1_devdata *dd) if (file_length > dd->platform_config.size) { dd_dev_info(dd, "%s:File claims to be larger than read size\n", __func__); + ret = -EINVAL; goto bail; } else if (file_length < dd->platform_config.size) { dd_dev_info(dd, @@ -1837,6 +1840,7 @@ int parse_platform_config(struct hfi1_devdata *dd) dd_dev_err(dd, "%s: Failed validation at offset %ld\n", __func__, (ptr - (u32 *) dd->platform_config.data)); + ret = -EINVAL; goto bail; } @@ -1883,6 +1887,7 @@ int parse_platform_config(struct hfi1_devdata *dd) __func__, table_type, (ptr - (u32 *) dd->platform_config.data)); + ret = -EINVAL; goto bail; /* We don't trust this file now */ } pcfgcache->config_tables[table_type].table = ptr; @@ -1907,6 +1912,7 @@ int parse_platform_config(struct hfi1_devdata *dd) __func__, table_type, (ptr - (u32 *)dd->platform_config.data)); + ret = -EINVAL; goto bail; /* We don't trust this file now */ } pcfgcache->config_tables[table_type].table_metadata = diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c index 14d2a90964c3..a5631286c8e0 100644 --- a/drivers/infiniband/hw/hfi1/mmu_rb.c +++ b/drivers/infiniband/hw/hfi1/mmu_rb.c @@ -173,7 +173,7 @@ int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler, goto unlock; } __mmu_int_rb_insert(mnode, &handler->root); - list_add(&mnode->list, &handler->lru_list); + list_add_tail(&mnode->list, &handler->lru_list); ret = handler->ops->insert(handler->ops_arg, mnode); if (ret) { @@ -220,8 +220,10 @@ bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler, spin_lock_irqsave(&handler->lock, flags); node = __mmu_rb_search(handler, addr, len); if (node) { - if (node->addr == addr && node->len == len) + if (node->addr == addr && node->len == len) { + list_move_tail(&node->list, &handler->lru_list); goto unlock; + } __mmu_int_rb_remove(node, &handler->root); list_del(&node->list); /* remove from LRU list */ ret = true; @@ -242,8 +244,7 @@ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg) INIT_LIST_HEAD(&del_list); spin_lock_irqsave(&handler->lock, flags); - list_for_each_entry_safe_reverse(rbnode, ptr, &handler->lru_list, - list) { + list_for_each_entry_safe(rbnode, ptr, &handler->lru_list, list) { if (handler->ops->evict(handler->ops_arg, rbnode, evict_arg, &stop)) { __mmu_int_rb_remove(rbnode, &handler->root); @@ -255,9 +256,7 @@ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg) } spin_unlock_irqrestore(&handler->lock, flags); - while (!list_empty(&del_list)) { - rbnode = list_first_entry(&del_list, struct mmu_rb_node, list); - list_del(&rbnode->list); + list_for_each_entry_safe(rbnode, ptr, &del_list, list) { handler->ops->remove(handler->ops_arg, rbnode); } } diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c index 61362bd6d3ce..111705e6609c 100644 --- a/drivers/infiniband/hw/hfi1/pcie.c +++ b/drivers/infiniband/hw/hfi1/pcie.c @@ -45,6 +45,7 @@ * */ +#include <linux/bitfield.h> #include <linux/pci.h> #include <linux/io.h> #include <linux/delay.h> @@ -261,12 +262,6 @@ static u32 extract_speed(u16 linkstat) return speed; } -/* return the PCIe link speed from the given link status */ -static u32 extract_width(u16 linkstat) -{ - return (linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT; -} - /* read the link status and set dd->{lbus_width,lbus_speed,lbus_info} */ static void update_lbus_info(struct hfi1_devdata *dd) { @@ -279,7 +274,7 @@ static void update_lbus_info(struct 
hfi1_devdata *dd) return; } - dd->lbus_width = extract_width(linkstat); + dd->lbus_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, linkstat); dd->lbus_speed = extract_speed(linkstat); snprintf(dd->lbus_info, sizeof(dd->lbus_info), "PCIe,%uMHz,x%u", dd->lbus_speed, dd->lbus_width); diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c index 1a82ea73a0fc..8303c506733c 100644 --- a/drivers/infiniband/hw/hfi1/pio.c +++ b/drivers/infiniband/hw/hfi1/pio.c @@ -955,8 +955,7 @@ void sc_disable(struct send_context *sc) spin_unlock(&sc->release_lock); write_seqlock(&sc->waitlock); - if (!list_empty(&sc->piowait)) - list_move(&sc->piowait, &wake_list); + list_splice_init(&sc->piowait, &wake_list); write_sequnlock(&sc->waitlock); while (!list_empty(&wake_list)) { struct iowait *wait; @@ -2132,7 +2131,7 @@ int init_credit_return(struct hfi1_devdata *dd) "Unable to allocate credit return DMA range for NUMA %d\n", i); ret = -ENOMEM; - goto done; + goto free_cr_base; } } set_dev_node(&dd->pcidev->dev, dd->node); @@ -2140,6 +2139,10 @@ int init_credit_return(struct hfi1_devdata *dd) ret = 0; done: return ret; + +free_cr_base: + free_credit_return(dd); + goto done; } void free_credit_return(struct hfi1_devdata *dd) diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c index 2a684fc6056e..e4d5c33baece 100644 --- a/drivers/infiniband/hw/hfi1/sdma.c +++ b/drivers/infiniband/hw/hfi1/sdma.c @@ -3203,7 +3203,6 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx) { int rval = 0; - tx->num_desc++; if ((unlikely(tx->num_desc == tx->desc_limit))) { rval = _extend_sdma_tx_descs(dd, tx); if (rval) { @@ -3217,6 +3216,7 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx) SDMA_MAP_NONE, dd->sdma_pad_phys, sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1))); + tx->num_desc++; _sdma_close_tx(dd, tx); return rval; } diff --git a/drivers/infiniband/hw/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h index 1e2e40f79cb2..6ac00755848d 100644 --- a/drivers/infiniband/hw/hfi1/sdma.h +++ b/drivers/infiniband/hw/hfi1/sdma.h @@ -672,14 +672,13 @@ static inline void sdma_txclean(struct hfi1_devdata *dd, struct sdma_txreq *tx) static inline void _sdma_close_tx(struct hfi1_devdata *dd, struct sdma_txreq *tx) { - tx->descp[tx->num_desc].qw[0] |= - SDMA_DESC0_LAST_DESC_FLAG; - tx->descp[tx->num_desc].qw[1] |= - dd->default_desc1; + u16 last_desc = tx->num_desc - 1; + + tx->descp[last_desc].qw[0] |= SDMA_DESC0_LAST_DESC_FLAG; + tx->descp[last_desc].qw[1] |= dd->default_desc1; if (tx->flags & SDMA_TXREQ_F_URGENT) - tx->descp[tx->num_desc].qw[1] |= - (SDMA_DESC1_HEAD_TO_HOST_FLAG | - SDMA_DESC1_INT_REQ_FLAG); + tx->descp[last_desc].qw[1] |= (SDMA_DESC1_HEAD_TO_HOST_FLAG | + SDMA_DESC1_INT_REQ_FLAG); } static inline int _sdma_txadd_daddr( @@ -696,6 +695,7 @@ static inline int _sdma_txadd_daddr( type, addr, len); WARN_ON(len > tx->tlen); + tx->num_desc++; tx->tlen -= len; /* special cases for last */ if (!tx->tlen) { @@ -707,7 +707,6 @@ static inline int _sdma_txadd_daddr( _sdma_close_tx(dd, tx); } } - tx->num_desc++; return rval; } diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c index 4d732353379d..6c1d36b2e2a7 100644 --- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c +++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c @@ -215,16 +215,11 @@ static void unpin_rcv_pages(struct hfi1_filedata *fd, static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf) { int pinned; - unsigned int npages; + 
unsigned int npages = tidbuf->npages; unsigned long vaddr = tidbuf->vaddr; struct page **pages = NULL; struct hfi1_devdata *dd = fd->uctxt->dd; - /* Get the number of pages the user buffer spans */ - npages = num_user_pages(vaddr, tidbuf->length); - if (!npages) - return -EINVAL; - if (npages > fd->uctxt->expected_count) { dd_dev_err(dd, "Expected buffer too big\n"); return -EINVAL; @@ -258,7 +253,6 @@ static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf) return pinned; } tidbuf->pages = pages; - tidbuf->npages = npages; fd->tid_n_pinned += pinned; return pinned; } @@ -325,6 +319,8 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd, if (!PAGE_ALIGNED(tinfo->vaddr)) return -EINVAL; + if (tinfo->length == 0) + return -EINVAL; tidbuf = kzalloc(sizeof(*tidbuf), GFP_KERNEL); if (!tidbuf) @@ -332,43 +328,42 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd, tidbuf->vaddr = tinfo->vaddr; tidbuf->length = tinfo->length; + tidbuf->npages = num_user_pages(tidbuf->vaddr, tidbuf->length); tidbuf->psets = kcalloc(uctxt->expected_count, sizeof(*tidbuf->psets), GFP_KERNEL); if (!tidbuf->psets) { - kfree(tidbuf); - return -ENOMEM; + ret = -ENOMEM; + goto fail_release_mem; } pinned = pin_rcv_pages(fd, tidbuf); if (pinned <= 0) { - kfree(tidbuf->psets); - kfree(tidbuf); - return pinned; + ret = (pinned < 0) ? pinned : -ENOSPC; + goto fail_unpin; } /* Find sets of physically contiguous pages */ tidbuf->n_psets = find_phys_blocks(tidbuf, pinned); - /* - * We don't need to access this under a lock since tid_used is per - * process and the same process cannot be in hfi1_user_exp_rcv_clear() - * and hfi1_user_exp_rcv_setup() at the same time. - */ + /* Reserve the number of expected tids to be used. */ spin_lock(&fd->tid_lock); if (fd->tid_used + tidbuf->n_psets > fd->tid_limit) pageset_count = fd->tid_limit - fd->tid_used; else pageset_count = tidbuf->n_psets; + fd->tid_used += pageset_count; spin_unlock(&fd->tid_lock); - if (!pageset_count) - goto bail; + if (!pageset_count) { + ret = -ENOSPC; + goto fail_unreserve; + } ngroups = pageset_count / dd->rcv_entries.group_size; tidlist = kcalloc(pageset_count, sizeof(*tidlist), GFP_KERNEL); if (!tidlist) { ret = -ENOMEM; - goto nomem; + goto fail_unreserve; } tididx = 0; @@ -464,43 +459,60 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd, } unlock: mutex_unlock(&uctxt->exp_mutex); -nomem: hfi1_cdbg(TID, "total mapped: tidpairs:%u pages:%u (%d)", tididx, mapped_pages, ret); - if (tididx) { - spin_lock(&fd->tid_lock); - fd->tid_used += tididx; - spin_unlock(&fd->tid_lock); - tinfo->tidcnt = tididx; - tinfo->length = mapped_pages * PAGE_SIZE; - - if (copy_to_user(u64_to_user_ptr(tinfo->tidlist), - tidlist, sizeof(tidlist[0]) * tididx)) { - /* - * On failure to copy to the user level, we need to undo - * everything done so far so we don't leak resources. - */ - tinfo->tidlist = (unsigned long)&tidlist; - hfi1_user_exp_rcv_clear(fd, tinfo); - tinfo->tidlist = 0; - ret = -EFAULT; - goto bail; - } + + /* fail if nothing was programmed, set error if none provided */ + if (tididx == 0) { + if (ret >= 0) + ret = -ENOSPC; + goto fail_unreserve; } - /* - * If not everything was mapped (due to insufficient RcvArray entries, - * for example), unpin all unmapped pages so we can pin them nex time. 
- */ - if (mapped_pages != pinned) - unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages, - (pinned - mapped_pages), false); -bail: + /* adjust reserved tid_used to actual count */ + spin_lock(&fd->tid_lock); + fd->tid_used -= pageset_count - tididx; + spin_unlock(&fd->tid_lock); + + /* unpin all pages not covered by a TID */ + unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages, pinned - mapped_pages, + false); + + tinfo->tidcnt = tididx; + tinfo->length = mapped_pages * PAGE_SIZE; + + if (copy_to_user(u64_to_user_ptr(tinfo->tidlist), + tidlist, sizeof(tidlist[0]) * tididx)) { + ret = -EFAULT; + goto fail_unprogram; + } + + kfree(tidbuf->pages); kfree(tidbuf->psets); + kfree(tidbuf); kfree(tidlist); + return 0; + +fail_unprogram: + /* unprogram, unmap, and unpin all allocated TIDs */ + tinfo->tidlist = (unsigned long)tidlist; + hfi1_user_exp_rcv_clear(fd, tinfo); + tinfo->tidlist = 0; + pinned = 0; /* nothing left to unpin */ + pageset_count = 0; /* nothing left reserved */ +fail_unreserve: + spin_lock(&fd->tid_lock); + fd->tid_used -= pageset_count; + spin_unlock(&fd->tid_lock); +fail_unpin: + if (pinned > 0) + unpin_rcv_pages(fd, tidbuf, NULL, 0, pinned, false); +fail_release_mem: kfree(tidbuf->pages); + kfree(tidbuf->psets); kfree(tidbuf); - return ret > 0 ? 0 : ret; + kfree(tidlist); + return ret; } int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd, diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index d01e3222c00c..28bbc4708fd4 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -5216,8 +5216,8 @@ static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id) dev_err(dev, "AEQ overflow!\n"); - int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S; - roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st); + roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, + 1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S); /* Set reset level for reset_event() */ if (ops->set_default_reset_request) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h index 76a14db7028d..b9ab3ca3079c 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h @@ -89,7 +89,7 @@ #define HNS_ROCE_V2_SCCC_ENTRY_SZ 32 #define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ PAGE_SIZE #define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ PAGE_SIZE -#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000 +#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFF000 #define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2 #define HNS_ROCE_INVALID_LKEY 0x100 #define HNS_ROCE_CMQ_TX_TIMEOUT 30000 diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h index 6d6719fa7e46..44d65b11a36e 100644 --- a/drivers/infiniband/hw/i40iw/i40iw.h +++ b/drivers/infiniband/hw/i40iw/i40iw.h @@ -411,9 +411,8 @@ void i40iw_manage_arp_cache(struct i40iw_device *iwdev, bool ipv4, u32 action); -int i40iw_manage_apbvt(struct i40iw_device *iwdev, - u16 accel_local_port, - bool add_port); +enum i40iw_status_code i40iw_manage_apbvt(struct i40iw_device *iwdev, + u16 accel_local_port, bool add_port); struct i40iw_cqp_request *i40iw_get_cqp_request(struct i40iw_cqp *cqp, bool wait); void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request); diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c index 4d841a3c68f3..026557aa2307 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c +++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c @@ 
-2945,6 +2945,9 @@ static enum i40iw_status_code i40iw_sc_alloc_stag( u64 header; enum i40iw_page_size page_size; + if (!info->total_len && !info->all_memory) + return -EINVAL; + page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K; cqp = dev->cqp; wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); @@ -3003,6 +3006,9 @@ static enum i40iw_status_code i40iw_sc_mr_reg_non_shared( u8 addr_type; enum i40iw_page_size page_size; + if (!info->total_len && !info->all_memory) + return -EINVAL; + page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K; if (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY | I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY)) diff --git a/drivers/infiniband/hw/i40iw/i40iw_type.h b/drivers/infiniband/hw/i40iw/i40iw_type.h index adc8d2ec523d..5c4e2f206105 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_type.h +++ b/drivers/infiniband/hw/i40iw/i40iw_type.h @@ -779,6 +779,7 @@ struct i40iw_allocate_stag_info { bool use_hmc_fcn_index; u8 hmc_fcn_index; bool use_pf_rid; + bool all_memory; }; struct i40iw_reg_ns_stag_info { @@ -797,6 +798,7 @@ struct i40iw_reg_ns_stag_info { bool use_hmc_fcn_index; u8 hmc_fcn_index; bool use_pf_rid; + bool all_memory; }; struct i40iw_fast_reg_stag_info { diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index 7e9c1a40f040..98f779c82ea0 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c @@ -1500,7 +1500,8 @@ static int i40iw_handle_q_mem(struct i40iw_device *iwdev, static int i40iw_hw_alloc_stag(struct i40iw_device *iwdev, struct i40iw_mr *iwmr) { struct i40iw_allocate_stag_info *info; - struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd); + struct ib_pd *pd = iwmr->ibmr.pd; + struct i40iw_pd *iwpd = to_iwpd(pd); enum i40iw_status_code status; int err = 0; struct i40iw_cqp_request *cqp_request; @@ -1517,6 +1518,7 @@ static int i40iw_hw_alloc_stag(struct i40iw_device *iwdev, struct i40iw_mr *iwmr info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT; info->pd_id = iwpd->sc_pd.pd_id; info->total_len = iwmr->length; + info->all_memory = pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY; info->remote_access = true; cqp_info->cqp_cmd = OP_ALLOC_STAG; cqp_info->post_sq = 1; @@ -1570,6 +1572,8 @@ static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, iwmr->type = IW_MEMREG_TYPE_MEM; palloc = &iwpbl->pble_alloc; iwmr->page_cnt = max_num_sg; + /* Use system PAGE_SIZE as the sg page sizes are unknown at this point */ + iwmr->length = max_num_sg * PAGE_SIZE; mutex_lock(&iwdev->pbl_mutex); status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt); mutex_unlock(&iwdev->pbl_mutex); @@ -1666,7 +1670,8 @@ static int i40iw_hwreg_mr(struct i40iw_device *iwdev, { struct i40iw_pbl *iwpbl = &iwmr->iwpbl; struct i40iw_reg_ns_stag_info *stag_info; - struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd); + struct ib_pd *pd = iwmr->ibmr.pd; + struct i40iw_pd *iwpd = to_iwpd(pd); struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc; enum i40iw_status_code status; int err = 0; @@ -1686,6 +1691,7 @@ static int i40iw_hwreg_mr(struct i40iw_device *iwdev, stag_info->total_len = iwmr->length; stag_info->access_rights = access; stag_info->pd_id = iwpd->sc_pd.pd_id; + stag_info->all_memory = pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY; stag_info->addr_type = I40IW_ADDR_TYPE_VA_BASED; stag_info->page_size = iwmr->page_size; diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c 
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index bca5358f3ef2..395d8a99b12e 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -438,9 +438,13 @@ static int set_user_sq_size(struct mlx4_ib_dev *dev,
 			    struct mlx4_ib_qp *qp,
 			    struct mlx4_ib_create_qp *ucmd)
 {
+	u32 cnt;
+
 	/* Sanity check SQ size before proceeding */
-	if ((1 << ucmd->log_sq_bb_count) > dev->dev->caps.max_wqes ||
-	    ucmd->log_sq_stride >
+	if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) ||
+	    cnt > dev->dev->caps.max_wqes)
+		return -EINVAL;
+	if (ucmd->log_sq_stride >
 	    ilog2(roundup_pow_of_two(dev->dev->caps.max_sq_desc_sz)) ||
 	    ucmd->log_sq_stride < MLX4_IB_MIN_SQ_STRIDE)
 		return -EINVAL;
@@ -552,15 +556,15 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
 		return (-EOPNOTSUPP);
 	}
 
-	if (ucmd->rx_hash_fields_mask & ~(MLX4_IB_RX_HASH_SRC_IPV4 |
-					  MLX4_IB_RX_HASH_DST_IPV4 |
-					  MLX4_IB_RX_HASH_SRC_IPV6 |
-					  MLX4_IB_RX_HASH_DST_IPV6 |
-					  MLX4_IB_RX_HASH_SRC_PORT_TCP |
-					  MLX4_IB_RX_HASH_DST_PORT_TCP |
-					  MLX4_IB_RX_HASH_SRC_PORT_UDP |
-					  MLX4_IB_RX_HASH_DST_PORT_UDP |
-					  MLX4_IB_RX_HASH_INNER)) {
+	if (ucmd->rx_hash_fields_mask & ~(u64)(MLX4_IB_RX_HASH_SRC_IPV4 |
+					       MLX4_IB_RX_HASH_DST_IPV4 |
+					       MLX4_IB_RX_HASH_SRC_IPV6 |
+					       MLX4_IB_RX_HASH_DST_IPV6 |
+					       MLX4_IB_RX_HASH_SRC_PORT_TCP |
+					       MLX4_IB_RX_HASH_DST_PORT_TCP |
+					       MLX4_IB_RX_HASH_SRC_PORT_UDP |
+					       MLX4_IB_RX_HASH_DST_PORT_UDP |
+					       MLX4_IB_RX_HASH_INNER)) {
 		pr_debug("RX Hash fields_mask has unsupported mask (0x%llx)\n",
 			 ucmd->rx_hash_fields_mask);
 		return (-EOPNOTSUPP);
@@ -3543,11 +3547,11 @@ static int _mlx4_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
 	int nreq;
 	int err = 0;
 	unsigned ind;
-	int uninitialized_var(size);
-	unsigned uninitialized_var(seglen);
+	int size;
+	unsigned seglen;
 	__be32 dummy;
 	__be32 *lso_wqe;
-	__be32 uninitialized_var(lso_hdr_sz);
+	__be32 lso_hdr_sz;
 	__be32 blh;
 	int i;
 	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c
index ea1f3a081b05..6c3a23ee3bc7 100644
--- a/drivers/infiniband/hw/mlx4/sysfs.c
+++ b/drivers/infiniband/hw/mlx4/sysfs.c
@@ -221,7 +221,7 @@ void del_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
 static int add_port_entries(struct mlx4_ib_dev *device, int port_num)
 {
 	int i;
-	char buff[11];
+	char buff[12];
 	struct mlx4_ib_iov_port *port = NULL;
 	int ret = 0 ;
 	struct ib_port_attr attr;
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 23ce0126b268..7f659c240c99 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -916,8 +916,8 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
 	struct mlx5_ib_cq *cq = to_mcq(ibcq);
 	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
-	int uninitialized_var(index);
-	int uninitialized_var(inlen);
+	int index;
+	int inlen;
 	u32 *cqb = NULL;
 	void *cqc;
 	int cqe_size;
@@ -1237,7 +1237,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 	__be64 *pas;
 	int page_shift;
 	int inlen;
-	int uninitialized_var(cqe_size);
+	int cqe_size;
 	unsigned long flags;
 
 	if (!MLX5_CAP_GEN(dev->mdev, cq_resize)) {
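set_user_sq_size() above now uses check_shl_overflow() so a user-supplied log_sq_bb_count cannot be shifted into an overflowed, and therefore small-looking, WQE count. A userspace sketch of the equivalent check; the helper below is a stand-in for the kernel macro, and the capability value is made up:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Returns true when "value << shift" would not fit in a u32. */
    static bool shl_overflows_u32(uint32_t value, unsigned int shift, uint32_t *res)
    {
    	if (shift >= 32 || (value != 0 && value > (UINT32_MAX >> shift)))
    		return true;
    	*res = value << shift;
    	return false;
    }

    int main(void)
    {
    	uint32_t cnt;
    	unsigned int log_sq_bb_count = 40;	/* hostile user-supplied log value */
    	uint32_t max_wqes = 16384;		/* pretend device capability */

    	/* The old code computed 1 << 40 first and compared the truncated result. */
    	if (shl_overflows_u32(1, log_sq_bb_count, &cnt) || cnt > max_wqes)
    		printf("rejected: log_sq_bb_count=%u\n", log_sq_bb_count);
    	else
    		printf("accepted: %u WQEs\n", cnt);
    	return 0;
    }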
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 747f42855b7b..7a3b56c15079 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -2556,7 +2556,7 @@ static ssize_t devx_async_event_read(struct file *filp, char __user *buf,
 {
 	struct devx_async_event_file *ev_file = filp->private_data;
 	struct devx_event_subscription *event_sub;
-	struct devx_async_event_data *uninitialized_var(event);
+	struct devx_async_event_data *event;
 	int ret = 0;
 	size_t eventsz;
 	bool omit_data;
@@ -2811,7 +2811,7 @@ DECLARE_UVERBS_NAMED_METHOD(
 	MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
 	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
 			UVERBS_IDR_ANY_OBJECT,
-			UVERBS_ACCESS_WRITE,
+			UVERBS_ACCESS_READ,
 			UA_MANDATORY),
 	UVERBS_ATTR_PTR_IN(
 		MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 348c1df69cdc..84d72b3b6df8 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -219,6 +219,13 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u8 port_num,
 		mdev = dev->mdev;
 		mdev_port_num = 1;
 	}
+	if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1 &&
+	    !mlx5_core_mp_enabled(mdev)) {
+		/* set local port to one for Function-Per-Port HCA. */
+		mdev = dev->mdev;
+		mdev_port_num = 1;
+	}
+
 	/* Declaring support of extended counters */
 	if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) {
 		struct ib_class_port_info cpi = {};
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 9025086a8932..6698032af87d 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2053,7 +2053,7 @@ static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
 	case MLX5_IB_MMAP_DEVICE_MEM:
 		return "Device Memory";
 	default:
-		return NULL;
+		return "Unknown";
 	}
 }
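mmap_cmd2str() above now returns "Unknown" instead of NULL for unrecognized commands, so callers that feed the result straight into a format string cannot dereference a NULL pointer. A tiny illustration with invented enum values:

    #include <stdio.h>

    enum mmap_cmd { CMD_REGULAR_PAGE, CMD_DEVICE_MEM, CMD_FUTURE };

    static const char *mmap_cmd2str(enum mmap_cmd cmd)
    {
    	switch (cmd) {
    	case CMD_REGULAR_PAGE:
    		return "Regular page";
    	case CMD_DEVICE_MEM:
    		return "Device Memory";
    	default:
    		return "Unknown";	/* never NULL, safe for printf("%s") */
    	}
    }

    int main(void)
    {
    	printf("%s\n", mmap_cmd2str(CMD_FUTURE));
    	return 0;
    }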
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 634f29cb7395..51623431b879 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -3821,7 +3821,7 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		return -EINVAL;
 
 	if (attr->port_num == 0 ||
-	    attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports)) {
+	    attr->port_num > dev->num_ports) {
 		mlx5_ib_dbg(dev, "invalid port number %d. number of ports is %d\n",
 			    attr->port_num, dev->num_ports);
 		return -EINVAL;
@@ -3890,6 +3890,40 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	return err;
 }
 
+static int validate_rd_atomic(struct mlx5_ib_dev *dev, struct ib_qp_attr *attr,
+			      int attr_mask, enum ib_qp_type qp_type)
+{
+	int log_max_ra_res;
+	int log_max_ra_req;
+
+	if (qp_type == MLX5_IB_QPT_DCI) {
+		log_max_ra_res = 1 << MLX5_CAP_GEN(dev->mdev,
+						   log_max_ra_res_dc);
+		log_max_ra_req = 1 << MLX5_CAP_GEN(dev->mdev,
+						   log_max_ra_req_dc);
+	} else {
+		log_max_ra_res = 1 << MLX5_CAP_GEN(dev->mdev,
+						   log_max_ra_res_qp);
+		log_max_ra_req = 1 << MLX5_CAP_GEN(dev->mdev,
+						   log_max_ra_req_qp);
+	}
+
+	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
+	    attr->max_rd_atomic > log_max_ra_res) {
+		mlx5_ib_dbg(dev, "invalid max_rd_atomic value %d\n",
+			    attr->max_rd_atomic);
+		return false;
+	}
+
+	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
+	    attr->max_dest_rd_atomic > log_max_ra_req) {
+		mlx5_ib_dbg(dev, "invalid max_dest_rd_atomic value %d\n",
+			    attr->max_dest_rd_atomic);
+		return false;
+	}
+	return true;
+}
+
 int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		      int attr_mask, struct ib_udata *udata)
 {
@@ -3986,21 +4020,8 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		}
 	}
 
-	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
-	    attr->max_rd_atomic >
-	    (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp))) {
-		mlx5_ib_dbg(dev, "invalid max_rd_atomic value %d\n",
-			    attr->max_rd_atomic);
-		goto out;
-	}
-
-	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
-	    attr->max_dest_rd_atomic >
-	    (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp))) {
-		mlx5_ib_dbg(dev, "invalid max_dest_rd_atomic value %d\n",
-			    attr->max_dest_rd_atomic);
+	if (!validate_rd_atomic(dev, attr, attr_mask, qp_type))
 		goto out;
-	}
 
 	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
 		err = 0;
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index bdf5ed38de22..0307c45aa6d3 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -635,7 +635,7 @@ void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox)
 
 int mthca_SYS_EN(struct mthca_dev *dev)
 {
-	u64 out;
+	u64 out = 0;
 	int ret;
 
 	ret = mthca_cmd_imm(dev, 0, &out, 0, 0, CMD_SYS_EN, CMD_TIME_CLASS_D);
@@ -1955,7 +1955,7 @@ int mthca_WRITE_MGM(struct mthca_dev *dev, int index,
 int mthca_MGID_HASH(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
 		    u16 *hash)
 {
-	u64 imm;
+	u64 imm = 0;
 	int err;
 
 	err = mthca_cmd_imm(dev, mailbox->dma, &imm, 0, 0, CMD_MGID_HASH,
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index fe9654a7af71..3acd1372c814 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -382,7 +382,7 @@ static int mthca_init_icm(struct mthca_dev *mdev,
 			  struct mthca_init_hca_param *init_hca,
 			  u64 icm_size)
 {
-	u64 aux_pages;
+	u64 aux_pages = 0;
 	int err;
 
 	err = mthca_SET_ICM_SIZE(mdev, icm_size, &aux_pages);
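The mthca hunks above pre-initialize the output values (u64 out = 0, u64 imm = 0, u64 aux_pages = 0) that a firmware command may leave untouched when it fails, so later reads of those variables are always well defined. A compact stand-alone sketch of the pattern with a hypothetical command function:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for a firmware command that may fail before writing its output. */
    static int fw_cmd_imm(int make_it_fail, uint64_t *out)
    {
    	if (make_it_fail)
    		return -5;	/* -EIO: *out is never written on this path */
    	*out = 0x1234;
    	return 0;
    }

    int main(void)
    {
    	uint64_t out = 0;	/* initialized, as in mthca_SYS_EN() above */
    	int err = fw_cmd_imm(1, &out);

    	/* Even though the command failed, logging "out" reads a defined value. */
    	printf("err=%d out=%llu\n", err, (unsigned long long)out);
    	return err ? 1 : 0;
    }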
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index d04c245359eb..c6e95d0d760a 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1639,8 +1639,8 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
 	 * without initializing f0 and size0, and they are in fact
 	 * never used uninitialized.
 	 */
-	int uninitialized_var(size0);
-	u32 uninitialized_var(f0);
+	int size0;
+	u32 f0;
 	int ind;
 	u8 op0 = 0;
@@ -1835,7 +1835,7 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
 	 * without initializing size0, and it is in fact never used
 	 * uninitialized.
 	 */
-	int uninitialized_var(size0);
+	int size0;
 	int ind;
 	void *wqe;
 	void *prev_wqe;
@@ -1943,8 +1943,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
 	 * without initializing f0 and size0, and they are in fact
 	 * never used uninitialized.
 	 */
-	int uninitialized_var(size0);
-	u32 uninitialized_var(f0);
+	int size0;
+	u32 f0;
 	int ind;
 	u8 op0 = 0;
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 93040c994e2e..50b75bd4633c 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -362,6 +362,10 @@ static int qedr_alloc_resources(struct qedr_dev *dev)
 	if (IS_IWARP(dev)) {
 		xa_init(&dev->qps);
 		dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq");
+		if (!dev->iwarp_wq) {
+			rc = -ENOMEM;
+			goto err1;
+		}
 	}
 
 	/* Allocate Status blocks for CNQ */
@@ -369,7 +373,7 @@ static int qedr_alloc_resources(struct qedr_dev *dev)
 			       GFP_KERNEL);
 	if (!dev->sb_array) {
 		rc = -ENOMEM;
-		goto err1;
+		goto err_destroy_wq;
 	}
 
 	dev->cnq_array = kcalloc(dev->num_cnq,
@@ -423,6 +427,9 @@ err3:
 	kfree(dev->cnq_array);
 err2:
 	kfree(dev->sb_array);
+err_destroy_wq:
+	if (IS_IWARP(dev))
+		destroy_workqueue(dev->iwarp_wq);
 err1:
 	kfree(dev->sgid_tbl);
 	return rc;
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index 62e6ffa9ad78..1d060ae47933 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -281,8 +281,8 @@ iter_chunk:
 				size = pa_end - pa_start + PAGE_SIZE;
 				usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x",
 					va_start, &pa_start, size, flags);
-				err = iommu_map(pd->domain, va_start, pa_start,
-						size, flags);
+				err = iommu_map_atomic(pd->domain, va_start,
+						       pa_start, size, flags);
 				if (err) {
 					usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
 						va_start, &pa_start, size, err);
@@ -298,8 +298,8 @@ iter_chunk:
 				size = pa - pa_start + PAGE_SIZE;
 				usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n",
 					va_start, &pa_start, size, flags);
-				err = iommu_map(pd->domain, va_start, pa_start,
-						size, flags);
+				err = iommu_map_atomic(pd->domain, va_start,
+						       pa_start, size, flags);
 				if (err) {
 					usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
 						va_start, &pa_start, size, err);
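Both the hfi1_user_exp_rcv_setup() rework earlier in this diff and the qedr_alloc_resources() change above rely on the same unwind-label idiom: each failure jumps to a label that releases exactly what has been acquired so far, and a new cleanup step (err_destroy_wq) is slotted in directly above the label it falls through to. A minimal stand-alone sketch with hypothetical names, not the drivers' code:

    #include <stdio.h>
    #include <stdlib.h>

    static int alloc_resources(int fail_wq, int fail_sb)
    {
    	char *wq = NULL;	/* stands in for the iwarp workqueue */
    	char *sb_array = NULL;	/* stands in for the status block array */
    	int rc = 0;

    	wq = fail_wq ? NULL : malloc(16);
    	if (!wq) {
    		rc = -12;		/* -ENOMEM */
    		goto err1;		/* nothing else to undo yet */
    	}

    	sb_array = fail_sb ? NULL : malloc(16);
    	if (!sb_array) {
    		rc = -12;
    		goto err_destroy_wq;	/* tear down the workqueue first */
    	}

    	/* Success; freed here only to keep the demo leak-free. */
    	free(sb_array);
    	free(wq);
    	return 0;

    err_destroy_wq:
    	free(wq);
    err1:
    	return rc;
    }

    int main(void)
    {
    	printf("%d %d %d\n", alloc_resources(1, 0), alloc_resources(0, 1),
    	       alloc_resources(0, 0));
    	return 0;
    }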