Mirror of https://github.com/freebsd/freebsd-src.git, synced 2024-11-27 13:32:45 +00:00
irdma(4): Upgrade to 1.2.36-k
Update Intel irdma driver to version 1.2.36-k.
Notable changes:
- Start using ib_sge directly instead of irdma_sge
- Turn off flush completion generator for libirdma
- Minor formatting changes
Signed-off-by: Bartosz Sobczak <bartosz.sobczak@intel.com>
Signed-off-by: Eric Joyner <erj@FreeBSD.org>
Reviewed by: erj@
Sponsored by: Intel Corporation
Differential Revision: https://reviews.freebsd.org/D43567
(cherry picked from commit 5b5f7d0e77)
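As background for the diff below: the headline change replaces the driver-private scatter/gather element (struct irdma_sge, removed from irdma_user.h in this patch) with the standard verbs struct ibv_sge. The two layouts carry the same information, so most of the patch is a mechanical field rename plus removal of the now-redundant copy helpers. A minimal sketch of the correspondence — the conversion helper is hypothetical and purely illustrative; the irdma_sge layout is quoted from the removed definition, with driver typedefs reduced to stdint:

#include <stdint.h>
#include <infiniband/verbs.h>   /* struct ibv_sge */

/* Legacy SGE removed by this update. */
struct irdma_sge {
    uint64_t tag_off;   /* tagged offset: the buffer address */
    uint32_t len;
    uint32_t stag;      /* steering tag: the local memory key */
};

/* Hypothetical helper showing the one-to-one mapping applied throughout
 * the diff: tag_off -> addr, len -> length, stag -> lkey. */
static inline struct ibv_sge irdma_sge_to_ibv_sge(const struct irdma_sge *sge)
{
    struct ibv_sge out = {
        .addr = sge->tag_off,
        .length = sge->len,
        .lkey = sge->stag,
    };
    return out;
}

Because nothing is lost in that mapping, the library can hand application SGE lists to the hardware paths directly instead of copying them first.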
Parent: 5f4753501e
Commit: a4510dafa8
@@ -1,7 +1,7 @@
 /*-
  * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
  *
- * Copyright (C) 2019 - 2022 Intel Corporation
+ * Copyright (C) 2019 - 2023 Intel Corporation
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -85,6 +85,7 @@ struct irdma_ucreate_qp {
 	struct ibv_create_qp ibv_cmd;
 	__aligned_u64 user_wqe_bufs;
 	__aligned_u64 user_compl_ctx;
+	__aligned_u64 comp_mask;
 };
 
 struct irdma_ucreate_qp_resp {
@@ -97,6 +98,9 @@ struct irdma_ucreate_qp_resp {
 	__u8 lsmm;
 	__u8 rsvd;
 	__u32 qp_caps;
+	__aligned_u64 comp_mask;
+	__u8 start_wqe_idx;
+	__u8 rsvd2[7];
 };
 
 struct irdma_umodify_qp_resp {
@@ -137,6 +141,8 @@ struct irdma_get_context_resp {
 	__u8 hw_rev;
 	__u8 rsvd2;
 	__aligned_u64 comp_mask;
+	__u16 min_hw_wq_size;
+	__u8 rsvd3[6];
 };
 
 struct irdma_ureg_mr {
@@ -54,6 +54,11 @@ enum irdma_memreg_type {
 
 enum {
 	IRDMA_ALLOC_UCTX_USE_RAW_ATTR = 1 << 0,
+	IRDMA_ALLOC_UCTX_MIN_HW_WQ_SIZE = 1 << 1,
+};
+
+enum {
+	IRDMA_CREATE_QP_USE_START_WQE_IDX = 1 << 0,
 };
 
 struct irdma_alloc_ucontext_req {
@@ -82,6 +87,8 @@ struct irdma_alloc_ucontext_resp {
 	__u8 hw_rev;
 	__u8 rsvd2;
 	__aligned_u64 comp_mask;
+	__u16 min_hw_wq_size;
+	__u8 rsvd3[6];
 };
 
 struct irdma_alloc_pd_resp {
@@ -101,6 +108,7 @@ struct irdma_create_cq_req {
 struct irdma_create_qp_req {
 	__aligned_u64 user_wqe_bufs;
 	__aligned_u64 user_compl_ctx;
+	__aligned_u64 comp_mask;
 };
 
 struct irdma_mem_reg_req {
@@ -130,6 +138,9 @@ struct irdma_create_qp_resp {
 	__u8 lsmm;
 	__u8 rsvd;
 	__u32 qp_caps;
+	__aligned_u64 comp_mask;
+	__u8 start_wqe_idx;
+	__u8 rsvd2[7];
 };
 
 struct irdma_modify_qp_resp {
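The comp_mask fields added above follow the usual extensible-ABI pattern: user space sets a request bit, and it only consumes a new response field if the kernel echoes the corresponding bit back. A minimal consumer-side sketch under that assumption (the helper is hypothetical; the flag and struct names come from the hunk above):

/* Caller is assumed to have set
 *     req.comp_mask |= IRDMA_CREATE_QP_USE_START_WQE_IDX;
 * before issuing ibv_cmd_create_qp(). */
static __u8 qp_start_wqe_idx(const struct irdma_create_qp_resp *resp)
{
    /* Older kernels never set the bit; fall back to WQE index 0. */
    if (resp->comp_mask & IRDMA_CREATE_QP_USE_START_WQE_IDX)
        return resp->start_wqe_idx;
    return 0;
}

The same handshake appears verbatim in irdma_vmapped_qp() in the libirdma hunks further down.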
@@ -46,7 +46,6 @@ enum irdma_vers {
 	IRDMA_GEN_RSVD = 0,
 	IRDMA_GEN_1 = 1,
 	IRDMA_GEN_2 = 2,
-	IRDMA_GEN_MAX = 2,
 };
 
 struct irdma_uk_attrs {
@@ -456,19 +456,6 @@ enum irdma_qp_wqe_size {
 	IRDMA_WQE_SIZE_256 = 256,
 };
 
-enum irdma_ws_op_type {
-	IRDMA_WS_OP_TYPE_NODE = 0,
-	IRDMA_WS_OP_TYPE_LEAF_NODE_GROUP,
-};
-
-enum irdma_ws_rate_limit_flags {
-	IRDMA_WS_RATE_LIMIT_FLAGS_VALID = 0x1,
-	IRDMA_WS_NO_RDMA_RATE_LIMIT = 0x2,
-	IRDMA_WS_LEAF_NODE_IS_PART_GROUP = 0x4,
-	IRDMA_WS_TREE_RATE_LIMITING = 0x8,
-	IRDMA_WS_PACING_CONTROL = 0x10,
-};
-
 /**
  * set_64bit_val - set 64 bit value to hw wqe
  * @wqe_words: wqe addr to write
@@ -45,16 +45,16 @@
  * @valid: The wqe valid
  */
 static void
-irdma_set_fragment(__le64 * wqe, u32 offset, struct irdma_sge *sge,
+irdma_set_fragment(__le64 * wqe, u32 offset, struct ibv_sge *sge,
 		   u8 valid)
 {
 	if (sge) {
 		set_64bit_val(wqe, offset,
-			      FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->tag_off));
+			      FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
 		set_64bit_val(wqe, offset + IRDMA_BYTE_8,
 			      FIELD_PREP(IRDMAQPSQ_VALID, valid) |
-			      FIELD_PREP(IRDMAQPSQ_FRAG_LEN, sge->len) |
-			      FIELD_PREP(IRDMAQPSQ_FRAG_STAG, sge->stag));
+			      FIELD_PREP(IRDMAQPSQ_FRAG_LEN, sge->length) |
+			      FIELD_PREP(IRDMAQPSQ_FRAG_STAG, sge->lkey));
 	} else {
 		set_64bit_val(wqe, offset, 0);
 		set_64bit_val(wqe, offset + IRDMA_BYTE_8,
@@ -71,14 +71,14 @@ irdma_set_fragment(__le64 * wqe, u32 offset, struct irdma_sge *sge,
  */
 static void
 irdma_set_fragment_gen_1(__le64 * wqe, u32 offset,
-			 struct irdma_sge *sge, u8 valid)
+			 struct ibv_sge *sge, u8 valid)
 {
 	if (sge) {
 		set_64bit_val(wqe, offset,
-			      FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->tag_off));
+			      FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
 		set_64bit_val(wqe, offset + IRDMA_BYTE_8,
-			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, sge->len) |
-			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, sge->stag));
+			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, sge->length) |
+			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, sge->lkey));
 	} else {
 		set_64bit_val(wqe, offset, 0);
 		set_64bit_val(wqe, offset + IRDMA_BYTE_8, 0);
@@ -209,8 +209,7 @@ irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 * wqe, u16 quanta,
 	if (IRDMA_RING_CURRENT_HEAD(qp->initial_ring) !=
 	    IRDMA_RING_CURRENT_TAIL(qp->sq_ring) &&
 	    !qp->push_mode) {
-		if (post_sq)
-			irdma_uk_qp_post_wr(qp);
+		irdma_uk_qp_post_wr(qp);
 	} else {
 		push = (__le64 *) ((uintptr_t)qp->push_wqe +
 				   (wqe_idx & 0x7) * 0x20);
@@ -338,7 +337,7 @@ irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
 		return EINVAL;
 
 	for (i = 0; i < op_info->num_lo_sges; i++)
-		total_size += op_info->lo_sg_list[i].len;
+		total_size += op_info->lo_sg_list[i].length;
 
 	read_fence |= info->read_fence;
 
@@ -357,7 +356,7 @@ irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
 
 	qp->sq_wrtrk_array[wqe_idx].signaled = info->signaled;
 	set_64bit_val(wqe, IRDMA_BYTE_16,
-		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));
+		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
 
 	if (info->imm_data_valid) {
 		set_64bit_val(wqe, IRDMA_BYTE_0,
@@ -386,7 +385,7 @@ irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
 		++addl_frag_cnt;
 	}
 
-	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
+	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
 	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
 	      FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid) |
 	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt) |
@@ -437,7 +436,7 @@ irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
 		return EINVAL;
 
 	for (i = 0; i < op_info->num_lo_sges; i++)
-		total_size += op_info->lo_sg_list[i].len;
+		total_size += op_info->lo_sg_list[i].length;
 
 	ret_code = irdma_fragcnt_to_quanta_sq(op_info->num_lo_sges, &quanta);
 	if (ret_code)
@@ -475,8 +474,8 @@ irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
 		++addl_frag_cnt;
 	}
 	set_64bit_val(wqe, IRDMA_BYTE_16,
-		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));
-	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
+		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
+	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
 	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
 	      FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
 	      FIELD_PREP(IRDMAQPSQ_OPCODE,
@@ -525,7 +524,7 @@ irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
 		return EINVAL;
 
 	for (i = 0; i < op_info->num_sges; i++)
-		total_size += op_info->sg_list[i].len;
+		total_size += op_info->sg_list[i].length;
 
 	if (info->imm_data_valid)
 		frag_cnt = op_info->num_sges + 1;
@@ -620,15 +619,15 @@ irdma_set_mw_bind_wqe_gen_1(__le64 * wqe,
  * @polarity: compatibility parameter
  */
 static void
-irdma_copy_inline_data_gen_1(u8 *wqe, struct irdma_sge *sge_list,
+irdma_copy_inline_data_gen_1(u8 *wqe, struct ibv_sge *sge_list,
 			     u32 num_sges, u8 polarity)
 {
 	u32 quanta_bytes_remaining = 16;
 	u32 i;
 
 	for (i = 0; i < num_sges; i++) {
-		u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].tag_off;
-		u32 sge_len = sge_list[i].len;
+		u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr;
+		u32 sge_len = sge_list[i].length;
 
 		while (sge_len) {
 			u32 bytes_copied;
@@ -683,7 +682,7 @@ irdma_set_mw_bind_wqe(__le64 * wqe,
  * @polarity: polarity of wqe valid bit
  */
 static void
-irdma_copy_inline_data(u8 *wqe, struct irdma_sge *sge_list,
+irdma_copy_inline_data(u8 *wqe, struct ibv_sge *sge_list,
 		       u32 num_sges, u8 polarity)
 {
 	u8 inline_valid = polarity << IRDMA_INLINE_VALID_S;
@@ -694,8 +693,8 @@ irdma_copy_inline_data(u8 *wqe, struct irdma_sge *sge_list,
 	wqe += 8;
 
 	for (i = 0; i < num_sges; i++) {
-		u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].tag_off;
-		u32 sge_len = sge_list[i].len;
+		u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr;
+		u32 sge_len = sge_list[i].length;
 
 		while (sge_len) {
 			u32 bytes_copied;
@@ -775,7 +774,7 @@ irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
 		return EINVAL;
 
 	for (i = 0; i < op_info->num_lo_sges; i++)
-		total_size += op_info->lo_sg_list[i].len;
+		total_size += op_info->lo_sg_list[i].length;
 
 	if (unlikely(total_size > qp->max_inline_data))
 		return EINVAL;
@@ -788,9 +787,9 @@ irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
 	qp->sq_wrtrk_array[wqe_idx].signaled = info->signaled;
 	read_fence |= info->read_fence;
 	set_64bit_val(wqe, IRDMA_BYTE_16,
-		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));
+		      FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
 
-	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
+	hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
 	      FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
 	      FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
 	      FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt ? 1 : 0) |
@@ -846,7 +845,7 @@ irdma_uk_inline_send(struct irdma_qp_uk *qp,
 		return EINVAL;
 
 	for (i = 0; i < op_info->num_sges; i++)
-		total_size += op_info->sg_list[i].len;
+		total_size += op_info->sg_list[i].length;
 
 	if (unlikely(total_size > qp->max_inline_data))
 		return EINVAL;
@@ -911,7 +910,7 @@ irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
 	u64 hdr;
 	u32 wqe_idx;
 	bool local_fence = false;
-	struct irdma_sge sge = {0};
+	struct ibv_sge sge = {0};
 	u16 quanta = IRDMA_QP_WQE_MIN_QUANTA;
 
 	info->push_wqe = qp->push_db ? true : false;
@@ -922,7 +921,7 @@ irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
 	if (!wqe)
 		return ENOSPC;
 
-	sge.stag = op_info->target_stag;
+	sge.lkey = op_info->target_stag;
 	qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0, &sge, 0);
 
 	set_64bit_val(wqe, IRDMA_BYTE_16, 0);
@@ -1436,8 +1435,7 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
 			IRDMA_RING_MOVE_TAIL(cq->cq_ring);
 			set_64bit_val(cq->shadow_area, IRDMA_BYTE_0,
 				      IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
-			memset(info, 0,
-			       sizeof(struct irdma_cq_poll_info));
+			memset(info, 0, sizeof(*info));
 			return irdma_uk_cq_poll_cmpl(cq, info);
 		}
 	}
@@ -1510,7 +1508,6 @@ exit:
 		if (pring && IRDMA_RING_MORE_WORK(*pring))
 			move_cq_head = false;
 	}
-
 	if (move_cq_head) {
 		IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
 		if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
@@ -1591,10 +1588,12 @@ irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
 int
 irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift, u32 *sqdepth)
 {
+	u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;
+
 	*sqdepth = irdma_round_up_wq((sq_size << shift) + IRDMA_SQ_RSVD);
 
-	if (*sqdepth < ((u32)uk_attrs->min_hw_wq_size << shift))
-		*sqdepth = uk_attrs->min_hw_wq_size << shift;
+	if (*sqdepth < min_size)
+		*sqdepth = min_size;
 	else if (*sqdepth > uk_attrs->max_hw_wq_quanta)
 		return EINVAL;
 
@@ -1608,10 +1607,12 @@ irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift, u32 *s
 int
 irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift, u32 *rqdepth)
 {
+	u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;
+
 	*rqdepth = irdma_round_up_wq((rq_size << shift) + IRDMA_RQ_RSVD);
 
-	if (*rqdepth < ((u32)uk_attrs->min_hw_wq_size << shift))
-		*rqdepth = uk_attrs->min_hw_wq_size << shift;
+	if (*rqdepth < min_size)
+		*rqdepth = min_size;
 	else if (*rqdepth > uk_attrs->max_hw_rq_quanta)
 		return EINVAL;
 
@@ -1644,41 +1645,16 @@ irdma_setup_connection_wqes(struct irdma_qp_uk *qp,
 {
 	u16 move_cnt = 1;
 
-	if (qp->uk_attrs->feature_flags & IRDMA_FEATURE_RTS_AE)
+	if (info->start_wqe_idx)
+		move_cnt = info->start_wqe_idx;
+	else if (qp->uk_attrs->feature_flags & IRDMA_FEATURE_RTS_AE)
 		move_cnt = 3;
 
 	qp->conn_wqes = move_cnt;
 	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, move_cnt);
 	IRDMA_RING_MOVE_TAIL_BY_COUNT(qp->sq_ring, move_cnt);
 	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->initial_ring, move_cnt);
 }
 
-/**
- * irdma_uk_calc_shift_wq - calculate WQE shift for both SQ and RQ
- * @ukinfo: qp initialization info
- * @sq_shift: Returns shift of SQ
- * @rq_shift: Returns shift of RQ
- */
-void
-irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info *ukinfo, u8 *sq_shift,
-		       u8 *rq_shift)
-{
-	bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2 ? true : false;
-
-	irdma_get_wqe_shift(ukinfo->uk_attrs,
-			    imm_support ? ukinfo->max_sq_frag_cnt + 1 :
-			    ukinfo->max_sq_frag_cnt,
-			    ukinfo->max_inline_data, sq_shift);
-
-	irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0,
-			    rq_shift);
-
-	if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) {
-		if (ukinfo->abi_ver > 4)
-			*rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
-	}
-}
-
 /**
  * irdma_uk_calc_depth_shift_sq - calculate depth and shift for SQ size.
  * @ukinfo: qp initialization info
@@ -1786,6 +1762,8 @@ irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
 		qp->wqe_ops = iw_wqe_uk_ops_gen_1;
 	else
 		qp->wqe_ops = iw_wqe_uk_ops;
+	qp->start_wqe_idx = info->start_wqe_idx;
 
 	return ret_code;
 }
@@ -48,7 +48,7 @@
 /**
  * Driver version
  */
-char libirdma_version[] = "1.2.17-k";
+char libirdma_version[] = "1.2.36-k";
 
 unsigned int irdma_dbg;
 
@@ -96,7 +96,6 @@ struct irdma_cq_buf {
 	LIST_ENTRY(irdma_cq_buf) list;
 	struct irdma_cq_uk cq;
 	struct verbs_mr vmr;
-	size_t buf_size;
 };
 
 extern pthread_mutex_t sigusr1_wait_mutex;
@@ -142,7 +141,6 @@ struct irdma_uqp {
 	struct ibv_recv_wr *pend_rx_wr;
 	struct irdma_qp_uk qp;
 	enum ibv_qp_type qp_type;
-	struct irdma_sge *recv_sges;
 };
 
 /* irdma_uverbs.c */
@@ -1,7 +1,7 @@
 /*-
  * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
  *
- * Copyright (c) 2015 - 2022 Intel Corporation
+ * Copyright (c) 2015 - 2023 Intel Corporation
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -50,7 +50,7 @@
 #define irdma_access_privileges u32
 #define irdma_physical_fragment u64
 #define irdma_address_list u64 *
-#define irdma_sgl struct irdma_sge *
+#define irdma_sgl struct ibv_sge *
 
 #define IRDMA_MAX_MR_SIZE 0x200000000000ULL
 
@@ -80,96 +80,6 @@
 #define IRDMA_OP_TYPE_REC_IMM 0x3f
 
 #define IRDMA_FLUSH_MAJOR_ERR 1
 #define IRDMA_SRQFLUSH_RSVD_MAJOR_ERR 0xfffe
 
-/* Async Events codes */
-#define IRDMA_AE_AMP_UNALLOCATED_STAG 0x0102
-#define IRDMA_AE_AMP_INVALID_STAG 0x0103
-#define IRDMA_AE_AMP_BAD_QP 0x0104
-#define IRDMA_AE_AMP_BAD_PD 0x0105
-#define IRDMA_AE_AMP_BAD_STAG_KEY 0x0106
-#define IRDMA_AE_AMP_BAD_STAG_INDEX 0x0107
-#define IRDMA_AE_AMP_BOUNDS_VIOLATION 0x0108
-#define IRDMA_AE_AMP_RIGHTS_VIOLATION 0x0109
-#define IRDMA_AE_AMP_TO_WRAP 0x010a
-#define IRDMA_AE_AMP_FASTREG_VALID_STAG 0x010c
-#define IRDMA_AE_AMP_FASTREG_MW_STAG 0x010d
-#define IRDMA_AE_AMP_FASTREG_INVALID_RIGHTS 0x010e
-#define IRDMA_AE_AMP_FASTREG_INVALID_LENGTH 0x0110
-#define IRDMA_AE_AMP_INVALIDATE_SHARED 0x0111
-#define IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS 0x0112
-#define IRDMA_AE_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS 0x0113
-#define IRDMA_AE_AMP_MWBIND_VALID_STAG 0x0114
-#define IRDMA_AE_AMP_MWBIND_OF_MR_STAG 0x0115
-#define IRDMA_AE_AMP_MWBIND_TO_ZERO_BASED_STAG 0x0116
-#define IRDMA_AE_AMP_MWBIND_TO_MW_STAG 0x0117
-#define IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS 0x0118
-#define IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS 0x0119
-#define IRDMA_AE_AMP_MWBIND_TO_INVALID_PARENT 0x011a
-#define IRDMA_AE_AMP_MWBIND_BIND_DISABLED 0x011b
-#define IRDMA_AE_PRIV_OPERATION_DENIED 0x011c
-#define IRDMA_AE_AMP_INVALIDATE_TYPE1_MW 0x011d
-#define IRDMA_AE_AMP_MWBIND_ZERO_BASED_TYPE1_MW 0x011e
-#define IRDMA_AE_AMP_FASTREG_INVALID_PBL_HPS_CFG 0x011f
-#define IRDMA_AE_AMP_MWBIND_WRONG_TYPE 0x0120
-#define IRDMA_AE_AMP_FASTREG_PBLE_MISMATCH 0x0121
-#define IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG 0x0132
-#define IRDMA_AE_UDA_XMIT_BAD_PD 0x0133
-#define IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT 0x0134
-#define IRDMA_AE_UDA_L4LEN_INVALID 0x0135
-#define IRDMA_AE_BAD_CLOSE 0x0201
-#define IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE 0x0202
-#define IRDMA_AE_CQ_OPERATION_ERROR 0x0203
-#define IRDMA_AE_RDMA_READ_WHILE_ORD_ZERO 0x0205
-#define IRDMA_AE_STAG_ZERO_INVALID 0x0206
-#define IRDMA_AE_IB_RREQ_AND_Q1_FULL 0x0207
-#define IRDMA_AE_IB_INVALID_REQUEST 0x0208
-#define IRDMA_AE_WQE_UNEXPECTED_OPCODE 0x020a
-#define IRDMA_AE_WQE_INVALID_PARAMETER 0x020b
-#define IRDMA_AE_WQE_INVALID_FRAG_DATA 0x020c
-#define IRDMA_AE_IB_REMOTE_ACCESS_ERROR 0x020d
-#define IRDMA_AE_IB_REMOTE_OP_ERROR 0x020e
-#define IRDMA_AE_WQE_LSMM_TOO_LONG 0x0220
-#define IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN 0x0301
-#define IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER 0x0303
-#define IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION 0x0304
-#define IRDMA_AE_DDP_UBE_INVALID_MO 0x0305
-#define IRDMA_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE 0x0306
-#define IRDMA_AE_DDP_UBE_INVALID_QN 0x0307
-#define IRDMA_AE_DDP_NO_L_BIT 0x0308
-#define IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION 0x0311
-#define IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE 0x0312
-#define IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST 0x0313
-#define IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP 0x0314
-#define IRDMA_AE_ROCE_RSP_LENGTH_ERROR 0x0316
-#define IRDMA_AE_ROCE_EMPTY_MCG 0x0380
-#define IRDMA_AE_ROCE_BAD_MC_IP_ADDR 0x0381
-#define IRDMA_AE_ROCE_BAD_MC_QPID 0x0382
-#define IRDMA_AE_MCG_QP_PROTOCOL_MISMATCH 0x0383
-#define IRDMA_AE_INVALID_ARP_ENTRY 0x0401
-#define IRDMA_AE_INVALID_TCP_OPTION_RCVD 0x0402
-#define IRDMA_AE_STALE_ARP_ENTRY 0x0403
-#define IRDMA_AE_INVALID_AH_ENTRY 0x0406
-#define IRDMA_AE_LLP_CLOSE_COMPLETE 0x0501
-#define IRDMA_AE_LLP_CONNECTION_RESET 0x0502
-#define IRDMA_AE_LLP_FIN_RECEIVED 0x0503
-#define IRDMA_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH 0x0504
-#define IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR 0x0505
-#define IRDMA_AE_LLP_SEGMENT_TOO_SMALL 0x0507
-#define IRDMA_AE_LLP_SYN_RECEIVED 0x0508
-#define IRDMA_AE_LLP_TERMINATE_RECEIVED 0x0509
-#define IRDMA_AE_LLP_TOO_MANY_RETRIES 0x050a
-#define IRDMA_AE_LLP_TOO_MANY_KEEPALIVE_RETRIES 0x050b
-#define IRDMA_AE_LLP_DOUBT_REACHABILITY 0x050c
-#define IRDMA_AE_LLP_CONNECTION_ESTABLISHED 0x050e
-#define IRDMA_AE_RESOURCE_EXHAUSTION 0x0520
-#define IRDMA_AE_RESET_SENT 0x0601
-#define IRDMA_AE_TERMINATE_SENT 0x0602
-#define IRDMA_AE_RESET_NOT_SENT 0x0603
-#define IRDMA_AE_LCE_QP_CATASTROPHIC 0x0700
-#define IRDMA_AE_LCE_FUNCTION_CATASTROPHIC 0x0701
-#define IRDMA_AE_LCE_CQ_CATASTROPHIC 0x0702
-#define IRDMA_AE_QP_SUSPEND_COMPLETE 0x0900
-
 enum irdma_device_caps_const {
 	IRDMA_WQE_SIZE = 4,
@@ -201,8 +111,7 @@ enum irdma_device_caps_const {
 	IRDMA_MAX_OUTBOUND_MSG_SIZE = 65537,
 	/* 64K +1 */
 	IRDMA_MAX_INBOUND_MSG_SIZE = 65537,
-	IRDMA_MAX_PUSH_PAGE_COUNT = 1024,
 	IRDMA_MAX_PE_ENA_VF_COUNT = 32,
 	IRDMA_MAX_VF_FPM_ID = 47,
 	IRDMA_MAX_SQ_PAYLOAD_SIZE = 2145386496,
 	IRDMA_MAX_INLINE_DATA_SIZE = 101,
@@ -229,12 +138,7 @@ enum irdma_flush_opcode {
 	FLUSH_RETRY_EXC_ERR,
 	FLUSH_MW_BIND_ERR,
 	FLUSH_REM_INV_REQ_ERR,
-};
-
-enum irdma_qp_event_type {
-	IRDMA_QP_EVENT_CATASTROPHIC,
-	IRDMA_QP_EVENT_ACCESS_ERR,
-	IRDMA_QP_EVENT_REQ_ERR,
+	FLUSH_RNR_RETRY_EXC_ERR,
 };
 
 enum irdma_cmpl_status {
@@ -282,12 +186,6 @@ struct irdma_cq_uk;
 struct irdma_qp_uk_init_info;
 struct irdma_cq_uk_init_info;
 
-struct irdma_sge {
-	irdma_tagged_offset tag_off;
-	u32 len;
-	irdma_stag stag;
-};
-
 struct irdma_ring {
 	volatile u32 head;
 	volatile u32 tail;	/* effective tail */
@@ -319,13 +217,13 @@ struct irdma_post_rq_info {
 struct irdma_rdma_write {
 	irdma_sgl lo_sg_list;
 	u32 num_lo_sges;
-	struct irdma_sge rem_addr;
+	struct ibv_sge rem_addr;
 };
 
 struct irdma_rdma_read {
 	irdma_sgl lo_sg_list;
 	u32 num_lo_sges;
-	struct irdma_sge rem_addr;
+	struct ibv_sge rem_addr;
 };
 
 struct irdma_bind_window {
@@ -399,11 +297,6 @@ struct irdma_cq_poll_info {
 	} stat;
 };
 
-struct qp_err_code {
-	enum irdma_flush_opcode flush_code;
-	enum irdma_qp_event_type event_type;
-};
-
 int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
 			       struct irdma_post_sq_info *info, bool post_sq);
 int irdma_uk_inline_send(struct irdma_qp_uk *qp,
@@ -426,9 +319,9 @@ int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
 				   bool post_sq);
 
 struct irdma_wqe_uk_ops {
-	void (*iw_copy_inline_data)(u8 *dest, struct irdma_sge *sge_list, u32 num_sges, u8 polarity);
+	void (*iw_copy_inline_data)(u8 *dest, struct ibv_sge *sge_list, u32 num_sges, u8 polarity);
 	u16 (*iw_inline_data_size_to_quanta)(u32 data_size);
-	void (*iw_set_fragment)(__le64 *wqe, u32 offset, struct irdma_sge *sge,
+	void (*iw_set_fragment)(__le64 *wqe, u32 offset, struct ibv_sge *sge,
 				u8 valid);
 	void (*iw_set_mw_bind_wqe)(__le64 *wqe,
 				   struct irdma_bind_window *op_info);
@@ -444,8 +337,6 @@ int irdma_uk_cq_init(struct irdma_cq_uk *cq,
 		     struct irdma_cq_uk_init_info *info);
 int irdma_uk_qp_init(struct irdma_qp_uk *qp,
 		     struct irdma_qp_uk_init_info *info);
-void irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info *ukinfo, u8 *sq_shift,
-			    u8 *rq_shift);
 int irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
 				 u32 *sq_depth, u8 *sq_shift);
 int irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo,
@@ -494,6 +385,7 @@ struct irdma_qp_uk {
 	u8 rwqe_polarity;
 	u8 rq_wqe_size;
 	u8 rq_wqe_size_multiplier;
+	u8 start_wqe_idx;
 	bool deferred_flag:1;
 	bool push_mode:1;	/* whether the last post wqe was pushed */
 	bool push_dropped:1;
@@ -541,6 +433,7 @@ struct irdma_qp_uk_init_info {
 	u32 sq_depth;
 	u32 rq_depth;
 	u8 first_sq_wq;
+	u8 start_wqe_idx;
 	u8 type;
 	u8 sq_shift;
 	u8 rq_shift;
@@ -574,75 +467,4 @@ int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift, u3
 void irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta,
 		       u32 wqe_idx, bool post_sq);
 void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx);
-
-static inline struct qp_err_code irdma_ae_to_qp_err_code(u16 ae_id)
-{
-	struct qp_err_code qp_err = { 0 };
-
-	switch (ae_id) {
-	case IRDMA_AE_AMP_BOUNDS_VIOLATION:
-	case IRDMA_AE_AMP_INVALID_STAG:
-	case IRDMA_AE_AMP_RIGHTS_VIOLATION:
-	case IRDMA_AE_AMP_UNALLOCATED_STAG:
-	case IRDMA_AE_AMP_BAD_PD:
-	case IRDMA_AE_AMP_BAD_QP:
-	case IRDMA_AE_AMP_BAD_STAG_KEY:
-	case IRDMA_AE_AMP_BAD_STAG_INDEX:
-	case IRDMA_AE_AMP_TO_WRAP:
-	case IRDMA_AE_PRIV_OPERATION_DENIED:
-		qp_err.flush_code = FLUSH_PROT_ERR;
-		qp_err.event_type = IRDMA_QP_EVENT_ACCESS_ERR;
-		break;
-	case IRDMA_AE_UDA_XMIT_BAD_PD:
-	case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
-		qp_err.flush_code = FLUSH_LOC_QP_OP_ERR;
-		qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
-		break;
-	case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
-	case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
-	case IRDMA_AE_UDA_L4LEN_INVALID:
-	case IRDMA_AE_DDP_UBE_INVALID_MO:
-	case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
-		qp_err.flush_code = FLUSH_LOC_LEN_ERR;
-		qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
-		break;
-	case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
-	case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
-		qp_err.flush_code = FLUSH_REM_ACCESS_ERR;
-		qp_err.event_type = IRDMA_QP_EVENT_ACCESS_ERR;
-		break;
-	case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS:
-	case IRDMA_AE_AMP_MWBIND_BIND_DISABLED:
-	case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
-	case IRDMA_AE_AMP_MWBIND_VALID_STAG:
-		qp_err.flush_code = FLUSH_MW_BIND_ERR;
-		qp_err.event_type = IRDMA_QP_EVENT_ACCESS_ERR;
-		break;
-	case IRDMA_AE_LLP_TOO_MANY_RETRIES:
-		qp_err.flush_code = FLUSH_RETRY_EXC_ERR;
-		qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
-		break;
-	case IRDMA_AE_IB_INVALID_REQUEST:
-		qp_err.flush_code = FLUSH_REM_INV_REQ_ERR;
-		qp_err.event_type = IRDMA_QP_EVENT_REQ_ERR;
-		break;
-	case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
-	case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
-	case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
-	case IRDMA_AE_IB_REMOTE_OP_ERROR:
-		qp_err.flush_code = FLUSH_REM_OP_ERR;
-		qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
-		break;
-	case IRDMA_AE_LCE_QP_CATASTROPHIC:
-		qp_err.flush_code = FLUSH_FATAL_ERR;
-		qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
-		break;
-	default:
-		qp_err.flush_code = FLUSH_GENERAL_ERR;
-		qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
-		break;
-	}
-
-	return qp_err;
-}
 #endif /* IRDMA_USER_H */
@@ -152,6 +152,7 @@ irdma_ualloc_pd(struct ibv_context *context)
 
 err_free:
 	free(iwupd);
 
+	errno = err;
 	return NULL;
 }
@@ -163,7 +164,6 @@ err_free:
 int
 irdma_ufree_pd(struct ibv_pd *pd)
 {
-	struct irdma_uvcontext *iwvctx = container_of(pd->context, struct irdma_uvcontext, ibv_ctx);
 	struct irdma_upd *iwupd;
 	int ret;
 
@@ -374,12 +374,12 @@ irdma_free_hw_buf(void *buf, size_t size)
- * @cqe_64byte_ena: enable 64byte cqe
  */
 static inline int
-get_cq_size(int ncqe, u8 hw_rev, bool cqe_64byte_ena)
+get_cq_size(int ncqe, u8 hw_rev)
 {
 	ncqe++;
 
 	/* Completions with immediate require 1 extra entry */
-	if (!cqe_64byte_ena && hw_rev > IRDMA_GEN_1)
+	if (hw_rev > IRDMA_GEN_1)
 		ncqe *= 2;
 
 	if (ncqe < IRDMA_U_MINCQ_SIZE)
@@ -388,11 +388,8 @@ get_cq_size(int ncqe, u8 hw_rev)
 	return ncqe;
 }
 
-static inline size_t get_cq_total_bytes(u32 cq_size, bool cqe_64byte_ena){
-	if (cqe_64byte_ena)
-		return roundup(cq_size * sizeof(struct irdma_extended_cqe), IRDMA_HW_PAGE_SIZE);
-	else
-		return roundup(cq_size * sizeof(struct irdma_cqe), IRDMA_HW_PAGE_SIZE);
+static inline size_t get_cq_total_bytes(u32 cq_size) {
+	return roundup(cq_size * sizeof(struct irdma_cqe), IRDMA_HW_PAGE_SIZE);
 }
 
 /**
@@ -420,7 +417,6 @@ ucreate_cq(struct ibv_context *context,
 	u32 cq_pages;
 	int ret, ncqe;
 	u8 hw_rev;
-	bool cqe_64byte_ena;
 
 	iwvctx = container_of(context, struct irdma_uvcontext, ibv_ctx);
 	uk_attrs = &iwvctx->uk_attrs;
@@ -454,11 +450,10 @@ ucreate_cq(struct ibv_context *context,
 		return NULL;
 	}
 
-	cqe_64byte_ena = uk_attrs->feature_flags & IRDMA_FEATURE_64_BYTE_CQE ? true : false;
-	info.cq_size = get_cq_size(attr_ex->cqe, hw_rev, cqe_64byte_ena);
+	info.cq_size = get_cq_size(attr_ex->cqe, hw_rev);
+	total_size = get_cq_total_bytes(info.cq_size);
 	iwucq->comp_vector = attr_ex->comp_vector;
 	LIST_INIT(&iwucq->resize_list);
-	total_size = get_cq_total_bytes(info.cq_size, cqe_64byte_ena);
 	cq_pages = total_size >> IRDMA_HW_PAGE_SHIFT;
 
 	if (!(uk_attrs->feature_flags & IRDMA_FEATURE_CQ_RESIZE))
@@ -528,8 +523,6 @@ ucreate_cq(struct ibv_context *context,
 	info.cq_id = resp.cq_id;
 	/* Do not report the CQE's reserved for immediate and burned by HW */
 	iwucq->verbs_cq.cq.cqe = ncqe;
-	if (cqe_64byte_ena)
-		info.avoid_mem_cflct = true;
 	info.cqe_alloc_db = (u32 *)((u8 *)iwvctx->db + IRDMA_DB_CQ_OFFSET);
 	irdma_uk_cq_init(&iwucq->cq, &info);
 	return &iwucq->verbs_cq.cq_ex;
@@ -585,7 +578,7 @@ static void
 irdma_free_cq_buf(struct irdma_cq_buf *cq_buf)
 {
 	ibv_cmd_dereg_mr(&cq_buf->vmr.ibv_mr);
-	irdma_free_hw_buf(cq_buf->cq.cq_base, cq_buf->buf_size);
+	irdma_free_hw_buf(cq_buf->cq.cq_base, get_cq_total_bytes(cq_buf->cq.cq_size));
 	free(cq_buf);
 }
 
@@ -1322,6 +1315,8 @@ irdma_vmapped_qp(struct irdma_uqp *iwuqp, struct ibv_pd *pd,
 
 	cmd.user_wqe_bufs = (__u64) ((uintptr_t)info->sq);
 	cmd.user_compl_ctx = (__u64) (uintptr_t)&iwuqp->qp;
+	cmd.comp_mask |= IRDMA_CREATE_QP_USE_START_WQE_IDX;
+
 	ret = ibv_cmd_create_qp(pd, &iwuqp->ibv_qp, attr, &cmd.ibv_cmd,
 				sizeof(cmd), &resp.ibv_resp,
 				sizeof(struct irdma_ucreate_qp_resp));
@@ -1331,6 +1326,8 @@ irdma_vmapped_qp(struct irdma_uqp *iwuqp, struct ibv_pd *pd,
 	info->sq_size = resp.actual_sq_size;
 	info->rq_size = resp.actual_rq_size;
 	info->first_sq_wq = legacy_mode ? 1 : resp.lsmm;
+	if (resp.comp_mask & IRDMA_CREATE_QP_USE_START_WQE_IDX)
+		info->start_wqe_idx = resp.start_wqe_idx;
 	info->qp_caps = resp.qp_caps;
 	info->qp_id = resp.qp_id;
 	iwuqp->irdma_drv_opt = resp.irdma_drv_opt;
@@ -1379,6 +1376,8 @@ irdma_ucreate_qp(struct ibv_pd *pd,
 
 	if (attr->cap.max_send_sge > uk_attrs->max_hw_wq_frags ||
 	    attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags ||
+	    attr->cap.max_send_wr > uk_attrs->max_hw_wq_quanta ||
+	    attr->cap.max_recv_wr > uk_attrs->max_hw_rq_quanta ||
 	    attr->cap.max_inline_data > uk_attrs->max_hw_inline) {
 		errno = EINVAL;
 		return NULL;
@@ -1430,18 +1429,12 @@ irdma_ucreate_qp(struct ibv_pd *pd,
 		attr->cap.max_recv_wr = info.rq_size;
 	}
 
-	iwuqp->recv_sges = calloc(attr->cap.max_recv_sge, sizeof(*iwuqp->recv_sges));
-	if (!iwuqp->recv_sges) {
-		status = errno;	/* preserve errno */
-		goto err_destroy_lock;
-	}
-
 	info.wqe_alloc_db = (u32 *)iwvctx->db;
 	info.legacy_mode = iwvctx->legacy_mode;
 	info.sq_wrtrk_array = calloc(info.sq_depth, sizeof(*info.sq_wrtrk_array));
 	if (!info.sq_wrtrk_array) {
 		status = errno;	/* preserve errno */
-		goto err_free_rsges;
+		goto err_destroy_lock;
 	}
 
 	info.rq_wrid_array = calloc(info.rq_depth, sizeof(*info.rq_wrid_array));
@@ -1475,8 +1468,6 @@ err_free_rq_wrid:
 	free(info.rq_wrid_array);
 err_free_sq_wrtrk:
 	free(info.sq_wrtrk_array);
-err_free_rsges:
-	free(iwuqp->recv_sges);
err_destroy_lock:
 	pthread_spin_destroy(&iwuqp->lock);
 err_free_qp:
@@ -1635,7 +1626,6 @@ irdma_udestroy_qp(struct ibv_qp *qp)
 		free(iwuqp->qp.rq_wrid_array);
 
 	irdma_free_hw_buf(iwuqp->qp.sq_base, iwuqp->buf_size);
-	free(iwuqp->recv_sges);
 	free(iwuqp);
 	return 0;
 
@@ -1645,26 +1635,6 @@ err:
 	return ret;
 }
 
-/**
- * irdma_copy_sg_list - copy sg list for qp
- * @sg_list: copied into sg_list
- * @sgl: copy from sgl
- * @num_sges: count of sg entries
- * @max_sges: count of max supported sg entries
- */
-static void
-irdma_copy_sg_list(struct irdma_sge *sg_list, struct ibv_sge *sgl,
-		   int num_sges)
-{
-	int i;
-
-	for (i = 0; i < num_sges; i++) {
-		sg_list[i].tag_off = sgl[i].addr;
-		sg_list[i].len = sgl[i].length;
-		sg_list[i].stag = sgl[i].lkey;
-	}
-}
-
 /**
  * calc_type2_mw_stag - calculate type 2 MW stag
  * @rkey: desired rkey of the MW
@@ -1743,7 +1713,7 @@ irdma_upost_send(struct ibv_qp *ib_qp, struct ibv_send_wr *ib_wr,
 				info.stag_to_inv = ib_wr->imm_data;
 			}
 			info.op.send.num_sges = ib_wr->num_sge;
-			info.op.send.sg_list = (struct irdma_sge *)ib_wr->sg_list;
+			info.op.send.sg_list = (struct ibv_sge *)ib_wr->sg_list;
 			if (ib_qp->qp_type == IBV_QPT_UD) {
 				struct irdma_uah *ah = container_of(ib_wr->wr.ud.ah,
 								    struct irdma_uah, ibv_ah);
@@ -1774,9 +1744,9 @@ irdma_upost_send(struct ibv_qp *ib_qp, struct ibv_send_wr *ib_wr,
 			info.op_type = IRDMA_OP_TYPE_RDMA_WRITE;
 
 			info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
-			info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
-			info.op.rdma_write.rem_addr.tag_off = ib_wr->wr.rdma.remote_addr;
-			info.op.rdma_write.rem_addr.stag = ib_wr->wr.rdma.rkey;
+			info.op.rdma_write.lo_sg_list = ib_wr->sg_list;
+			info.op.rdma_write.rem_addr.addr = ib_wr->wr.rdma.remote_addr;
+			info.op.rdma_write.rem_addr.lkey = ib_wr->wr.rdma.rkey;
 			if (ib_wr->send_flags & IBV_SEND_INLINE)
 				err = irdma_uk_inline_rdma_write(&iwuqp->qp, &info, false);
 			else
@@ -1788,10 +1758,10 @@ irdma_upost_send(struct ibv_qp *ib_qp, struct ibv_send_wr *ib_wr,
 				break;
 			}
 			info.op_type = IRDMA_OP_TYPE_RDMA_READ;
-			info.op.rdma_read.rem_addr.tag_off = ib_wr->wr.rdma.remote_addr;
-			info.op.rdma_read.rem_addr.stag = ib_wr->wr.rdma.rkey;
+			info.op.rdma_read.rem_addr.addr = ib_wr->wr.rdma.remote_addr;
+			info.op.rdma_read.rem_addr.lkey = ib_wr->wr.rdma.rkey;
 
-			info.op.rdma_read.lo_sg_list = (void *)ib_wr->sg_list;
+			info.op.rdma_read.lo_sg_list = ib_wr->sg_list;
 			info.op.rdma_read.num_lo_sges = ib_wr->num_sge;
 			err = irdma_uk_rdma_read(&iwuqp->qp, &info, false, false);
 			break;
@@ -1873,14 +1843,11 @@ irdma_upost_recv(struct ibv_qp *ib_qp, struct ibv_recv_wr *ib_wr,
 		 struct ibv_recv_wr **bad_wr)
 {
 	struct irdma_post_rq_info post_recv = {};
-	struct irdma_sge *sg_list;
 	struct irdma_uqp *iwuqp;
 	bool reflush = false;
 	int err = 0;
 
 	iwuqp = container_of(ib_qp, struct irdma_uqp, ibv_qp);
-	sg_list = iwuqp->recv_sges;
-
 	err = pthread_spin_lock(&iwuqp->lock);
 	if (err)
 		return err;
@@ -1897,8 +1864,7 @@ irdma_upost_recv(struct ibv_qp *ib_qp, struct ibv_recv_wr *ib_wr,
 		}
 		post_recv.num_sges = ib_wr->num_sge;
 		post_recv.wr_id = ib_wr->wr_id;
-		irdma_copy_sg_list(sg_list, ib_wr->sg_list, ib_wr->num_sge);
-		post_recv.sg_list = sg_list;
+		post_recv.sg_list = ib_wr->sg_list;
 		err = irdma_uk_post_receive(&iwuqp->qp, &post_recv);
 		if (err) {
 			*bad_wr = ib_wr;
@@ -2022,7 +1988,6 @@ irdma_uresize_cq(struct ibv_cq *cq, int cqe)
 	u32 cq_pages;
 	int cqe_needed;
 	int ret = 0;
-	bool cqe_64byte_ena;
 
 	iwucq = container_of(cq, struct irdma_ucq, verbs_cq.cq);
 	iwvctx = container_of(cq->context, struct irdma_uvcontext, ibv_ctx);
@@ -2034,14 +1999,11 @@ irdma_uresize_cq(struct ibv_cq *cq, int cqe)
 	if (cqe < uk_attrs->min_hw_cq_size || cqe > uk_attrs->max_hw_cq_size - 1)
 		return EINVAL;
 
-	cqe_64byte_ena = uk_attrs->feature_flags & IRDMA_FEATURE_64_BYTE_CQE ? true : false;
-
-	cqe_needed = get_cq_size(cqe, uk_attrs->hw_rev, cqe_64byte_ena);
-
+	cqe_needed = get_cq_size(cqe, uk_attrs->hw_rev);
 	if (cqe_needed == iwucq->cq.cq_size)
 		return 0;
 
-	cq_size = get_cq_total_bytes(cqe_needed, cqe_64byte_ena);
+	cq_size = get_cq_total_bytes(cqe_needed);
 	cq_pages = cq_size >> IRDMA_HW_PAGE_SHIFT;
 	cq_base = irdma_alloc_hw_buf(cq_size);
 	if (!cq_base)
@@ -2077,7 +2039,6 @@ irdma_uresize_cq(struct ibv_cq *cq, int cqe)
 		goto err_resize;
 
 	memcpy(&cq_buf->cq, &iwucq->cq, sizeof(cq_buf->cq));
-	cq_buf->buf_size = cq_size;
 	cq_buf->vmr = iwucq->vmr;
 	iwucq->vmr = new_mr;
 	irdma_uk_cq_resize(&iwucq->cq, cq_base, cqe_needed);
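One practical consequence of the hunks above: irdma_upost_recv() no longer copies the application's SGEs into a driver-private array (irdma_copy_sg_list() and recv_sges are gone); the struct ibv_sge list from the work request is handed to irdma_uk_post_receive() as-is. At the libibverbs API level nothing changes — a standard receive posting for orientation (buffer, length, and MR names are placeholders):

/* Standard libibverbs receive posting; after this commit the SGE flows
 * through the irdma user-space driver without an intermediate copy. */
struct ibv_sge sge = {
    .addr = (uintptr_t)buf,     /* registered buffer (placeholder) */
    .length = buf_len,
    .lkey = mr->lkey,           /* from ibv_reg_mr() */
};
struct ibv_recv_wr wr = {
    .wr_id = 1,
    .sg_list = &sge,
    .num_sge = 1,
};
struct ibv_recv_wr *bad_wr;
int ret = ibv_post_recv(qp, &wr, &bad_wr);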
@@ -77,7 +77,6 @@
 /* constants */
 #define STATS_TIMER_DELAY 60000
 
-/* a couple of linux size defines */
 #define BIT_ULL(a) (1ULL << (a))
 #define min(a, b) ((a) > (b) ? (b) : (a))
 #ifndef likely
@@ -1,7 +1,7 @@
 /*-
  * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
  *
- * Copyright (c) 2021 - 2022 Intel Corporation
+ * Copyright (c) 2021 - 2023 Intel Corporation
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -660,8 +660,7 @@ static const struct irdma_sw_stats_tunable_info irdma_sws_list[] = {
 	 "manage_hmc_pm_func_table", 0},
 	{IRDMA_OP_SUSPEND, "suspend", "suspend", 0},
 	{IRDMA_OP_RESUME, "resume", "resume", 0},
-	{IRDMA_OP_MANAGE_VCHNL_REQ_PBLE_BP, "manage_vchnl_req_pble_bp",
-	 "manage_vchnl_req_pble_bp", 0},
+	{25, "manage_vchnl_req_pble_bp", "manage_vchnl_req_pble_bp", 0},
 	{IRDMA_OP_QUERY_FPM_VAL, "query_fpm_val", "query_fpm_val", 0},
 	{IRDMA_OP_COMMIT_FPM_VAL, "commit_fpm_val", "commit_fpm_val", 0},
 	{IRDMA_OP_AH_CREATE, "ah_create", "ah_create", 0},
@@ -53,15 +53,12 @@
 	ibdev.dma_device = (dev)
 #define set_max_sge(props, rf) \
 	((props)->max_sge = (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags)
-#define rdma_query_gid(ibdev, port, index, gid) \
-	ib_get_cached_gid(ibdev, port, index, gid, NULL)
 #define kmap(pg) page_address(pg)
 #define kmap_local_page(pg) page_address(pg)
 #define kunmap(pg)
 #define kunmap_local(pg)
 
-#define IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION IB_CQ_FLAGS_TIMESTAMP_COMPLETION
 #define kc_irdma_destroy_qp(ibqp, udata) irdma_destroy_qp(ibqp)
 #ifndef IB_QP_ATTR_STANDARD_BITS
 #define IB_QP_ATTR_STANDARD_BITS GENMASK(20, 0)
 #endif
@@ -71,34 +68,26 @@
 
 #define IRDMA_VER_LEN 24
 
-#ifndef EVNT_HNDLR_CRITERR
 #if ICE_RDMA_MAJOR_VERSION == 1 && ICE_RDMA_MINOR_VERSION == 1
 #define EVNT_HNDLR_CRITERR
 #else
 #undef EVNT_HNDLR_CRITERR
 #endif
-
-#endif
 void kc_set_roce_uverbs_cmd_mask(struct irdma_device *iwdev);
 void kc_set_rdma_uverbs_cmd_mask(struct irdma_device *iwdev);
 
 struct irdma_tunable_info {
 	struct sysctl_ctx_list irdma_sysctl_ctx;
 	struct sysctl_oid *irdma_sysctl_tree;
 	struct sysctl_oid *qos_sysctl_tree;
 	struct sysctl_oid *sws_sysctl_tree;
 	char drv_ver[IRDMA_VER_LEN];
 	u8 roce_ena;
 };
 
 typedef u_int if_addr_cb_t(void *, struct ifaddr *, u_int);
-u_int if_foreach_addr_type(if_t ifp, int type, if_addr_cb_t cb, void *cb_arg);
 typedef int (*if_foreach_cb_t)(if_t, void *);
-int if_foreach(if_foreach_cb_t cb, void *cb_arg);
 #ifndef if_iter
 struct if_iter {
 	void *context[4];
 };
 #endif
 
+u_int if_foreach_addr_type(if_t ifp, int type, if_addr_cb_t cb, void *cb_arg);
+int if_foreach(if_foreach_cb_t cb, void *cb_arg);
 if_t if_iter_start(struct if_iter *iter);
 if_t if_iter_next(struct if_iter *iter);
 void if_iter_finish(struct if_iter *iter);
@@ -241,8 +230,8 @@ void irdma_cq_free_rsrc(struct irdma_pci_f *rf, struct irdma_cq *iwcq);
 int irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr,
 			    struct irdma_device *iwdev);
 void irdma_setup_virt_qp(struct irdma_device *iwdev,
-			 struct irdma_qp *iwqp,
-			 struct irdma_qp_init_info *init_info);
+			 struct irdma_qp *iwqp,
+			 struct irdma_qp_init_info *init_info);
 int irdma_setup_kmode_qp(struct irdma_device *iwdev,
 			 struct irdma_qp *iwqp,
 			 struct irdma_qp_init_info *info,
@@ -258,7 +247,7 @@ void irdma_iw_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
 				      struct irdma_qp_host_ctx_info *ctx_info);
 int irdma_cqp_create_qp_cmd(struct irdma_qp *iwqp);
 void irdma_dealloc_push_page(struct irdma_pci_f *rf,
-			     struct irdma_sc_qp *qp);
+			     struct irdma_qp *iwqp);
 int irdma_process_resize_list(struct irdma_cq *iwcq, struct irdma_device *iwdev,
 			      struct irdma_cq_buf *lcqe_buf);
 int irdma_destroy_cq(struct ib_cq *ib_cq);
@@ -52,7 +52,7 @@
 /**
  * Driver version
  */
-char irdma_driver_version[] = "1.2.17-k";
+char irdma_driver_version[] = "1.2.36-k";
 
 /**
  * irdma_init_tunable - prepare tunables
@@ -76,7 +76,7 @@ static u64 icrdma_masks[IRDMA_MAX_MASKS] = {
 	ICRDMA_CQPSQ_CQ_CEQID,
 	ICRDMA_CQPSQ_CQ_CQID,
 	ICRDMA_COMMIT_FPM_CQCNT,
-	ICRDMA_CQPSQ_UPESD_HMCFNID
+	ICRDMA_CQPSQ_UPESD_HMCFNID,
 };
 
 static u8 icrdma_shifts[IRDMA_MAX_SHIFTS] = {
@@ -86,7 +86,7 @@ static u8 icrdma_shifts[IRDMA_MAX_SHIFTS] = {
 	ICRDMA_CQPSQ_CQ_CEQID_S,
 	ICRDMA_CQPSQ_CQ_CQID_S,
 	ICRDMA_COMMIT_FPM_CQCNT_S,
-	ICRDMA_CQPSQ_UPESD_HMCFNID_S
+	ICRDMA_CQPSQ_UPESD_HMCFNID_S,
 };
 
 /**
@@ -210,8 +210,6 @@ icrdma_init_hw(struct irdma_sc_dev *dev)
 
 		dev->hw_regs[i] = (u32 IOMEM *) (hw_addr + icrdma_regs[i]);
 	}
-	dev->hw_attrs.max_hw_vf_fpm_id = IRDMA_MAX_VF_FPM_ID;
-	dev->hw_attrs.first_hw_vf_fpm_id = IRDMA_FIRST_VF_FPM_ID;
 
 	for (i = 0; i < IRDMA_MAX_SHIFTS; ++i)
 		dev->hw_shifts[i] = icrdma_shifts[i];
@@ -231,6 +229,7 @@ icrdma_init_hw(struct irdma_sc_dev *dev)
 	dev->hw_attrs.max_hw_ord = ICRDMA_MAX_ORD_SIZE;
 	dev->hw_attrs.max_stat_inst = ICRDMA_MAX_STATS_COUNT;
 	dev->hw_attrs.max_stat_idx = IRDMA_HW_STAT_INDEX_MAX_GEN_2;
+	dev->hw_attrs.max_hw_device_pages = ICRDMA_MAX_PUSH_PAGE_COUNT;
 
 	dev->hw_attrs.uk_attrs.max_hw_wq_frags = ICRDMA_MAX_WQ_FRAGMENT_COUNT;
 	dev->hw_attrs.uk_attrs.max_hw_read_sges = ICRDMA_MAX_SGE_RD;
@@ -1,7 +1,7 @@
 /*-
  * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
  *
- * Copyright (c) 2017 - 2022 Intel Corporation
+ * Copyright (c) 2017 - 2023 Intel Corporation
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -123,6 +123,7 @@ enum icrdma_device_caps_const {
 	ICRDMA_MAX_IRD_SIZE = 32,
 	ICRDMA_MAX_ORD_SIZE = 32,
 	ICRDMA_MIN_WQ_SIZE = 8 /* WQEs */,
+	ICRDMA_MAX_PUSH_PAGE_COUNT = 256,
 };
 
@@ -54,6 +54,11 @@ enum irdma_memreg_type {
 
 enum {
 	IRDMA_ALLOC_UCTX_USE_RAW_ATTR = 1 << 0,
+	IRDMA_ALLOC_UCTX_MIN_HW_WQ_SIZE = 1 << 1,
+};
+
+enum {
+	IRDMA_CREATE_QP_USE_START_WQE_IDX = 1 << 0,
 };
 
 struct irdma_alloc_ucontext_req {
@@ -82,6 +87,8 @@ struct irdma_alloc_ucontext_resp {
 	__u8 hw_rev;
 	__u8 rsvd2;
 	__aligned_u64 comp_mask;
+	__u16 min_hw_wq_size;
+	__u8 rsvd3[6];
 };
 
 struct irdma_alloc_pd_resp {
@@ -101,6 +108,7 @@ struct irdma_create_cq_req {
 struct irdma_create_qp_req {
 	__aligned_u64 user_wqe_bufs;
 	__aligned_u64 user_compl_ctx;
+	__aligned_u64 comp_mask;
 };
 
 struct irdma_mem_reg_req {
@@ -130,6 +138,9 @@ struct irdma_create_qp_resp {
 	__u8 lsmm;
 	__u8 rsvd;
 	__u32 qp_caps;
+	__aligned_u64 comp_mask;
+	__u8 start_wqe_idx;
+	__u8 rsvd2[7];
 };
 
 struct irdma_modify_qp_resp {
@@ -183,7 +183,7 @@ enum irdma_vers {
 	IRDMA_GEN_RSVD = 0,
 	IRDMA_GEN_1 = 1,
 	IRDMA_GEN_2 = 2,
-	IRDMA_GEN_MAX = 2,
+	IRDMA_GEN_MAX = IRDMA_GEN_2,
 };
 
 struct irdma_uk_attrs {
@@ -1079,14 +1079,14 @@ irdma_parse_mpa(struct irdma_cm_node *cm_node, u8 *buf, u32 *type,
 
 	*type = IRDMA_MPA_REQUEST_ACCEPT;
 
-	if (len < sizeof(struct ietf_mpa_v1)) {
+	if (len < sizeof(*mpa_frame)) {
 		irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
 			    "ietf buffer small (%x)\n", len);
 		return -EINVAL;
 	}
 
 	mpa_frame = (struct ietf_mpa_v1 *)buf;
-	mpa_hdr_len = sizeof(struct ietf_mpa_v1);
+	mpa_hdr_len = sizeof(*mpa_frame);
 	priv_data_len = ntohs(mpa_frame->priv_data_len);
 
 	if (priv_data_len > IETF_MAX_PRIV_DATA_LEN) {
@@ -1616,7 +1616,8 @@ static u8 irdma_iw_get_vlan_prio(u32 *loc_addr, u8 prio, bool ipv4)
 }
 
 /**
- * irdma_netdev_vlan_ipv6 - Gets the netdev and mac
+ * irdma_get_vlan_mac_ipv6 - Get the vlan and mac for an IPv6
+ * address
  * @addr: local IPv6 address
  * @vlan_id: vlan id for the given IPv6 address
 * @mac: mac address for the given IPv6 address
@@ -1624,14 +1625,12 @@ static u8 irdma_iw_get_vlan_prio(u32 *loc_addr, u8 prio, bool ipv4)
  * Returns the net_device of the IPv6 address and also sets the
  * vlan id and mac for that address.
  */
-if_t
-irdma_netdev_vlan_ipv6(struct iw_cm_id *cm_id, u32 *addr, u16 *vlan_id, u8 *mac)
+void
+irdma_get_vlan_mac_ipv6(struct iw_cm_id *cm_id, u32 *addr, u16 *vlan_id, u8 *mac)
 {
 	if_t ip_dev = NULL;
 	struct in6_addr laddr6;
-	struct vnet *vnet = &init_net;
+#ifdef VIMAGE
+	struct vnet *vnet = irdma_cmid_to_vnet(cm_id);
+#endif
 	struct ifaddr *ifa;
 	u16 scope_id = 0;
 
@@ -1646,10 +1645,9 @@ irdma_netdev_vlan_ipv6(struct iw_cm_id *cm_id, u32 *addr, u16 *vlan_id, u8 *mac)
 		scope_id = ntohs(laddr6.__u6_addr.__u6_addr16[1]);
 
 #ifdef VIMAGE
-	vnet = irdma_cmid_to_vnet(cm_id);
-#endif
 	ip_dev = ip6_ifp_find(vnet, laddr6, scope_id);
+#else
+	ip_dev = ip6_ifp_find(&init_net, laddr6, scope_id);
+#endif
 	if (ip_dev) {
 		if (vlan_id)
 			*vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
@@ -1657,8 +1655,6 @@ irdma_netdev_vlan_ipv6(struct iw_cm_id *cm_id, u32 *addr, u16 *vlan_id, u8 *mac)
 		if (ifa && ifa->ifa_addr && mac)
 			ether_addr_copy(mac, if_getlladdr(ip_dev));
 	}
-
-	return ip_dev;
 }
 
 /**
@@ -1669,16 +1665,13 @@ u16
 irdma_get_vlan_ipv4(struct iw_cm_id *cm_id, u32 *addr)
 {
 	if_t netdev;
-	struct vnet *vnet = &init_net;
+#ifdef VIMAGE
+	struct vnet *vnet = irdma_cmid_to_vnet(cm_id);
+#endif
 	u16 vlan_id = 0xFFFF;
 
 #ifdef VIMAGE
-	vnet = irdma_cmid_to_vnet(cm_id);
-#endif
 	netdev = ip_ifp_find(vnet, htonl(addr[0]));
+#else
+	netdev = ip_ifp_find(&init_net, htonl(addr[0]));
+#endif
 	if (netdev) {
 		vlan_id = rdma_vlan_dev_vlan_id(netdev);
 		dev_put(netdev);
@@ -2546,7 +2539,7 @@ irdma_handle_syn_pkt(struct irdma_cm_node *cm_node,
 	u32 inc_sequence;
 	int optionsize;
 
-	optionsize = (tcph->th_off << 2) - sizeof(struct tcphdr);
+	optionsize = (tcph->th_off << 2) - sizeof(*tcph);
 	inc_sequence = ntohl(tcph->th_seq);
 
 	switch (cm_node->state) {
@@ -2613,7 +2606,7 @@ irdma_handle_synack_pkt(struct irdma_cm_node *cm_node,
 	u32 inc_sequence;
 	int optionsize;
 
-	optionsize = (tcph->th_off << 2) - sizeof(struct tcphdr);
+	optionsize = (tcph->th_off << 2) - sizeof(*tcph);
 	inc_sequence = ntohl(tcph->th_seq);
 	switch (cm_node->state) {
 	case IRDMA_CM_STATE_SYN_SENT:
@@ -2687,7 +2680,7 @@ irdma_handle_ack_pkt(struct irdma_cm_node *cm_node,
 	int optionsize;
 	u32 datasize = rbuf->datalen;
 
-	optionsize = (tcph->th_off << 2) - sizeof(struct tcphdr);
+	optionsize = (tcph->th_off << 2) - sizeof(*tcph);
 
 	if (irdma_check_seq(cm_node, tcph))
 		return -EINVAL;
@@ -3537,8 +3530,8 @@ irdma_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 		cm_node->vlan_id = irdma_get_vlan_ipv4(cm_id, cm_node->loc_addr);
 	} else {
 		cm_node->ipv4 = false;
-		irdma_netdev_vlan_ipv6(cm_id, cm_node->loc_addr,
-				       &cm_node->vlan_id, NULL);
+		irdma_get_vlan_mac_ipv6(cm_id, cm_node->loc_addr, &cm_node->vlan_id,
+					NULL);
 	}
 	irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM, "Accept vlan_id=%d\n",
 		    cm_node->vlan_id);
@@ -3745,7 +3738,7 @@ irdma_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 				    raddr6->sin6_addr.__u6_addr.__u6_addr32);
 		cm_info.loc_port = ntohs(laddr6->sin6_port);
 		cm_info.rem_port = ntohs(raddr6->sin6_port);
-		irdma_netdev_vlan_ipv6(cm_id, cm_info.loc_addr, &cm_info.vlan_id, NULL);
+		irdma_get_vlan_mac_ipv6(cm_id, cm_info.loc_addr, &cm_info.vlan_id, NULL);
 	}
 	cm_info.cm_id = cm_id;
 	cm_info.qh_qpid = iwdev->vsi.ilq->qp_id;
@@ -3874,8 +3867,8 @@ irdma_create_listen(struct iw_cm_id *cm_id, int backlog)
 				    laddr6->sin6_addr.__u6_addr.__u6_addr32);
 		cm_info.loc_port = ntohs(laddr6->sin6_port);
 		if (!IN6_IS_ADDR_UNSPECIFIED(&laddr6->sin6_addr)) {
-			irdma_netdev_vlan_ipv6(cm_id, cm_info.loc_addr,
-					       &cm_info.vlan_id, NULL);
+			irdma_get_vlan_mac_ipv6(cm_id, cm_info.loc_addr,
+						&cm_info.vlan_id, NULL);
 		} else {
 			cm_info.vlan_id = 0xFFFF;
 			wildcard = true;
@@ -1,7 +1,7 @@
 /*-
  * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
  *
- * Copyright (c) 2015 - 2022 Intel Corporation
+ * Copyright (c) 2015 - 2023 Intel Corporation
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@ -785,14 +785,14 @@ irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 * qp_ctx,
|
||||
set_64bit_val(qp_ctx, IRDMA_BYTE_144,
|
||||
FIELD_PREP(IRDMAQPC_STAT_INDEX, info->stats_idx));
|
||||
set_64bit_val(qp_ctx, IRDMA_BYTE_152,
|
||||
FIELD_PREP(IRDMAQPC_MACADDRESS, irdma_mac_to_u64(roce_info->mac_addr)));
|
||||
FIELD_PREP(IRDMAQPC_MACADDRESS,
|
||||
irdma_mac_to_u64(roce_info->mac_addr)));
|
||||
set_64bit_val(qp_ctx, IRDMA_BYTE_160,
|
||||
FIELD_PREP(IRDMAQPC_ORDSIZE, roce_info->ord_size) |
|
||||
FIELD_PREP(IRDMAQPC_IRDSIZE, irdma_sc_get_encoded_ird_size(roce_info->ird_size)) |
|
||||
FIELD_PREP(IRDMAQPC_WRRDRSPOK, roce_info->wr_rdresp_en) |
|
||||
FIELD_PREP(IRDMAQPC_RDOK, roce_info->rd_en) |
|
||||
FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, info->stats_idx_valid) |
|
||||
FIELD_PREP(IRDMAQPC_BINDEN, roce_info->bind_en) |
|
||||
FIELD_PREP(IRDMAQPC_FASTREGEN, roce_info->fast_reg_en) |
|
||||
FIELD_PREP(IRDMAQPC_DCQCNENABLE, roce_info->dcqcn_en) |
|
||||
FIELD_PREP(IRDMAQPC_RCVNOICRC, roce_info->rcv_no_icrc) |
|
||||
@ -1016,7 +1016,6 @@ irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 * qp_ctx,
|
||||
FIELD_PREP(IRDMAQPC_WRRDRSPOK, iw->wr_rdresp_en) |
|
||||
FIELD_PREP(IRDMAQPC_RDOK, iw->rd_en) |
|
||||
FIELD_PREP(IRDMAQPC_SNDMARKERS, iw->snd_mark_en) |
|
||||
FIELD_PREP(IRDMAQPC_BINDEN, iw->bind_en) |
|
||||
FIELD_PREP(IRDMAQPC_FASTREGEN, iw->fast_reg_en) |
|
||||
FIELD_PREP(IRDMAQPC_PRIVEN, iw->priv_mode_en) |
|
||||
FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, info->stats_idx_valid) |
|
||||
@ -1466,6 +1465,15 @@ irdma_sc_gen_rts_ae(struct irdma_sc_qp *qp)
|
||||
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
|
||||
irdma_debug_buf(qp->dev, IRDMA_DEBUG_QP, "CONN EST WQE", wqe,
|
||||
IRDMA_QP_WQE_MIN_SIZE);
|
||||
if (qp->qp_uk.start_wqe_idx) {
|
||||
wqe = qp_uk->sq_base[3].elem;
|
||||
hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
|
||||
FIELD_PREP(IRDMAQPSQ_LOCALFENCE, 1) |
|
||||
FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
|
||||
irdma_wmb(); /* make sure WQE is written before valid bit is set */
|
||||
|
||||
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
@ -1831,7 +1839,7 @@ irdma_bld_terminate_hdr(struct irdma_sc_qp *qp,
|
||||
if (copy_len)
|
||||
irdma_memcpy(termhdr + 1, pkt, copy_len);
|
||||
|
||||
return sizeof(struct irdma_terminate_hdr) + copy_len;
|
||||
return sizeof(*termhdr) + copy_len;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -2124,9 +2132,6 @@ irdma_sc_gather_stats(struct irdma_sc_cqp *cqp,
|
||||
IRDMA_CQP_WQE_SIZE * 8);
|
||||
|
||||
irdma_sc_cqp_post_sq(cqp);
|
||||
irdma_debug(cqp->dev, IRDMA_DEBUG_STATS,
|
||||
"CQP SQ head 0x%x tail 0x%x size 0x%x\n", cqp->sq_ring.head,
|
||||
cqp->sq_ring.tail, cqp->sq_ring.size);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -2607,17 +2612,18 @@ irdma_sc_cq_create(struct irdma_sc_cq *cq, u64 scratch,
set_64bit_val(wqe, IRDMA_BYTE_0, cq->cq_uk.cq_size);
set_64bit_val(wqe, IRDMA_BYTE_8, RS_64_1(cq, 1));
set_64bit_val(wqe, IRDMA_BYTE_16,
FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, cq->shadow_read_threshold));
set_64bit_val(wqe, IRDMA_BYTE_32, (cq->virtual_map ? 0 : cq->cq_pa));
FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD,
cq->shadow_read_threshold));
set_64bit_val(wqe, IRDMA_BYTE_32, cq->virtual_map ? 0 : cq->cq_pa);
set_64bit_val(wqe, IRDMA_BYTE_40, cq->shadow_area_pa);
set_64bit_val(wqe, IRDMA_BYTE_48,
FIELD_PREP(IRDMA_CQPSQ_CQ_FIRSTPMPBLIDX, (cq->virtual_map ? cq->first_pm_pbl_idx : 0)));
FIELD_PREP(IRDMA_CQPSQ_CQ_FIRSTPMPBLIDX,
cq->virtual_map ? cq->first_pm_pbl_idx : 0));
set_64bit_val(wqe, IRDMA_BYTE_56,
FIELD_PREP(IRDMA_CQPSQ_TPHVAL, cq->tph_val) |
FIELD_PREP(IRDMA_CQPSQ_VSIIDX, cq->vsi->vsi_idx));

hdr = FLD_LS_64(cq->dev, cq->cq_uk.cq_id, IRDMA_CQPSQ_CQ_CQID) |
FLD_LS_64(cq->dev, (cq->ceq_id_valid ? cq->ceq_id : 0),
FLD_LS_64(cq->dev, cq->ceq_id_valid ? cq->ceq_id : 0,
IRDMA_CQPSQ_CQ_CEQID) |
FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CQ) |
FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, cq->pbl_chunk_size) |
@ -2935,10 +2941,12 @@ irdma_sc_parse_fpm_commit_buf(struct irdma_sc_dev *dev, __le64 * buf,
IRDMA_HMC_IW_HDR);
irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_152, info,
IRDMA_HMC_IW_MD);
irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_160, info,
IRDMA_HMC_IW_OOISC);
irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_168, info,
IRDMA_HMC_IW_OOISCFFL);
if (dev->cqp->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) {
irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_160, info,
IRDMA_HMC_IW_OOISC);
irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_168, info,
IRDMA_HMC_IW_OOISCFFL);
}
}

/* searching for the last object in HMC to find the size of the HMC area. */
@ -3071,15 +3079,18 @@ irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 * buf,

irdma_sc_decode_fpm_query(buf, 144, obj_info, IRDMA_HMC_IW_HDR);
irdma_sc_decode_fpm_query(buf, 152, obj_info, IRDMA_HMC_IW_MD);
irdma_sc_decode_fpm_query(buf, 160, obj_info, IRDMA_HMC_IW_OOISC);

get_64bit_val(buf, IRDMA_BYTE_168, &temp);
obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt = (u32)temp;
obj_info[IRDMA_HMC_IW_OOISCFFL].size = 4;
hmc_fpm_misc->ooiscf_block_size = FIELD_GET(IRDMA_QUERY_FPM_OOISCFBLOCKSIZE, temp);
if (!hmc_fpm_misc->ooiscf_block_size &&
obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt)
return -EINVAL;
if (dev->cqp->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) {
irdma_sc_decode_fpm_query(buf, 160, obj_info, IRDMA_HMC_IW_OOISC);

get_64bit_val(buf, IRDMA_BYTE_168, &temp);
obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt = (u32)temp;
obj_info[IRDMA_HMC_IW_OOISCFFL].size = 4;
hmc_fpm_misc->ooiscf_block_size = FIELD_GET(IRDMA_QUERY_FPM_OOISCFBLOCKSIZE, temp);
if (!hmc_fpm_misc->ooiscf_block_size &&
obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt)
return -EINVAL;
}

return 0;
}
@ -3251,6 +3262,7 @@ irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err)

temp = FIELD_PREP(IRDMA_CQPHC_ENABLED_VFS, cqp->ena_vf_count) |
FIELD_PREP(IRDMA_CQPHC_HMC_PROFILE, cqp->hmc_profile);

if (hw_rev >= IRDMA_GEN_2)
temp |= FIELD_PREP(IRDMA_CQPHC_EN_REM_ENDPOINT_TRK,
cqp->en_rem_endpoint_trk);
@ -3399,11 +3411,13 @@ irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp, bool free_hwcqp)
void
irdma_sc_ccq_arm(struct irdma_sc_cq *ccq)
{
unsigned long flags;
u64 temp_val;
u16 sw_cq_sel;
u8 arm_next_se;
u8 arm_seq_num;

spin_lock_irqsave(&ccq->dev->cqp_lock, flags);
get_64bit_val(ccq->cq_uk.shadow_area, IRDMA_BYTE_32, &temp_val);
sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
@ -3414,6 +3428,7 @@ irdma_sc_ccq_arm(struct irdma_sc_cq *ccq)
FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, 1);
set_64bit_val(ccq->cq_uk.shadow_area, IRDMA_BYTE_32, temp_val);
spin_unlock_irqrestore(&ccq->dev->cqp_lock, flags);

irdma_wmb(); /* make sure shadow area is updated before arming */

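The hunk above serializes the read-modify-write of the CQ shadow area under cqp_lock, so two CPUs cannot interleave their updates of the same doorbell word. A minimal user-space analogue, with a pthread mutex standing in for the kernel spinlock:

/* Sketch of the locked read-modify-write added above (stand-in types). */
#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t dbsa_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t shadow_word;	/* stands in for the CQ shadow area word */

static void arm_cq(uint64_t arm_bits)
{
	pthread_mutex_lock(&dbsa_lock);
	/* Read, merge, and write back atomically w.r.t. other threads. */
	uint64_t val = shadow_word;
	val |= arm_bits;
	shadow_word = val;
	pthread_mutex_unlock(&dbsa_lock);
}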
@ -3436,6 +3451,7 @@ irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
u32 error;
u8 polarity;
int ret_code = 0;
unsigned long flags;

if (ccq->cq_uk.avoid_mem_cflct)
cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(&ccq->cq_uk);
@ -3484,7 +3500,9 @@ irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,

irdma_wmb(); /* make sure shadow area is updated before moving tail */

spin_lock_irqsave(&cqp->dev->cqp_lock, flags);
IRDMA_RING_MOVE_TAIL(cqp->sq_ring);
spin_unlock_irqrestore(&cqp->dev->cqp_lock, flags);
atomic64_inc(&cqp->completed_ops);

return ret_code;
@ -4152,6 +4170,7 @@ irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
case IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST:
case IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
case IRDMA_AE_ROCE_REQ_LENGTH_ERROR:
case IRDMA_AE_INVALID_ARP_ENTRY:
case IRDMA_AE_INVALID_TCP_OPTION_RCVD:
case IRDMA_AE_STALE_ARP_ENTRY:
@ -4704,10 +4723,11 @@ static u32 irdma_est_sd(struct irdma_sc_dev *dev,
u64 sd;
int i;

for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++)
for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++) {
if (i != IRDMA_HMC_IW_PBLE)
size += round_up(hmc_info->hmc_obj[i].cnt *
hmc_info->hmc_obj[i].size, 512);
}

pble_info = &hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE];
size += round_up(pble_info->cnt * pble_info->size, 512);
@ -4717,7 +4737,7 @@ static u32 irdma_est_sd(struct irdma_sc_dev *dev,
sd = size >> 21;
if (sd > 0xFFFFFFFF) {
irdma_debug(dev, IRDMA_DEBUG_HMC, "sd overflow[%ld]\n", sd);
sd = 0xFFFFFFFF - 1;
sd = 0xFFFFFFFE;
}

return (u32)sd;
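A worked run of the arithmetic above may help: shifting the total rounded object size right by 21 divides by 2 MiB, which suggests one segment descriptor per 2 MiB of backing memory, and the result is clamped so it fits a u32. The 5 GiB figure below is an arbitrary example:

/* Worked example of the SD estimate above (example input size). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t size = 5ULL * 1024 * 1024 * 1024;	/* 5 GiB of HMC objects */
	uint64_t sd = size >> 21;			/* 5 GiB / 2 MiB = 2560 */

	if (sd > 0xFFFFFFFFULL)
		sd = 0xFFFFFFFE;			/* clamp, as in the hunk */
	printf("SDs needed: %llu\n", (unsigned long long)sd);
	return 0;
}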
@ -4773,10 +4793,9 @@ irdma_sc_query_rdma_features(struct irdma_sc_cqp *cqp,
int
irdma_get_rdma_features(struct irdma_sc_dev *dev)
{
int ret_code;
int ret_code, byte_idx, feat_type, feat_cnt, feat_idx;
struct irdma_dma_mem feat_buf;
u64 temp;
u16 byte_idx, feat_type, feat_cnt, feat_idx;

feat_buf.size = IRDMA_FEATURE_BUF_SIZE;
feat_buf.va = irdma_allocate_dma_mem(dev->hw, &feat_buf, feat_buf.size,
@ -4819,7 +4838,7 @@ irdma_get_rdma_features(struct irdma_sc_dev *dev)
irdma_debug_buf(dev, IRDMA_DEBUG_WQE, "QUERY RDMA FEATURES", feat_buf.va,
feat_cnt * 8);

for (byte_idx = 0, feat_idx = 0; feat_idx < min(feat_cnt, (u16)IRDMA_MAX_FEATURES);
for (byte_idx = 0, feat_idx = 0; feat_idx < min(feat_cnt, IRDMA_MAX_FEATURES);
feat_idx++, byte_idx += 8) {
get_64bit_val(feat_buf.va, byte_idx, &temp);
feat_type = FIELD_GET(IRDMA_FEATURE_TYPE, temp);
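The feature buffer is walked as an array of 64-bit words, extracting a type field from each with FIELD_GET. A self-contained sketch of that parse, with an invented TYPE/INFO split that is not the device's real layout:

/* Sketch of FIELD_GET-style feature-word parsing (hypothetical masks). */
#include <stdint.h>
#include <stdio.h>

#define FEAT_TYPE_MASK 0xFFFF000000000000ULL	/* hypothetical: bits 63:48 */
#define FEAT_INFO_MASK 0x0000FFFFFFFFFFFFULL	/* hypothetical: bits 47:0 */
#define FIELD_GET(mask, val) (((val) & (mask)) >> __builtin_ctzll(mask))

int main(void)
{
	uint64_t words[] = { 0x0001000000000080ULL, 0x00020000000000FFULL };

	for (unsigned i = 0; i < 2; i++)
		printf("feature %llu -> info 0x%llx\n",
		       (unsigned long long)FIELD_GET(FEAT_TYPE_MASK, words[i]),
		       (unsigned long long)FIELD_GET(FEAT_INFO_MASK, words[i]));
	return 0;
}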
@ -5421,14 +5440,16 @@ irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable)
void
sc_vsi_update_stats(struct irdma_sc_vsi *vsi)
{
struct irdma_gather_stats *gather_stats;
struct irdma_gather_stats *last_gather_stats;
struct irdma_dev_hw_stats *hw_stats = &vsi->pestat->hw_stats;
struct irdma_gather_stats *gather_stats =
vsi->pestat->gather_info.gather_stats_va;
struct irdma_gather_stats *last_gather_stats =
vsi->pestat->gather_info.last_gather_stats_va;
const struct irdma_hw_stat_map *map = vsi->dev->hw_stats_map;
u16 max_stat_idx = vsi->dev->hw_attrs.max_stat_idx;

gather_stats = vsi->pestat->gather_info.gather_stats_va;
last_gather_stats = vsi->pestat->gather_info.last_gather_stats_va;
irdma_update_stats(&vsi->pestat->hw_stats, gather_stats,
last_gather_stats, vsi->dev->hw_stats_map,
vsi->dev->hw_attrs.max_stat_idx);
irdma_update_stats(hw_stats, gather_stats, last_gather_stats,
map, max_stat_idx);
}

/**
@ -5498,7 +5519,6 @@ irdma_sc_dev_init(struct irdma_sc_dev *dev, struct irdma_device_init_info *info)
dev->hw_attrs.max_hw_outbound_msg_size = IRDMA_MAX_OUTBOUND_MSG_SIZE;
dev->hw_attrs.max_mr_size = IRDMA_MAX_MR_SIZE;
dev->hw_attrs.max_hw_inbound_msg_size = IRDMA_MAX_INBOUND_MSG_SIZE;
dev->hw_attrs.max_hw_device_pages = IRDMA_MAX_PUSH_PAGE_COUNT;
dev->hw_attrs.uk_attrs.max_hw_inline = IRDMA_MAX_INLINE_DATA_SIZE;
dev->hw_attrs.max_hw_wqes = IRDMA_MAX_WQ_ENTRIES;
dev->hw_attrs.max_qp_wr = IRDMA_MAX_QP_WRS(IRDMA_MAX_QUANTA_PER_WR);
@ -5526,7 +5546,6 @@ irdma_sc_dev_init(struct irdma_sc_dev *dev, struct irdma_device_init_info *info)
val, db_size);
return -ENODEV;
}
dev->db_addr = dev->hw->hw_addr + (uintptr_t)dev->hw_regs[IRDMA_DB_ADDR_OFFSET];

return ret_code;
}

@ -249,7 +249,6 @@ enum irdma_cqp_op_type {
IRDMA_OP_MANAGE_HMC_PM_FUNC_TABLE = 22,
IRDMA_OP_SUSPEND = 23,
IRDMA_OP_RESUME = 24,
IRDMA_OP_MANAGE_VCHNL_REQ_PBLE_BP = 25,
IRDMA_OP_QUERY_FPM_VAL = 26,
IRDMA_OP_COMMIT_FPM_VAL = 27,
IRDMA_OP_AH_CREATE = 28,
@ -292,7 +291,6 @@ enum irdma_cqp_op_type {
#define IRDMA_CQP_OP_DEALLOC_STAG 0x0d
#define IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE 0x0e
#define IRDMA_CQP_OP_MANAGE_ARP 0x0f
#define IRDMA_CQP_OP_MANAGE_VCHNL_REQ_PBLE_BP 0x10
#define IRDMA_CQP_OP_MANAGE_PUSH_PAGES 0x11
#define IRDMA_CQP_OP_QUERY_RDMA_FEATURES 0x12
#define IRDMA_CQP_OP_UPLOAD_CONTEXT 0x13
@ -849,7 +847,6 @@ enum irdma_cqp_op_type {
#define IRDMA_CQPSQ_UCTX_RAWFORMAT BIT_ULL(61)
#define IRDMA_CQPSQ_UCTX_FREEZEQP_S 62
#define IRDMA_CQPSQ_UCTX_FREEZEQP BIT_ULL(62)

#define IRDMA_CQPSQ_MHMC_VFIDX_S 0
#define IRDMA_CQPSQ_MHMC_VFIDX GENMASK_ULL(15, 0)
#define IRDMA_CQPSQ_MHMC_FREEPMFN_S 62

@ -394,6 +394,7 @@ irdma_process_aeq(struct irdma_pci_f *rf)
case IRDMA_AE_LLP_TOO_MANY_RETRIES:
case IRDMA_AE_LCE_QP_CATASTROPHIC:
case IRDMA_AE_LCE_FUNCTION_CATASTROPHIC:
case IRDMA_AE_LLP_TOO_MANY_RNRS:
case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
default:
irdma_dev_err(&iwdev->ibdev,
@ -490,7 +491,7 @@ irdma_save_msix_info(struct irdma_pci_f *rf)
{
struct irdma_qvlist_info *iw_qvlist;
struct irdma_qv_info *iw_qvinfo;
u32 ceq_idx;
u16 ceq_idx;
u32 i;
u32 size;

@ -500,8 +501,8 @@ irdma_save_msix_info(struct irdma_pci_f *rf)
}

size = sizeof(struct irdma_msix_vector) * rf->msix_count;
size += sizeof(struct irdma_qvlist_info);
size += sizeof(struct irdma_qv_info) * rf->msix_count - 1;
size += sizeof(*iw_qvlist);
size += sizeof(*iw_qvinfo) * rf->msix_count - 1;
rf->iw_msixtbl = kzalloc(size, GFP_KERNEL);
if (!rf->iw_msixtbl)
return -ENOMEM;
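The sizing above folds a vector table and a variable-length qv list into one allocation. A sketch of that shape under stand-in types (the struct names below are illustrative, not the driver's):

/* Sketch of single-allocation sizing for a table plus trailing list. */
#include <stdlib.h>

struct vec { int idx; };
struct qv { int v; };
struct qvlist { unsigned num; struct qv qv[1]; };	/* one entry built in */

static void *alloc_msix_table(unsigned count)
{
	size_t size = sizeof(struct vec) * count;

	size += sizeof(struct qvlist);
	/* qvlist already carries one qv entry, hence count - 1 more. */
	size += sizeof(struct qv) * (count - 1);
	return calloc(1, size);
}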
@ -600,6 +601,13 @@ irdma_destroy_irq(struct irdma_pci_f *rf,
|
||||
|
||||
dev->irq_ops->irdma_dis_irq(dev, msix_vec->idx);
|
||||
irdma_free_irq(rf, msix_vec);
|
||||
if (rf == dev_id) {
|
||||
tasklet_kill(&rf->dpc_tasklet);
|
||||
} else {
|
||||
struct irdma_ceq *iwceq = (struct irdma_ceq *)dev_id;
|
||||
|
||||
tasklet_kill(&iwceq->dpc_tasklet);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
@ -964,13 +972,13 @@ irdma_create_cqp(struct irdma_pci_f *rf)
u16 maj_err, min_err;
int i, status;

cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL);
memset(cqp->cqp_requests, 0, sqsize * sizeof(*cqp->cqp_requests));
cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests),
GFP_KERNEL);
if (!cqp->cqp_requests)
return -ENOMEM;

cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL);
memset(cqp->scratch_array, 0, sqsize * sizeof(*cqp->scratch_array));
cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array),
GFP_KERNEL);
if (!cqp->scratch_array) {
status = -ENOMEM;
goto err_scratch;
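The dropped memset calls were redundant: kcalloc already returns zeroed memory, and dereferencing the pointer before the NULL check was unsafe. A user-space sketch of the corrected allocate-check-unwind shape, with calloc standing in for kcalloc:

/* Sketch of zeroed allocation plus reverse-order unwind (stand-in types). */
#include <stdlib.h>

struct req { int cmd; };

static int create_tables(struct req **reqs, void ***scratch, size_t n)
{
	*reqs = calloc(n, sizeof(**reqs));	/* already zeroed, no memset */
	if (!*reqs)
		return -1;

	*scratch = calloc(n, sizeof(**scratch));
	if (!*scratch)
		goto err_scratch;		/* unwind in reverse order */

	return 0;

err_scratch:
	free(*reqs);
	*reqs = NULL;
	return -1;
}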
@ -1190,7 +1198,7 @@ fail_intr:
|
||||
*/
|
||||
static int
|
||||
irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
|
||||
u32 ceq_id, struct irdma_msix_vector *msix_vec)
|
||||
u16 ceq_id, struct irdma_msix_vector *msix_vec)
|
||||
{
|
||||
int status;
|
||||
|
||||
@ -1264,7 +1272,7 @@ irdma_cfg_aeq_vector(struct irdma_pci_f *rf)
|
||||
*/
|
||||
static int
|
||||
irdma_create_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
|
||||
u32 ceq_id, struct irdma_sc_vsi *vsi)
|
||||
u16 ceq_id, struct irdma_sc_vsi *vsi)
|
||||
{
|
||||
int status;
|
||||
struct irdma_ceq_init_info info = {0};
|
||||
@ -1381,7 +1389,7 @@ static int
|
||||
irdma_setup_ceqs(struct irdma_pci_f *rf, struct irdma_sc_vsi *vsi)
|
||||
{
|
||||
u32 i;
|
||||
u32 ceq_id;
|
||||
u16 ceq_id;
|
||||
struct irdma_ceq *iwceq;
|
||||
struct irdma_msix_vector *msix_vec;
|
||||
int status;
|
||||
@ -2212,14 +2220,19 @@ irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq)
|
||||
|
||||
cqp_request = (struct irdma_cqp_request *)
|
||||
(uintptr_t)info.scratch;
|
||||
if (info.error && irdma_cqp_crit_err(dev, cqp_request->info.cqp_cmd,
|
||||
if (info.error && irdma_cqp_crit_err(dev,
|
||||
cqp_request->info.cqp_cmd,
|
||||
info.maj_err_code,
|
||||
info.min_err_code))
|
||||
irdma_dev_err(&rf->iwdev->ibdev, "cqp opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\n",
|
||||
info.op_code, info.maj_err_code, info.min_err_code);
|
||||
irdma_dev_err(&rf->iwdev->ibdev,
|
||||
"cqp opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\n",
|
||||
info.op_code, info.maj_err_code,
|
||||
info.min_err_code);
|
||||
if (cqp_request) {
|
||||
cqp_request->compl_info.maj_err_code = info.maj_err_code;
|
||||
cqp_request->compl_info.min_err_code = info.min_err_code;
|
||||
cqp_request->compl_info.maj_err_code =
|
||||
info.maj_err_code;
|
||||
cqp_request->compl_info.min_err_code =
|
||||
info.min_err_code;
|
||||
cqp_request->compl_info.op_ret_val = info.op_ret_val;
|
||||
cqp_request->compl_info.error = info.error;
|
||||
irdma_complete_cqp_request(&rf->cqp, cqp_request);
|
||||
|
@ -160,6 +160,7 @@ err:

#define IRDMA_ALLOC_UCTX_MIN_REQ_LEN offsetofend(struct irdma_alloc_ucontext_req, rsvd8)
#define IRDMA_ALLOC_UCTX_MIN_RESP_LEN offsetofend(struct irdma_alloc_ucontext_resp, rsvd)

/**
* irdma_alloc_ucontext - Allocate the user context data structure
* @ibdev: ib device pointer
@ -228,6 +229,8 @@ irdma_alloc_ucontext(struct ib_device *ibdev, struct ib_udata *udata)
uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size;
uresp.hw_rev = uk_attrs->hw_rev;
uresp.comp_mask |= IRDMA_ALLOC_UCTX_USE_RAW_ATTR;
uresp.min_hw_wq_size = uk_attrs->min_hw_wq_size;
uresp.comp_mask |= IRDMA_ALLOC_UCTX_MIN_HW_WQ_SIZE;

bar_off =
(uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
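This is the comp_mask pattern for growing a kernel/user ABI: the new min_hw_wq_size field is only meaningful when its comp_mask bit is set, so old userspace ignores it and new userspace can fall back when an old kernel leaves the bit clear. A consumer-side sketch (the legacy default of 32 is a hypothetical value, not the driver's):

/* Sketch of comp_mask-gated field consumption (hypothetical default). */
#include <stdint.h>

#define RESP_HAS_MIN_WQ_SIZE (1u << 1)	/* mirrors IRDMA_ALLOC_UCTX_MIN_HW_WQ_SIZE */

struct uctx_resp {
	uint64_t comp_mask;
	uint16_t min_hw_wq_size;	/* valid only if the bit above is set */
};

static unsigned min_wq_size(const struct uctx_resp *resp)
{
	if (resp->comp_mask & RESP_HAS_MIN_WQ_SIZE)
		return resp->min_hw_wq_size;
	return 32;	/* assumed legacy default for old kernels */
}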
@ -285,6 +288,7 @@ irdma_dealloc_ucontext(struct ib_ucontext *context)
|
||||
}
|
||||
|
||||
#define IRDMA_ALLOC_PD_MIN_RESP_LEN offsetofend(struct irdma_alloc_pd_resp, rsvd)
|
||||
|
||||
/**
|
||||
* irdma_alloc_pd - allocate protection domain
|
||||
* @ibdev: IB device
|
||||
@ -481,23 +485,41 @@ static int
irdma_create_ah_wait(struct irdma_pci_f *rf,
struct irdma_sc_ah *sc_ah, bool sleep)
{
int ret;

if (!sleep) {
int cnt = rf->sc_dev.hw_attrs.max_cqp_compl_wait_time_ms *
CQP_TIMEOUT_THRESHOLD;
struct irdma_cqp_request *cqp_request =
sc_ah->ah_info.cqp_request;

do {
irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
mdelay(1);
} while (!sc_ah->ah_info.ah_valid && --cnt);
} while (!READ_ONCE(cqp_request->request_done) && --cnt);

if (!cnt)
return -ETIMEDOUT;
if (cnt && !cqp_request->compl_info.op_ret_val) {
irdma_put_cqp_request(&rf->cqp, cqp_request);
sc_ah->ah_info.ah_valid = true;
} else {
ret = !cnt ? -ETIMEDOUT : -EINVAL;
irdma_dev_err(&rf->iwdev->ibdev, "CQP create AH error ret = %d opt_ret_val = %d",
ret, cqp_request->compl_info.op_ret_val);
irdma_put_cqp_request(&rf->cqp, cqp_request);
if (!cnt && !rf->reset) {
rf->reset = true;
rf->gen_ops.request_reset(rf);
}
return ret;
}
}

return 0;
}

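The rewrite polls the request's own completion flag instead of the ah_valid side effect, so the wait observes the CQP completion directly. A minimal user-space sketch of the busy-poll-with-timeout shape, with a C11 atomic standing in for READ_ONCE:

/* Sketch of bounded polling on a completion flag (stand-in primitives). */
#include <stdatomic.h>
#include <unistd.h>

static int wait_done(atomic_int *done, int budget_ms)
{
	while (budget_ms--) {
		if (atomic_load(done))
			return 0;	/* completion observed */
		usleep(1000);		/* ~1 ms, like mdelay(1) */
	}
	return -1;			/* timed out */
}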
#define IRDMA_CREATE_AH_MIN_RESP_LEN offsetofend(struct irdma_create_ah_resp, rsvd)
|
||||
|
||||
|
||||
void
|
||||
irdma_ether_copy(u8 *dmac, struct ib_ah_attr *attr)
|
||||
{
|
||||
@ -610,17 +632,15 @@ irdma_create_ah(struct ib_pd *ibpd,
|
||||
goto err_gid_l2;
|
||||
|
||||
err = irdma_ah_cqp_op(iwdev->rf, sc_ah, IRDMA_OP_AH_CREATE,
|
||||
sleep, irdma_gsi_ud_qp_ah_cb, sc_ah);
|
||||
sleep, NULL, sc_ah);
|
||||
if (err) {
|
||||
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "CQP-OP Create AH fail");
|
||||
goto err_gid_l2;
|
||||
}
|
||||
|
||||
err = irdma_create_ah_wait(rf, sc_ah, sleep);
|
||||
if (err) {
|
||||
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_DEV, "CQP create AH timed out");
|
||||
if (err)
|
||||
goto err_gid_l2;
|
||||
}
|
||||
|
||||
if (udata) {
|
||||
uresp.ah_id = ah->sc_ah.ah_info.ah_idx;
|
||||
@ -652,7 +672,7 @@ irdma_free_qp_rsrc(struct irdma_qp *iwqp)
|
||||
u32 qp_num = iwqp->ibqp.qp_num;
|
||||
|
||||
irdma_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
|
||||
irdma_dealloc_push_page(rf, &iwqp->sc_qp);
|
||||
irdma_dealloc_push_page(rf, iwqp);
|
||||
if (iwqp->sc_qp.vsi) {
|
||||
irdma_qp_rem_qos(&iwqp->sc_qp);
|
||||
iwqp->sc_qp.dev->ws_remove(iwqp->sc_qp.vsi,
|
||||
@ -846,12 +866,17 @@ irdma_create_qp(struct ib_pd *ibpd,
|
||||
|
||||
if (udata) {
|
||||
/* GEN_1 legacy support with libi40iw does not have expanded uresp struct */
|
||||
if (udata->outlen < sizeof(uresp)) {
|
||||
if (udata->outlen == IRDMA_CREATE_QP_MIN_RESP_LEN) {
|
||||
uresp.lsmm = 1;
|
||||
uresp.push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX_GEN_1;
|
||||
} else {
|
||||
if (rdma_protocol_iwarp(&iwdev->ibdev, 1))
|
||||
if (rdma_protocol_iwarp(&iwdev->ibdev, 1)) {
|
||||
uresp.lsmm = 1;
|
||||
if (qp->qp_uk.start_wqe_idx) {
|
||||
uresp.comp_mask |= IRDMA_CREATE_QP_USE_START_WQE_IDX;
|
||||
uresp.start_wqe_idx = qp->qp_uk.start_wqe_idx;
|
||||
}
|
||||
}
|
||||
}
|
||||
uresp.actual_sq_size = init_info.qp_uk_init_info.sq_size;
|
||||
uresp.actual_rq_size = init_info.qp_uk_init_info.rq_size;
|
||||
@ -862,7 +887,7 @@ irdma_create_qp(struct ib_pd *ibpd,
|
||||
min(sizeof(uresp), udata->outlen));
|
||||
if (err_code) {
|
||||
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "copy_to_udata failed\n");
|
||||
kc_irdma_destroy_qp(&iwqp->ibqp, udata);
|
||||
irdma_destroy_qp(&iwqp->ibqp);
|
||||
return ERR_PTR(err_code);
|
||||
}
|
||||
}
|
||||
@ -1357,50 +1382,6 @@ irdma_destroy_cq(struct ib_cq *ib_cq)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* irdma_alloc_mw - Allocate memory window
|
||||
* @pd: Protection domain
|
||||
* @type: Window type
|
||||
* @udata: user data pointer
|
||||
*/
|
||||
struct ib_mw *
|
||||
irdma_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
|
||||
struct ib_udata *udata)
|
||||
{
|
||||
struct irdma_device *iwdev = to_iwdev(pd->device);
|
||||
struct irdma_mr *iwmr;
|
||||
int err_code;
|
||||
u32 stag;
|
||||
|
||||
if (type != IB_MW_TYPE_1 && type != IB_MW_TYPE_2)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
|
||||
if (!iwmr)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
stag = irdma_create_stag(iwdev);
|
||||
if (!stag) {
|
||||
kfree(iwmr);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
iwmr->stag = stag;
|
||||
iwmr->ibmw.rkey = stag;
|
||||
iwmr->ibmw.pd = pd;
|
||||
iwmr->ibmw.type = type;
|
||||
iwmr->ibmw.device = pd->device;
|
||||
|
||||
err_code = irdma_hw_alloc_mw(iwdev, iwmr);
|
||||
if (err_code) {
|
||||
irdma_free_stag(iwdev, stag);
|
||||
kfree(iwmr);
|
||||
return ERR_PTR(err_code);
|
||||
}
|
||||
|
||||
return &iwmr->ibmw;
|
||||
}
|
||||
|
||||
/**
|
||||
* kc_set_loc_seq_num_mss - Set local seq number and mss
|
||||
* @cm_node: cm node info
|
||||
@ -1569,7 +1550,7 @@ irdma_query_gid_roce(struct ib_device *ibdev, u8 port, int index,
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = rdma_query_gid(ibdev, port, index, gid);
|
||||
ret = ib_get_cached_gid(ibdev, port, index, gid, NULL);
|
||||
if (ret == -EAGAIN) {
|
||||
memcpy(gid, &zgid, sizeof(*gid));
|
||||
return 0;
|
||||
@ -1861,9 +1842,6 @@ kc_set_rdma_uverbs_cmd_mask(struct irdma_device *iwdev)
|
||||
BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP) |
|
||||
BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ) |
|
||||
BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP) |
|
||||
BIT_ULL(IB_USER_VERBS_CMD_ALLOC_MW) |
|
||||
BIT_ULL(IB_USER_VERBS_CMD_BIND_MW) |
|
||||
BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_MW) |
|
||||
BIT_ULL(IB_USER_VERBS_CMD_POST_RECV) |
|
||||
BIT_ULL(IB_USER_VERBS_CMD_POST_SEND);
|
||||
iwdev->ibdev.uverbs_ex_cmd_mask =
|
||||
|
@ -1,7 +1,7 @@
|
||||
/*-
|
||||
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
|
||||
*
|
||||
* Copyright (c) 2015 - 2022 Intel Corporation
|
||||
* Copyright (c) 2015 - 2023 Intel Corporation
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
@ -181,7 +181,7 @@ struct irdma_cqp_request {
void (*callback_fcn)(struct irdma_cqp_request *cqp_request);
void *param;
struct irdma_cqp_compl_info compl_info;
bool request_done; /* READ/WRITE_ONCE macros operate on it */
u8 request_done; /* READ/WRITE_ONCE macros operate on it */
bool waiting:1;
bool dynamic:1;
};
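The bool-to-u8 switch gives READ_ONCE/WRITE_ONCE a scalar with a guaranteed one-byte representation, separate from the adjacent bitfields that share storage and cannot be accessed tear-free on their own. A sketch of the resulting layout, modelling the once-accessors with a C11 relaxed/release atomic:

/* Sketch: flag as its own addressable byte next to lock-protected bitfields. */
#include <stdatomic.h>
#include <stdint.h>

struct request {
	_Atomic uint8_t request_done;	/* whole byte, safe for lock-free flagging */
	unsigned int waiting:1;		/* packed bits, only touched under a lock */
	unsigned int dynamic:1;
};

static void complete_request(struct request *r)
{
	atomic_store_explicit(&r->request_done, 1, memory_order_release);
}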
@ -232,7 +232,7 @@ struct irdma_msix_vector {
|
||||
u32 idx;
|
||||
u32 irq;
|
||||
u32 cpu_affinity;
|
||||
u32 ceq_id;
|
||||
u16 ceq_id;
|
||||
char name[IRDMA_IRQ_NAME_STR_LEN];
|
||||
struct resource *res;
|
||||
void *tag;
|
||||
@ -374,7 +374,6 @@ struct irdma_device {
|
||||
u32 roce_ackcreds;
|
||||
u32 vendor_id;
|
||||
u32 vendor_part_id;
|
||||
u32 push_mode;
|
||||
u32 rcv_wnd;
|
||||
u16 mac_ip_table_idx;
|
||||
u16 vsi_num;
|
||||
@ -388,6 +387,7 @@ struct irdma_device {
|
||||
bool override_ooo:1;
|
||||
bool override_rd_fence_rate:1;
|
||||
bool override_rtomin:1;
|
||||
bool push_mode:1;
|
||||
bool roce_mode:1;
|
||||
bool roce_dcqcn_en:1;
|
||||
bool dcb_vlan_mode:1;
|
||||
@ -573,8 +573,8 @@ void irdma_gen_ae(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
|
||||
void irdma_copy_ip_ntohl(u32 *dst, __be32 *src);
|
||||
void irdma_copy_ip_htonl(__be32 *dst, u32 *src);
|
||||
u16 irdma_get_vlan_ipv4(struct iw_cm_id *cm_id, u32 *addr);
|
||||
if_t irdma_netdev_vlan_ipv6(struct iw_cm_id *cm_id, u32 *addr, u16 *vlan_id,
|
||||
u8 *mac);
|
||||
void irdma_get_vlan_mac_ipv6(struct iw_cm_id *cm_id, u32 *addr, u16 *vlan_id,
|
||||
u8 *mac);
|
||||
struct ib_mr *irdma_reg_phys_mr(struct ib_pd *ib_pd, u64 addr, u64 size,
|
||||
int acc, u64 *iova_start);
|
||||
int irdma_upload_qp_context(struct irdma_qp *iwqp, bool freeze, bool raw);
|
||||
@ -586,7 +586,6 @@ int irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd,
|
||||
bool wait,
|
||||
void (*callback_fcn)(struct irdma_cqp_request *cqp_request),
|
||||
void *cb_param);
|
||||
void irdma_gsi_ud_qp_ah_cb(struct irdma_cqp_request *cqp_request);
|
||||
void irdma_udqp_qs_worker(struct work_struct *work);
|
||||
bool irdma_cq_empty(struct irdma_cq *iwcq);
|
||||
int irdma_netdevice_event(struct notifier_block *notifier, unsigned long event,
|
||||
|
@ -1,7 +1,7 @@
|
||||
/*-
|
||||
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
|
||||
*
|
||||
* Copyright (c) 2015 - 2022 Intel Corporation
|
||||
* Copyright (c) 2015 - 2023 Intel Corporation
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
|
@ -1,7 +1,7 @@
|
||||
/*-
|
||||
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
|
||||
*
|
||||
* Copyright (c) 2016 - 2022 Intel Corporation
|
||||
* Copyright (c) 2016 - 2023 Intel Corporation
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
|
@ -1,7 +1,7 @@
|
||||
/*-
|
||||
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
|
||||
*
|
||||
* Copyright (c) 2015 - 2022 Intel Corporation
|
||||
* Copyright (c) 2015 - 2023 Intel Corporation
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
@ -182,7 +182,7 @@ irdma_puda_alloc_buf(struct irdma_sc_dev *dev,
|
||||
struct irdma_puda_buf *buf;
|
||||
struct irdma_virt_mem buf_mem;
|
||||
|
||||
buf_mem.size = sizeof(struct irdma_puda_buf);
|
||||
buf_mem.size = sizeof(*buf);
|
||||
buf_mem.va = kzalloc(buf_mem.size, GFP_KERNEL);
|
||||
if (!buf_mem.va)
|
||||
return NULL;
|
||||
@ -1010,7 +1010,7 @@ irdma_puda_allocbufs(struct irdma_puda_rsrc *rsrc, u32 count)
|
||||
bool virtdma = false;
|
||||
unsigned long flags;
|
||||
|
||||
buf_mem.size = count * sizeof(struct irdma_puda_buf);
|
||||
buf_mem.size = count * sizeof(*buf);
|
||||
buf_mem.va = kzalloc(buf_mem.size, GFP_KERNEL);
|
||||
if (!buf_mem.va) {
|
||||
irdma_debug(rsrc->dev, IRDMA_DEBUG_PUDA,
|
||||
@ -1105,7 +1105,7 @@ irdma_puda_create_rsrc(struct irdma_sc_vsi *vsi,
|
||||
struct irdma_virt_mem *vmem;
|
||||
|
||||
info->count = 1;
|
||||
pudasize = sizeof(struct irdma_puda_rsrc);
|
||||
pudasize = sizeof(*rsrc);
|
||||
sqwridsize = info->sq_size * sizeof(struct irdma_sq_uk_wr_trk_info);
|
||||
rqwridsize = info->rq_size * 8;
|
||||
switch (info->type) {
|
||||
|
@ -42,6 +42,8 @@
|
||||
#include "irdma_hmc.h"
|
||||
#include "irdma_uda.h"
|
||||
#include "irdma_ws.h"
|
||||
#include "irdma_pble.h"
|
||||
|
||||
enum irdma_debug_flag {
|
||||
IRDMA_DEBUG_NONE = 0x00000000,
|
||||
IRDMA_DEBUG_ERR = 0x00000001,
|
||||
@ -70,6 +72,8 @@ enum irdma_debug_flag {
|
||||
IRDMA_DEBUG_ALL = 0xFFFFFFFF,
|
||||
};
|
||||
|
||||
#define RSVD_OFFSET 0xFFFFFFFF
|
||||
|
||||
enum irdma_page_size {
|
||||
IRDMA_PAGE_SIZE_4K = 0,
|
||||
IRDMA_PAGE_SIZE_2M,
|
||||
@ -472,7 +476,7 @@ struct irdma_sc_cq {
|
||||
bool virtual_map:1;
|
||||
bool check_overflow:1;
|
||||
bool ceq_id_valid:1;
|
||||
bool tph_en;
|
||||
bool tph_en:1;
|
||||
};
|
||||
|
||||
struct irdma_sc_qp {
|
||||
@ -520,9 +524,9 @@ struct irdma_sc_qp {
|
||||
};
|
||||
|
||||
struct irdma_stats_inst_info {
|
||||
bool use_hmc_fcn_index;
|
||||
u16 hmc_fn_id;
|
||||
u16 stats_idx;
|
||||
bool use_hmc_fcn_index:1;
|
||||
};
|
||||
|
||||
struct irdma_up_info {
|
||||
@ -570,7 +574,7 @@ struct irdma_qos {
|
||||
u8 traffic_class;
|
||||
u8 rel_bw;
|
||||
u8 prio_type;
|
||||
bool valid;
|
||||
bool valid:1;
|
||||
};
|
||||
|
||||
struct irdma_config_check {
|
||||
@ -623,7 +627,6 @@ struct irdma_sc_dev {
|
||||
__le64 *fpm_query_buf;
|
||||
__le64 *fpm_commit_buf;
|
||||
struct irdma_hw *hw;
|
||||
u8 IOMEM *db_addr;
|
||||
u32 IOMEM *wqe_alloc_db;
|
||||
u32 IOMEM *cq_arm_db;
|
||||
u32 IOMEM *aeq_alloc_db;
|
||||
@ -649,8 +652,6 @@ struct irdma_sc_dev {
|
||||
u32 debug_mask;
|
||||
u16 num_vfs;
|
||||
u16 hmc_fn_id;
|
||||
u8 vf_id;
|
||||
bool vchnl_up:1;
|
||||
bool ceq_valid:1;
|
||||
u8 pci_rev;
|
||||
int (*ws_add)(struct irdma_sc_vsi *vsi, u8 user_pri);
|
||||
@ -666,7 +667,7 @@ struct irdma_modify_cq_info {
|
||||
u8 pbl_chunk_size;
|
||||
u32 first_pm_pbl_idx;
|
||||
bool virtual_map:1;
|
||||
bool check_overflow;
|
||||
bool check_overflow:1;
|
||||
bool cq_resize:1;
|
||||
};
|
||||
|
||||
@ -676,7 +677,7 @@ struct irdma_create_qp_info {
|
||||
bool cq_num_valid:1;
|
||||
bool arp_cache_idx_valid:1;
|
||||
bool mac_valid:1;
|
||||
bool force_lpb;
|
||||
bool force_lpb:1;
|
||||
u8 next_iwarp_state;
|
||||
};
|
||||
|
||||
@ -709,7 +710,7 @@ struct irdma_ccq_cqe_info {
|
||||
u16 maj_err_code;
|
||||
u16 min_err_code;
|
||||
u8 op_code;
|
||||
bool error;
|
||||
bool error:1;
|
||||
};
|
||||
|
||||
struct irdma_qos_tc_info {
|
||||
@ -751,7 +752,7 @@ struct irdma_vsi_init_info {
|
||||
struct irdma_vsi_stats_info {
|
||||
struct irdma_vsi_pestat *pestat;
|
||||
u8 fcn_id;
|
||||
bool alloc_stats_inst;
|
||||
bool alloc_stats_inst:1;
|
||||
};
|
||||
|
||||
struct irdma_device_init_info {
|
||||
@ -789,7 +790,7 @@ struct irdma_aeq_init_info {
|
||||
u32 *aeqe_base;
|
||||
void *pbl_list;
|
||||
u32 elem_cnt;
|
||||
bool virtual_map;
|
||||
bool virtual_map:1;
|
||||
u8 pbl_chunk_size;
|
||||
u32 first_pm_pbl_idx;
|
||||
u32 msix_idx;
|
||||
@ -856,7 +857,6 @@ struct irdma_roce_offload_info {
|
||||
bool dcqcn_en:1;
|
||||
bool rcv_no_icrc:1;
|
||||
bool wr_rdresp_en:1;
|
||||
bool bind_en:1;
|
||||
bool fast_reg_en:1;
|
||||
bool priv_mode_en:1;
|
||||
bool rd_en:1;
|
||||
@ -888,7 +888,6 @@ struct irdma_iwarp_offload_info {
|
||||
bool snd_mark_en:1;
|
||||
bool rcv_mark_en:1;
|
||||
bool wr_rdresp_en:1;
|
||||
bool bind_en:1;
|
||||
bool fast_reg_en:1;
|
||||
bool priv_mode_en:1;
|
||||
bool rd_en:1;
|
||||
@ -1130,12 +1129,12 @@ struct irdma_add_arp_cache_entry_info {
|
||||
u8 mac_addr[ETHER_ADDR_LEN];
|
||||
u32 reach_max;
|
||||
u16 arp_index;
|
||||
bool permanent;
|
||||
bool permanent:1;
|
||||
};
|
||||
|
||||
struct irdma_apbvt_info {
|
||||
u16 port;
|
||||
bool add;
|
||||
bool add:1;
|
||||
};
|
||||
|
||||
struct irdma_qhash_table_info {
|
||||
|
@ -1,7 +1,7 @@
|
||||
/*-
|
||||
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
|
||||
*
|
||||
* Copyright (c) 2016 - 2022 Intel Corporation
|
||||
* Copyright (c) 2016 - 2023 Intel Corporation
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
|
@ -1,7 +1,7 @@
|
||||
/*-
|
||||
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
|
||||
*
|
||||
* Copyright (c) 2016 - 2021 Intel Corporation
|
||||
* Copyright (c) 2016 - 2023 Intel Corporation
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
@ -43,6 +43,7 @@ struct irdma_sc_cqp;
|
||||
|
||||
struct irdma_ah_info {
|
||||
struct irdma_sc_vsi *vsi;
|
||||
struct irdma_cqp_request *cqp_request;
|
||||
u32 pd_idx;
|
||||
u32 dst_arpindex;
|
||||
u32 dest_ip_addr[4];
|
||||
|
@ -45,16 +45,16 @@
* @valid: The wqe valid
*/
static void
irdma_set_fragment(__le64 * wqe, u32 offset, struct irdma_sge *sge,
irdma_set_fragment(__le64 * wqe, u32 offset, struct ib_sge *sge,
u8 valid)
{
if (sge) {
set_64bit_val(wqe, offset,
FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->tag_off));
FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
set_64bit_val(wqe, offset + IRDMA_BYTE_8,
FIELD_PREP(IRDMAQPSQ_VALID, valid) |
FIELD_PREP(IRDMAQPSQ_FRAG_LEN, sge->len) |
FIELD_PREP(IRDMAQPSQ_FRAG_STAG, sge->stag));
FIELD_PREP(IRDMAQPSQ_FRAG_LEN, sge->length) |
FIELD_PREP(IRDMAQPSQ_FRAG_STAG, sge->lkey));
} else {
set_64bit_val(wqe, offset, 0);
set_64bit_val(wqe, offset + IRDMA_BYTE_8,
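This hunk is the commit's headline change in miniature: every driver-private irdma_sge becomes the standard Verbs struct ib_sge, with a one-to-one field rename. A sketch of the mapping (the struct bodies below restate the types involved for illustration):

/* Field mapping behind the irdma_sge -> ib_sge migration. */
#include <stdint.h>

struct old_irdma_sge {		/* removed by this commit */
	uint64_t tag_off;	/* -> ib_sge.addr   */
	uint32_t len;		/* -> ib_sge.length */
	uint32_t stag;		/* -> ib_sge.lkey   */
};

struct example_ib_sge {		/* shape of the standard struct ib_sge */
	uint64_t addr;
	uint32_t length;
	uint32_t lkey;
};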
@ -71,14 +71,14 @@ irdma_set_fragment(__le64 * wqe, u32 offset, struct irdma_sge *sge,
|
||||
*/
|
||||
static void
|
||||
irdma_set_fragment_gen_1(__le64 * wqe, u32 offset,
|
||||
struct irdma_sge *sge, u8 valid)
|
||||
struct ib_sge *sge, u8 valid)
|
||||
{
|
||||
if (sge) {
|
||||
set_64bit_val(wqe, offset,
|
||||
FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->tag_off));
|
||||
FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
|
||||
set_64bit_val(wqe, offset + IRDMA_BYTE_8,
|
||||
FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, sge->len) |
|
||||
FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, sge->stag));
|
||||
FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, sge->length) |
|
||||
FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, sge->lkey));
|
||||
} else {
|
||||
set_64bit_val(wqe, offset, 0);
|
||||
set_64bit_val(wqe, offset + IRDMA_BYTE_8, 0);
|
||||
@ -209,8 +209,7 @@ irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 * wqe, u16 quanta,
|
||||
if (IRDMA_RING_CURRENT_HEAD(qp->initial_ring) !=
|
||||
IRDMA_RING_CURRENT_TAIL(qp->sq_ring) &&
|
||||
!qp->push_mode) {
|
||||
if (post_sq)
|
||||
irdma_uk_qp_post_wr(qp);
|
||||
irdma_uk_qp_post_wr(qp);
|
||||
} else {
|
||||
push = (__le64 *) ((uintptr_t)qp->push_wqe +
|
||||
(wqe_idx & 0x7) * 0x20);
|
||||
@ -338,7 +337,7 @@ irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
|
||||
return -EINVAL;
|
||||
|
||||
for (i = 0; i < op_info->num_lo_sges; i++)
|
||||
total_size += op_info->lo_sg_list[i].len;
|
||||
total_size += op_info->lo_sg_list[i].length;
|
||||
|
||||
read_fence |= info->read_fence;
|
||||
|
||||
@ -357,7 +356,7 @@ irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
|
||||
|
||||
qp->sq_wrtrk_array[wqe_idx].signaled = info->signaled;
|
||||
set_64bit_val(wqe, IRDMA_BYTE_16,
|
||||
FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));
|
||||
FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
|
||||
|
||||
if (info->imm_data_valid) {
|
||||
set_64bit_val(wqe, IRDMA_BYTE_0,
|
||||
@ -386,7 +385,7 @@ irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
|
||||
++addl_frag_cnt;
|
||||
}
|
||||
|
||||
hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
|
||||
hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
|
||||
FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
|
||||
FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid) |
|
||||
FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt) |
|
||||
@ -437,7 +436,7 @@ irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
|
||||
return -EINVAL;
|
||||
|
||||
for (i = 0; i < op_info->num_lo_sges; i++)
|
||||
total_size += op_info->lo_sg_list[i].len;
|
||||
total_size += op_info->lo_sg_list[i].length;
|
||||
|
||||
ret_code = irdma_fragcnt_to_quanta_sq(op_info->num_lo_sges, &quanta);
|
||||
if (ret_code)
|
||||
@ -475,8 +474,8 @@ irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
|
||||
++addl_frag_cnt;
|
||||
}
|
||||
set_64bit_val(wqe, IRDMA_BYTE_16,
|
||||
FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));
|
||||
hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
|
||||
FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
|
||||
hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
|
||||
FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
|
||||
FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
|
||||
FIELD_PREP(IRDMAQPSQ_OPCODE,
|
||||
@ -525,7 +524,7 @@ irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
|
||||
return -EINVAL;
|
||||
|
||||
for (i = 0; i < op_info->num_sges; i++)
|
||||
total_size += op_info->sg_list[i].len;
|
||||
total_size += op_info->sg_list[i].length;
|
||||
|
||||
if (info->imm_data_valid)
|
||||
frag_cnt = op_info->num_sges + 1;
|
||||
@ -604,15 +603,15 @@ irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
|
||||
* @polarity: compatibility parameter
|
||||
*/
|
||||
static void
|
||||
irdma_copy_inline_data_gen_1(u8 *wqe, struct irdma_sge *sge_list,
|
||||
irdma_copy_inline_data_gen_1(u8 *wqe, struct ib_sge *sge_list,
|
||||
u32 num_sges, u8 polarity)
|
||||
{
|
||||
u32 quanta_bytes_remaining = 16;
|
||||
u32 i;
|
||||
|
||||
for (i = 0; i < num_sges; i++) {
|
||||
u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].tag_off;
|
||||
u32 sge_len = sge_list[i].len;
|
||||
u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr;
|
||||
u32 sge_len = sge_list[i].length;
|
||||
|
||||
while (sge_len) {
|
||||
u32 bytes_copied;
|
||||
@ -651,7 +650,7 @@ static inline u16 irdma_inline_data_size_to_quanta_gen_1(u32 data_size) {
|
||||
* @polarity: polarity of wqe valid bit
|
||||
*/
|
||||
static void
|
||||
irdma_copy_inline_data(u8 *wqe, struct irdma_sge *sge_list,
|
||||
irdma_copy_inline_data(u8 *wqe, struct ib_sge *sge_list,
|
||||
u32 num_sges, u8 polarity)
|
||||
{
|
||||
u8 inline_valid = polarity << IRDMA_INLINE_VALID_S;
|
||||
@ -662,8 +661,8 @@ irdma_copy_inline_data(u8 *wqe, struct irdma_sge *sge_list,
|
||||
wqe += 8;
|
||||
|
||||
for (i = 0; i < num_sges; i++) {
|
||||
u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].tag_off;
|
||||
u32 sge_len = sge_list[i].len;
|
||||
u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr;
|
||||
u32 sge_len = sge_list[i].length;
|
||||
|
||||
while (sge_len) {
|
||||
u32 bytes_copied;
|
||||
@ -743,7 +742,7 @@ irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
|
||||
return -EINVAL;
|
||||
|
||||
for (i = 0; i < op_info->num_lo_sges; i++)
|
||||
total_size += op_info->lo_sg_list[i].len;
|
||||
total_size += op_info->lo_sg_list[i].length;
|
||||
|
||||
if (unlikely(total_size > qp->max_inline_data))
|
||||
return -EINVAL;
|
||||
@ -756,9 +755,9 @@ irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
|
||||
qp->sq_wrtrk_array[wqe_idx].signaled = info->signaled;
|
||||
read_fence |= info->read_fence;
|
||||
set_64bit_val(wqe, IRDMA_BYTE_16,
|
||||
FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));
|
||||
FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
|
||||
|
||||
hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
|
||||
hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
|
||||
FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
|
||||
FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
|
||||
FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt ? 1 : 0) |
|
||||
@ -814,7 +813,7 @@ irdma_uk_inline_send(struct irdma_qp_uk *qp,
|
||||
return -EINVAL;
|
||||
|
||||
for (i = 0; i < op_info->num_sges; i++)
|
||||
total_size += op_info->sg_list[i].len;
|
||||
total_size += op_info->sg_list[i].length;
|
||||
|
||||
if (unlikely(total_size > qp->max_inline_data))
|
||||
return -EINVAL;
|
||||
@ -879,7 +878,7 @@ irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
|
||||
u64 hdr;
|
||||
u32 wqe_idx;
|
||||
bool local_fence = false;
|
||||
struct irdma_sge sge = {0};
|
||||
struct ib_sge sge = {0};
|
||||
u16 quanta = IRDMA_QP_WQE_MIN_QUANTA;
|
||||
|
||||
info->push_wqe = qp->push_db ? true : false;
|
||||
@ -890,7 +889,7 @@ irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
|
||||
if (!wqe)
|
||||
return -ENOSPC;
|
||||
|
||||
sge.stag = op_info->target_stag;
|
||||
sge.lkey = op_info->target_stag;
|
||||
qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0, &sge, 0);
|
||||
|
||||
set_64bit_val(wqe, IRDMA_BYTE_16, 0);
|
||||
@ -1327,8 +1326,7 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
|
||||
IRDMA_RING_MOVE_TAIL(cq->cq_ring);
|
||||
set_64bit_val(cq->shadow_area, IRDMA_BYTE_0,
|
||||
IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
|
||||
memset(info, 0,
|
||||
sizeof(struct irdma_cq_poll_info));
|
||||
memset(info, 0, sizeof(*info));
|
||||
return irdma_uk_cq_poll_cmpl(cq, info);
|
||||
}
|
||||
}
|
||||
@ -1403,7 +1401,6 @@ exit:
|
||||
if (pring && IRDMA_RING_MORE_WORK(*pring))
|
||||
move_cq_head = false;
|
||||
}
|
||||
|
||||
if (move_cq_head) {
|
||||
IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
|
||||
if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
|
||||
@ -1484,10 +1481,12 @@ irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
|
||||
int
|
||||
irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift, u32 *sqdepth)
|
||||
{
|
||||
u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;
|
||||
|
||||
*sqdepth = irdma_round_up_wq((sq_size << shift) + IRDMA_SQ_RSVD);
|
||||
|
||||
if (*sqdepth < ((u32)uk_attrs->min_hw_wq_size << shift))
|
||||
*sqdepth = uk_attrs->min_hw_wq_size << shift;
|
||||
if (*sqdepth < min_size)
|
||||
*sqdepth = min_size;
|
||||
else if (*sqdepth > uk_attrs->max_hw_wq_quanta)
|
||||
return -EINVAL;
|
||||
|
||||
@ -1501,10 +1500,12 @@ irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift, u32 *s
|
||||
int
|
||||
irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift, u32 *rqdepth)
|
||||
{
|
||||
u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;
|
||||
|
||||
*rqdepth = irdma_round_up_wq((rq_size << shift) + IRDMA_RQ_RSVD);
|
||||
|
||||
if (*rqdepth < ((u32)uk_attrs->min_hw_wq_size << shift))
|
||||
*rqdepth = uk_attrs->min_hw_wq_size << shift;
|
||||
if (*rqdepth < min_size)
|
||||
*rqdepth = min_size;
|
||||
else if (*rqdepth > uk_attrs->max_hw_rq_quanta)
|
||||
return -EINVAL;
|
||||
|
||||
@ -1535,9 +1536,10 @@ irdma_setup_connection_wqes(struct irdma_qp_uk *qp,
|
||||
{
|
||||
u16 move_cnt = 1;
|
||||
|
||||
if (qp->uk_attrs->feature_flags & IRDMA_FEATURE_RTS_AE)
|
||||
if (info->start_wqe_idx)
|
||||
move_cnt = info->start_wqe_idx;
|
||||
else if (qp->uk_attrs->feature_flags & IRDMA_FEATURE_RTS_AE)
|
||||
move_cnt = 3;
|
||||
|
||||
qp->conn_wqes = move_cnt;
|
||||
IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, move_cnt);
|
||||
IRDMA_RING_MOVE_TAIL_BY_COUNT(qp->sq_ring, move_cnt);
|
||||
@ -1677,6 +1679,8 @@ irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
|
||||
qp->wqe_ops = iw_wqe_uk_ops_gen_1;
|
||||
else
|
||||
qp->wqe_ops = iw_wqe_uk_ops;
|
||||
qp->start_wqe_idx = info->start_wqe_idx;
|
||||
|
||||
return ret_code;
|
||||
}
|
||||
|
||||
|
@ -1,7 +1,7 @@
|
||||
/*-
|
||||
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
|
||||
*
|
||||
* Copyright (c) 2015 - 2022 Intel Corporation
|
||||
* Copyright (c) 2015 - 2023 Intel Corporation
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
@ -35,6 +35,8 @@
|
||||
#ifndef IRDMA_USER_H
|
||||
#define IRDMA_USER_H
|
||||
|
||||
#include <rdma/ib_verbs.h>
|
||||
|
||||
#define irdma_handle void *
|
||||
#define irdma_adapter_handle irdma_handle
|
||||
#define irdma_qp_handle irdma_handle
|
||||
@ -48,7 +50,7 @@
|
||||
#define irdma_access_privileges u32
|
||||
#define irdma_physical_fragment u64
|
||||
#define irdma_address_list u64 *
|
||||
#define irdma_sgl struct irdma_sge *
|
||||
#define irdma_sgl struct ib_sge *
|
||||
|
||||
#define IRDMA_MAX_MR_SIZE 0x200000000000ULL
|
||||
|
||||
@ -78,8 +80,6 @@
|
||||
#define IRDMA_OP_TYPE_REC_IMM 0x3f
|
||||
|
||||
#define IRDMA_FLUSH_MAJOR_ERR 1
|
||||
#define IRDMA_SRQFLUSH_RSVD_MAJOR_ERR 0xfffe
|
||||
|
||||
/* Async Events codes */
|
||||
#define IRDMA_AE_AMP_UNALLOCATED_STAG 0x0102
|
||||
#define IRDMA_AE_AMP_INVALID_STAG 0x0103
|
||||
@ -140,6 +140,7 @@
|
||||
#define IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST 0x0313
|
||||
#define IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP 0x0314
|
||||
#define IRDMA_AE_ROCE_RSP_LENGTH_ERROR 0x0316
|
||||
#define IRDMA_AE_ROCE_REQ_LENGTH_ERROR 0x0318
|
||||
#define IRDMA_AE_ROCE_EMPTY_MCG 0x0380
|
||||
#define IRDMA_AE_ROCE_BAD_MC_IP_ADDR 0x0381
|
||||
#define IRDMA_AE_ROCE_BAD_MC_QPID 0x0382
|
||||
@ -160,6 +161,7 @@
|
||||
#define IRDMA_AE_LLP_TOO_MANY_KEEPALIVE_RETRIES 0x050b
|
||||
#define IRDMA_AE_LLP_DOUBT_REACHABILITY 0x050c
|
||||
#define IRDMA_AE_LLP_CONNECTION_ESTABLISHED 0x050e
|
||||
#define IRDMA_AE_LLP_TOO_MANY_RNRS 0x050f
|
||||
#define IRDMA_AE_RESOURCE_EXHAUSTION 0x0520
|
||||
#define IRDMA_AE_RESET_SENT 0x0601
|
||||
#define IRDMA_AE_TERMINATE_SENT 0x0602
|
||||
@ -199,8 +201,7 @@ enum irdma_device_caps_const {
|
||||
IRDMA_MAX_OUTBOUND_MSG_SIZE = 65537,
|
||||
/* 64K +1 */
|
||||
IRDMA_MAX_INBOUND_MSG_SIZE = 65537,
|
||||
IRDMA_MAX_PUSH_PAGE_COUNT = 1024,
|
||||
IRDMA_MAX_PE_ENA_VF_COUNT = 32,
|
||||
IRDMA_MAX_PE_ENA_VF_COUNT = 32,
|
||||
IRDMA_MAX_VF_FPM_ID = 47,
|
||||
IRDMA_MAX_SQ_PAYLOAD_SIZE = 2145386496,
|
||||
IRDMA_MAX_INLINE_DATA_SIZE = 101,
|
||||
@ -227,6 +228,7 @@ enum irdma_flush_opcode {
|
||||
FLUSH_RETRY_EXC_ERR,
|
||||
FLUSH_MW_BIND_ERR,
|
||||
FLUSH_REM_INV_REQ_ERR,
|
||||
FLUSH_RNR_RETRY_EXC_ERR,
|
||||
};
|
||||
|
||||
enum irdma_qp_event_type {
|
||||
@ -280,12 +282,6 @@ struct irdma_cq_uk;
|
||||
struct irdma_qp_uk_init_info;
|
||||
struct irdma_cq_uk_init_info;
|
||||
|
||||
struct irdma_sge {
|
||||
irdma_tagged_offset tag_off;
|
||||
u32 len;
|
||||
irdma_stag stag;
|
||||
};
|
||||
|
||||
struct irdma_ring {
|
||||
volatile u32 head;
|
||||
volatile u32 tail; /* effective tail */
|
||||
@ -317,13 +313,13 @@ struct irdma_post_rq_info {
|
||||
struct irdma_rdma_write {
|
||||
irdma_sgl lo_sg_list;
|
||||
u32 num_lo_sges;
|
||||
struct irdma_sge rem_addr;
|
||||
struct ib_sge rem_addr;
|
||||
};
|
||||
|
||||
struct irdma_rdma_read {
|
||||
irdma_sgl lo_sg_list;
|
||||
u32 num_lo_sges;
|
||||
struct irdma_sge rem_addr;
|
||||
struct ib_sge rem_addr;
|
||||
};
|
||||
|
||||
struct irdma_bind_window {
|
||||
@ -422,9 +418,9 @@ int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
|
||||
bool post_sq);
|
||||
|
||||
struct irdma_wqe_uk_ops {
|
||||
void (*iw_copy_inline_data)(u8 *dest, struct irdma_sge *sge_list, u32 num_sges, u8 polarity);
|
||||
void (*iw_copy_inline_data)(u8 *dest, struct ib_sge *sge_list, u32 num_sges, u8 polarity);
|
||||
u16 (*iw_inline_data_size_to_quanta)(u32 data_size);
|
||||
void (*iw_set_fragment)(__le64 *wqe, u32 offset, struct irdma_sge *sge,
|
||||
void (*iw_set_fragment)(__le64 *wqe, u32 offset, struct ib_sge *sge,
|
||||
u8 valid);
|
||||
void (*iw_set_mw_bind_wqe)(__le64 *wqe,
|
||||
struct irdma_bind_window *op_info);
|
||||
@ -490,6 +486,7 @@ struct irdma_qp_uk {
|
||||
u8 rwqe_polarity;
|
||||
u8 rq_wqe_size;
|
||||
u8 rq_wqe_size_multiplier;
|
||||
u8 start_wqe_idx;
|
||||
bool deferred_flag:1;
|
||||
bool push_mode:1; /* whether the last post wqe was pushed */
|
||||
bool push_dropped:1;
|
||||
@ -537,6 +534,7 @@ struct irdma_qp_uk_init_info {
|
||||
u32 sq_depth;
|
||||
u32 rq_depth;
|
||||
u8 first_sq_wq;
|
||||
u8 start_wqe_idx;
|
||||
u8 type;
|
||||
u8 sq_shift;
|
||||
u8 rq_shift;
|
||||
@ -625,10 +623,15 @@ static inline struct qp_err_code irdma_ae_to_qp_err_code(u16 ae_id)
|
||||
case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
|
||||
case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
|
||||
case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
|
||||
case IRDMA_AE_ROCE_REQ_LENGTH_ERROR:
|
||||
case IRDMA_AE_IB_REMOTE_OP_ERROR:
|
||||
qp_err.flush_code = FLUSH_REM_OP_ERR;
|
||||
qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
|
||||
break;
|
||||
case IRDMA_AE_LLP_TOO_MANY_RNRS:
|
||||
qp_err.flush_code = FLUSH_RNR_RETRY_EXC_ERR;
|
||||
qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
|
||||
break;
|
||||
case IRDMA_AE_LCE_QP_CATASTROPHIC:
|
||||
qp_err.flush_code = FLUSH_FATAL_ERR;
|
||||
qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
|
||||
|
@ -1,7 +1,7 @@
|
||||
/*-
|
||||
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
|
||||
*
|
||||
* Copyright (c) 2015 - 2022 Intel Corporation
|
||||
* Copyright (c) 2015 - 2023 Intel Corporation
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
@ -164,6 +164,7 @@ static const struct ae_desc ae_desc_list[] = {
|
||||
"Connection error: Doubt reachability (usually occurs after the max number of retries has been reached)"},
|
||||
{IRDMA_AE_LLP_CONNECTION_ESTABLISHED,
|
||||
"iWARP event: Connection established"},
|
||||
{IRDMA_AE_LLP_TOO_MANY_RNRS, "RoCEv2: Too many RNR NACKs"},
|
||||
{IRDMA_AE_RESOURCE_EXHAUSTION,
|
||||
"QP error: Resource exhaustion"},
|
||||
{IRDMA_AE_RESET_SENT,
|
||||
@ -437,11 +438,11 @@ static void
|
||||
irdma_free_pending_cqp_request(struct irdma_cqp *cqp,
|
||||
struct irdma_cqp_request *cqp_request)
|
||||
{
|
||||
if (cqp_request->waiting) {
|
||||
cqp_request->compl_info.error = true;
|
||||
WRITE_ONCE(cqp_request->request_done, true);
|
||||
cqp_request->compl_info.error = true;
|
||||
WRITE_ONCE(cqp_request->request_done, true);
|
||||
|
||||
if (cqp_request->waiting)
|
||||
wake_up(&cqp_request->waitq);
|
||||
}
|
||||
wait_event_timeout(cqp->remove_wq,
|
||||
atomic_read(&cqp_request->refcnt) == 1, 1000);
|
||||
irdma_put_cqp_request(cqp, cqp_request);
|
||||
@ -558,8 +559,6 @@ static const char *const irdma_cqp_cmd_names[IRDMA_MAX_CQP_OPS] = {
|
||||
[IRDMA_OP_MANAGE_HMC_PM_FUNC_TABLE] = "Manage HMC PM Function Table Cmd",
|
||||
[IRDMA_OP_SUSPEND] = "Suspend QP Cmd",
|
||||
[IRDMA_OP_RESUME] = "Resume QP Cmd",
|
||||
[IRDMA_OP_MANAGE_VCHNL_REQ_PBLE_BP] =
|
||||
"Manage Virtual Channel Requester Function PBLE Backing Pages Cmd",
|
||||
[IRDMA_OP_QUERY_FPM_VAL] = "Query FPM Values Cmd",
|
||||
[IRDMA_OP_COMMIT_FPM_VAL] = "Commit FPM Values Cmd",
|
||||
[IRDMA_OP_AH_CREATE] = "Create Address Handle Cmd",
|
||||
@ -591,7 +590,7 @@ static const struct irdma_cqp_err_info irdma_noncrit_err_list[] = {
|
||||
{0xffff, 0x8007, "Modify QP Bad Close"},
|
||||
{0xffff, 0x8009, "LLP Closed"},
|
||||
{0xffff, 0x800a, "Reset Not Sent"},
|
||||
{0xffff, 0x200, "Failover Pending"}
|
||||
{0xffff, 0x0200, "Failover Pending"},
|
||||
};
|
||||
|
||||
/**
|
||||
@ -1055,15 +1054,16 @@ irdma_cqp_qp_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
|
||||
/**
|
||||
* irdma_dealloc_push_page - free a push page for qp
|
||||
* @rf: RDMA PCI function
|
||||
* @qp: hardware control qp
|
||||
* @iwqp: QP pointer
|
||||
*/
|
||||
void
|
||||
irdma_dealloc_push_page(struct irdma_pci_f *rf,
|
||||
struct irdma_sc_qp *qp)
|
||||
struct irdma_qp *iwqp)
|
||||
{
|
||||
struct irdma_cqp_request *cqp_request;
|
||||
struct cqp_cmds_info *cqp_info;
|
||||
int status;
|
||||
struct irdma_sc_qp *qp = &iwqp->sc_qp;
|
||||
|
||||
if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX)
|
||||
return;
|
||||
@ -1564,16 +1564,6 @@ irdma_hw_stats_stop_timer(struct irdma_sc_vsi *vsi)
|
||||
del_timer_sync(&devstat->stats_timer);
|
||||
}
|
||||
|
||||
/**
|
||||
* irdma_process_stats - Checking for wrap and update stats
|
||||
* @pestat: stats structure pointer
|
||||
*/
|
||||
static inline void
|
||||
irdma_process_stats(struct irdma_vsi_pestat *pestat)
|
||||
{
|
||||
sc_vsi_update_stats(pestat->vsi);
|
||||
}
|
||||
|
||||
/**
|
||||
* irdma_process_cqp_stats - Checking for wrap and update stats
|
||||
* @cqp_request: cqp_request structure pointer
|
||||
@ -1583,7 +1573,7 @@ irdma_process_cqp_stats(struct irdma_cqp_request *cqp_request)
|
||||
{
|
||||
struct irdma_vsi_pestat *pestat = cqp_request->param;
|
||||
|
||||
irdma_process_stats(pestat);
|
||||
sc_vsi_update_stats(pestat->vsi);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -1619,7 +1609,7 @@ irdma_cqp_gather_stats_cmd(struct irdma_sc_dev *dev,
|
||||
cqp_request->callback_fcn = irdma_process_cqp_stats;
|
||||
status = irdma_handle_cqp_op(rf, cqp_request);
|
||||
if (wait)
|
||||
irdma_process_stats(pestat);
|
||||
sc_vsi_update_stats(pestat->vsi);
|
||||
irdma_put_cqp_request(&rf->cqp, cqp_request);
|
||||
|
||||
return status;
|
||||
@ -1814,6 +1804,10 @@ irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd,
|
||||
cqp_info->cqp_cmd = cmd;
|
||||
cqp_info->post_sq = 1;
|
||||
if (cmd == IRDMA_OP_AH_CREATE) {
|
||||
if (!wait)
|
||||
irdma_get_cqp_request(cqp_request);
|
||||
sc_ah->ah_info.cqp_request = cqp_request;
|
||||
|
||||
cqp_info->in.u.ah_create.info = sc_ah->ah_info;
|
||||
cqp_info->in.u.ah_create.scratch = (uintptr_t)cqp_request;
|
||||
cqp_info->in.u.ah_create.cqp = &rf->cqp.sc_cqp;
|
||||
@ -1948,21 +1942,6 @@ irdma_puda_free_ah(struct irdma_sc_dev *dev, struct irdma_sc_ah *ah)
|
||||
kfree(ah);
|
||||
}
|
||||
|
||||
/**
* irdma_gsi_ud_qp_ah_cb - callback after creation of AH for GSI/UD QP
* @cqp_request: pointer to cqp_request of create AH
*/
void
irdma_gsi_ud_qp_ah_cb(struct irdma_cqp_request *cqp_request)
{
struct irdma_sc_ah *sc_ah = cqp_request->param;

if (!cqp_request->compl_info.op_ret_val)
sc_ah->ah_info.ah_valid = true;
else
sc_ah->ah_info.ah_valid = false;
}

/**
* irdma_prm_add_pble_mem - add memory to pble resources
* @pprm: pble resource manager
|
@ -72,7 +72,6 @@ irdma_query_device(struct ib_device *ibdev,
|
||||
props->max_cq = rf->max_cq - rf->used_cqs;
|
||||
props->max_cqe = rf->max_cqe - 1;
|
||||
props->max_mr = rf->max_mr - rf->used_mrs;
|
||||
props->max_mw = props->max_mr;
|
||||
props->max_pd = rf->max_pd - rf->used_pds;
|
||||
props->max_sge_rd = hw_attrs->uk_attrs.max_hw_read_sges;
|
||||
props->max_qp_rd_atom = hw_attrs->max_hw_ird;
|
||||
@ -500,6 +499,9 @@ irdma_setup_umode_qp(struct ib_udata *udata,
|
||||
ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
|
||||
ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
|
||||
}
|
||||
if (req.comp_mask & IRDMA_CREATE_QP_USE_START_WQE_IDX &&
|
||||
iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE)
|
||||
ukinfo->start_wqe_idx = 4;
|
||||
irdma_setup_virt_qp(iwdev, iwqp, info);
|
||||
|
||||
return 0;
|
||||
@ -642,7 +644,6 @@ irdma_roce_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
|
||||
|
||||
roce_info->rd_en = true;
|
||||
roce_info->wr_rdresp_en = true;
|
||||
roce_info->bind_en = true;
|
||||
roce_info->dcqcn_en = false;
|
||||
roce_info->rtomin = iwdev->roce_rtomin;
|
||||
|
||||
@ -674,7 +675,6 @@ irdma_iw_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
|
||||
ether_addr_copy(iwarp_info->mac_addr, if_getlladdr(iwdev->netdev));
|
||||
iwarp_info->rd_en = true;
|
||||
iwarp_info->wr_rdresp_en = true;
|
||||
iwarp_info->bind_en = true;
|
||||
iwarp_info->ecn_en = true;
|
||||
iwarp_info->rtomin = 5;
|
||||
|
||||
@ -705,6 +705,8 @@ irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr,
|
||||
|
||||
if (init_attr->cap.max_inline_data > uk_attrs->max_hw_inline ||
|
||||
init_attr->cap.max_send_sge > uk_attrs->max_hw_wq_frags ||
|
||||
init_attr->cap.max_send_wr > uk_attrs->max_hw_wq_quanta ||
|
||||
init_attr->cap.max_recv_wr > uk_attrs->max_hw_rq_quanta ||
|
||||
init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags)
|
||||
return -EINVAL;
|
||||
|
||||
@ -759,8 +761,6 @@ irdma_get_ib_acc_flags(struct irdma_qp *iwqp)
|
||||
}
|
||||
if (iwqp->roce_info.rd_en)
|
||||
acc_flags |= IB_ACCESS_REMOTE_READ;
|
||||
if (iwqp->roce_info.bind_en)
|
||||
acc_flags |= IB_ACCESS_MW_BIND;
|
||||
} else {
|
||||
if (iwqp->iwarp_info.wr_rdresp_en) {
|
||||
acc_flags |= IB_ACCESS_LOCAL_WRITE;
|
||||
@ -768,8 +768,6 @@ irdma_get_ib_acc_flags(struct irdma_qp *iwqp)
|
||||
}
|
||||
if (iwqp->iwarp_info.rd_en)
|
||||
acc_flags |= IB_ACCESS_REMOTE_READ;
|
||||
if (iwqp->iwarp_info.bind_en)
|
||||
acc_flags |= IB_ACCESS_MW_BIND;
|
||||
}
|
||||
return acc_flags;
|
||||
}
|
||||
@ -1181,7 +1179,8 @@ irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
|
||||
udata->outlen));
|
||||
if (ret) {
|
||||
irdma_remove_push_mmap_entries(iwqp);
|
||||
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
|
||||
irdma_debug(&iwdev->rf->sc_dev,
|
||||
IRDMA_DEBUG_VERBS,
|
||||
"copy_to_udata failed\n");
|
||||
return ret;
|
||||
}
|
||||
@ -1647,8 +1646,10 @@ error:
|
||||
/**
|
||||
* irdma_get_mr_access - get hw MR access permissions from IB access flags
|
||||
* @access: IB access flags
|
||||
* @hw_rev: Hardware version
|
||||
*/
|
||||
static inline u16 irdma_get_mr_access(int access){
|
||||
static inline u16 irdma_get_mr_access(int access, u8 hw_rev)
|
||||
{
|
||||
u16 hw_access = 0;
|
||||
|
||||
hw_access |= (access & IB_ACCESS_LOCAL_WRITE) ?
|
||||
@ -1657,8 +1658,6 @@ static inline u16 irdma_get_mr_access(int access){
|
||||
IRDMA_ACCESS_FLAGS_REMOTEWRITE : 0;
|
||||
hw_access |= (access & IB_ACCESS_REMOTE_READ) ?
|
||||
IRDMA_ACCESS_FLAGS_REMOTEREAD : 0;
|
||||
hw_access |= (access & IB_ACCESS_MW_BIND) ?
|
||||
IRDMA_ACCESS_FLAGS_BIND_WINDOW : 0;
|
||||
hw_access |= (access & IB_ZERO_BASED) ?
|
||||
IRDMA_ACCESS_FLAGS_ZERO_BASED : 0;
|
||||
hw_access |= IRDMA_ACCESS_FLAGS_LOCALREAD;
|
||||
@ -1893,81 +1892,6 @@ irdma_handle_q_mem(struct irdma_device *iwdev,
|
||||
return err;
|
||||
}
|
||||
|
||||
/**
|
||||
* irdma_hw_alloc_mw - create the hw memory window
|
||||
* @iwdev: irdma device
|
||||
* @iwmr: pointer to memory window info
|
||||
*/
|
||||
int
|
||||
irdma_hw_alloc_mw(struct irdma_device *iwdev, struct irdma_mr *iwmr)
|
||||
{
|
||||
struct irdma_mw_alloc_info *info;
|
||||
struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
|
||||
struct irdma_cqp_request *cqp_request;
|
||||
struct cqp_cmds_info *cqp_info;
|
||||
int status;
|
||||
|
||||
cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
|
||||
if (!cqp_request)
|
||||
return -ENOMEM;
|
||||
|
||||
cqp_info = &cqp_request->info;
|
||||
info = &cqp_info->in.u.mw_alloc.info;
|
||||
memset(info, 0, sizeof(*info));
|
||||
if (iwmr->ibmw.type == IB_MW_TYPE_1)
|
||||
info->mw_wide = true;
|
||||
|
||||
info->page_size = PAGE_SIZE;
|
||||
info->mw_stag_index = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
|
||||
info->pd_id = iwpd->sc_pd.pd_id;
|
||||
info->remote_access = true;
|
||||
cqp_info->cqp_cmd = IRDMA_OP_MW_ALLOC;
|
||||
cqp_info->post_sq = 1;
|
||||
cqp_info->in.u.mw_alloc.dev = &iwdev->rf->sc_dev;
|
||||
cqp_info->in.u.mw_alloc.scratch = (uintptr_t)cqp_request;
|
||||
status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
|
||||
irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
/**
|
||||
* irdma_dealloc_mw - Dealloc memory window
|
||||
* @ibmw: memory window structure.
|
||||
*/
|
||||
static int
|
||||
irdma_dealloc_mw(struct ib_mw *ibmw)
|
||||
{
|
||||
struct ib_pd *ibpd = ibmw->pd;
|
||||
struct irdma_pd *iwpd = to_iwpd(ibpd);
|
||||
struct irdma_mr *iwmr = to_iwmr((struct ib_mr *)ibmw);
|
||||
struct irdma_device *iwdev = to_iwdev(ibmw->device);
|
||||
struct irdma_cqp_request *cqp_request;
|
||||
struct cqp_cmds_info *cqp_info;
|
||||
struct irdma_dealloc_stag_info *info;
|
||||
|
||||
cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
|
||||
if (!cqp_request)
|
||||
return -ENOMEM;
|
||||
|
||||
cqp_info = &cqp_request->info;
|
||||
info = &cqp_info->in.u.dealloc_stag.info;
|
||||
memset(info, 0, sizeof(*info));
|
||||
info->pd_id = iwpd->sc_pd.pd_id;
|
||||
info->stag_idx = RS_64_1(ibmw->rkey, IRDMA_CQPSQ_STAG_IDX_S);
|
||||
info->mr = false;
|
||||
cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
|
||||
cqp_info->post_sq = 1;
|
||||
cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
|
||||
cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
|
||||
irdma_handle_cqp_op(iwdev->rf, cqp_request);
|
||||
irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
|
||||
irdma_free_stag(iwdev, iwmr->stag);
|
||||
kfree(iwmr);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* irdma_hw_alloc_stag - cqp command to allocate stag
|
||||
* @iwdev: irdma device
|
||||
@@ -2088,7 +2012,8 @@ irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
 	stag_info->stag_key = (u8)iwmr->stag;
 	stag_info->total_len = iwmr->len;
 	stag_info->all_memory = (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) ? true : false;
-	stag_info->access_rights = irdma_get_mr_access(access);
+	stag_info->access_rights = irdma_get_mr_access(access,
+						       iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev);
 	stag_info->pd_id = iwpd->sc_pd.pd_id;
 	if (stag_info->access_rights & IRDMA_ACCESS_FLAGS_ZERO_BASED)
 		stag_info->addr_type = IRDMA_ADDR_TYPE_ZERO_BASED;
@@ -2162,14 +2087,18 @@ irdma_free_iwmr(struct irdma_mr *iwmr)
 }
 
 /*
- * irdma_reg_user_mr_type_mem - Handle memory registration @iwmr - irdma mr @access - access rights
+ * irdma_reg_user_mr_type_mem - Handle memory registration
+ * @iwmr - irdma mr
+ * @access - access rights
+ * @create_stag - flag to create stag or not
  */
 static int
-irdma_reg_user_mr_type_mem(struct irdma_mr *iwmr, int access)
+irdma_reg_user_mr_type_mem(struct irdma_mr *iwmr, int access,
+			   bool create_stag)
 {
 	struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
 	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
-	u32 stag;
+	u32 stag = 0;
 	int err;
 	u8 lvl;
 
@@ -2188,15 +2117,17 @@ irdma_reg_user_mr_type_mem(struct irdma_mr *iwmr, int access)
 		}
 	}
 
-	stag = irdma_create_stag(iwdev);
-	if (!stag) {
-		err = -ENOMEM;
-		goto free_pble;
-	}
+	if (create_stag) {
+		stag = irdma_create_stag(iwdev);
+		if (!stag) {
+			err = -ENOMEM;
+			goto free_pble;
+		}
 
-	iwmr->stag = stag;
-	iwmr->ibmr.rkey = stag;
-	iwmr->ibmr.lkey = stag;
+		iwmr->stag = stag;
+		iwmr->ibmr.rkey = stag;
+		iwmr->ibmr.lkey = stag;
+	}
 	iwmr->access = access;
 	err = irdma_hwreg_mr(iwdev, iwmr, access);
 	if (err)
@@ -2205,7 +2136,8 @@ irdma_reg_user_mr_type_mem(struct irdma_mr *iwmr, int access)
 	return 0;
 
 err_hwreg:
-	irdma_free_stag(iwdev, stag);
+	if (stag)
+		irdma_free_stag(iwdev, stag);
 
 free_pble:
 	if (iwpbl->pble_alloc.level != PBLE_LEVEL_0 && iwpbl->pbl_allocated)
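With the create_stag flag the same helper serves both first-time registration (mint a fresh STag) and re-registration (keep the existing key); initializing stag to 0 and guarding irdma_free_stag() keeps the error path from freeing a key this call did not allocate. A toy sketch of that pattern (all names hypothetical, not the driver's):

#include <stdio.h>

static unsigned int next_key = 1;

static unsigned int create_key(void) { return next_key++; }
static void free_key(unsigned int k) { printf("freed key %u\n", k); }
static int hw_register(unsigned int k) { return k == 0 ? -1 : 0; /* pretend */ }

/* Optionally mint a key, register, and unwind only what we created. */
static int register_mem(unsigned int *key, int create)
{
	unsigned int k = 0;

	if (create) {
		k = create_key();
		*key = k;
	}
	if (hw_register(*key)) {
		if (k)		/* free only a key allocated by this call */
			free_key(k);
		return -1;
	}
	return 0;
}

int main(void)
{
	unsigned int key = 0;

	register_mem(&key, 1);	/* fresh registration: allocates a key */
	register_mem(&key, 0);	/* re-registration: reuses the same key */
	printf("key %u\n", key);
	return 0;
}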
@@ -2344,7 +2276,7 @@ irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 
 		break;
 	case IRDMA_MEMREG_TYPE_MEM:
-		err = irdma_reg_user_mr_type_mem(iwmr, access);
+		err = irdma_reg_user_mr_type_mem(iwmr, access, true);
 		if (err)
 			goto error;
 
@@ -2421,10 +2353,8 @@ irdma_rereg_mr_trans(struct irdma_mr *iwmr, u64 start, u64 len,
 {
 	struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
-	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
-	struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
 	struct ib_pd *pd = iwmr->ibmr.pd;
 	struct ib_umem *region;
 	u8 lvl;
 	int err;
 
 	region = ib_umem_get(pd->uobject->context, start, len, iwmr->access, 0);
@@ -2445,35 +2375,14 @@ irdma_rereg_mr_trans(struct irdma_mr *iwmr, u64 start, u64 len,
 	iwmr->page_cnt = irdma_ib_umem_num_dma_blocks(region, iwmr->page_size,
 						      virt);
 
-	lvl = iwmr->page_cnt != 1 ? PBLE_LEVEL_1 | PBLE_LEVEL_2 : PBLE_LEVEL_0;
-
-	err = irdma_setup_pbles(iwdev->rf, iwmr, lvl);
+	err = irdma_reg_user_mr_type_mem(iwmr, iwmr->access, false);
 	if (err)
-		goto error;
-
-	if (lvl) {
-		err = irdma_check_mr_contiguous(palloc,
-						iwmr->page_size);
-		if (err) {
-			irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
-			iwpbl->pbl_allocated = false;
-		}
-	}
-
-	err = irdma_hwreg_mr(iwdev, iwmr, iwmr->access);
-	if (err)
-		goto error;
+		goto err;
 
 	return &iwmr->ibmr;
 
-error:
-	if (palloc->level != PBLE_LEVEL_0 && iwpbl->pbl_allocated) {
-		irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
-		iwpbl->pbl_allocated = false;
-	}
+err:
 	ib_umem_release(region);
 	iwmr->region = NULL;
 
 	return ERR_PTR(err);
 }
@@ -2580,25 +2489,6 @@ irdma_del_memlist(struct irdma_mr *iwmr,
 	}
 }
 
-/**
- * irdma_copy_sg_list - copy sg list for qp
- * @sg_list: copied into sg_list
- * @sgl: copy from sgl
- * @num_sges: count of sg entries
- */
-static void
-irdma_copy_sg_list(struct irdma_sge *sg_list, struct ib_sge *sgl,
-		   int num_sges)
-{
-	unsigned int i;
-
-	for (i = 0; i < num_sges; i++) {
-		sg_list[i].tag_off = sgl[i].addr;
-		sg_list[i].len = sgl[i].length;
-		sg_list[i].stag = sgl[i].lkey;
-	}
-}
-
 /**
  * irdma_post_send - kernel application wr
  * @ibqp: qp ptr for wr
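The removed helper only renamed fields (addr/length/lkey to tag_off/len/stag); once the driver takes struct ib_sge directly, the per-WR copy loop and the per-QP scratch sg_list can go. A standalone sketch contrasting the two layouts (struct definitions abbreviated here; field names taken from the removed loop):

#include <stdint.h>
#include <stdio.h>

/* Field names as they appear in the removed copy loop; the real
 * definitions live in the IB headers and the old irdma user header. */
struct ib_sge_like    { uint64_t addr;    uint32_t length; uint32_t lkey; };
struct irdma_sge_like { uint64_t tag_off; uint32_t len;    uint32_t stag; };

int main(void)
{
	struct ib_sge_like sgl[2] = { { 0x1000, 64, 7 }, { 0x2000, 128, 9 } };
	struct irdma_sge_like copy[2];
	int i;

	/* The old scheme: copy every SGE on every posted WR ... */
	for (i = 0; i < 2; i++) {
		copy[i].tag_off = sgl[i].addr;
		copy[i].len = sgl[i].length;
		copy[i].stag = sgl[i].lkey;
	}
	/* ... which is pure field renaming; consuming ib_sge directly
	 * drops the loop and the scratch array entirely. */
	printf("%llx %u %u\n", (unsigned long long)copy[0].tag_off,
	       copy[0].len, copy[0].stag);
	return 0;
}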
@@ -2659,7 +2549,7 @@ irdma_post_send(struct ib_qp *ibqp,
 			}
 
 			info.op.send.num_sges = ib_wr->num_sge;
-			info.op.send.sg_list = (struct irdma_sge *)ib_wr->sg_list;
+			info.op.send.sg_list = ib_wr->sg_list;
 			if (iwqp->ibqp.qp_type == IB_QPT_UD ||
 			    iwqp->ibqp.qp_type == IB_QPT_GSI) {
 				ah = to_iwah(ud_wr(ib_wr)->ah);
@@ -2690,8 +2580,8 @@ irdma_post_send(struct ib_qp *ibqp,
 
 			info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
 			info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
-			info.op.rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
-			info.op.rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
+			info.op.rdma_write.rem_addr.addr = rdma_wr(ib_wr)->remote_addr;
+			info.op.rdma_write.rem_addr.lkey = rdma_wr(ib_wr)->rkey;
 			if (ib_wr->send_flags & IB_SEND_INLINE)
 				err = irdma_uk_inline_rdma_write(ukqp, &info, false);
 			else
@@ -2707,8 +2597,8 @@ irdma_post_send(struct ib_qp *ibqp,
 				break;
 			}
 			info.op_type = IRDMA_OP_TYPE_RDMA_READ;
-			info.op.rdma_read.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
-			info.op.rdma_read.rem_addr.stag = rdma_wr(ib_wr)->rkey;
+			info.op.rdma_read.rem_addr.addr = rdma_wr(ib_wr)->remote_addr;
+			info.op.rdma_read.rem_addr.lkey = rdma_wr(ib_wr)->rkey;
 			info.op.rdma_read.lo_sg_list = (void *)ib_wr->sg_list;
 			info.op.rdma_read.num_lo_sges = ib_wr->num_sge;
 			err = irdma_uk_rdma_read(ukqp, &info, inv_stag, false);
@@ -2726,7 +2616,9 @@ irdma_post_send(struct ib_qp *ibqp,
 
 			stag_info.signaled = info.signaled;
 			stag_info.read_fence = info.read_fence;
-			stag_info.access_rights = irdma_get_mr_access(reg_wr(ib_wr)->access);
+			stag_info.access_rights =
+			    irdma_get_mr_access(reg_wr(ib_wr)->access,
+						dev->hw_attrs.uk_attrs.hw_rev);
 			stag_info.stag_key = reg_wr(ib_wr)->key & 0xff;
 			stag_info.stag_idx = reg_wr(ib_wr)->key >> 8;
 			stag_info.page_size = reg_wr(ib_wr)->mr->page_size;
@@ -2788,7 +2680,6 @@ irdma_post_recv(struct ib_qp *ibqp,
 	struct irdma_qp *iwqp = to_iwqp(ibqp);
 	struct irdma_qp_uk *ukqp = &iwqp->sc_qp.qp_uk;
 	struct irdma_post_rq_info post_recv = {0};
-	struct irdma_sge *sg_list = iwqp->sg_list;
 	unsigned long flags;
 	int err = 0;
 
@@ -2801,8 +2692,7 @@ irdma_post_recv(struct ib_qp *ibqp,
 		}
 		post_recv.num_sges = ib_wr->num_sge;
 		post_recv.wr_id = ib_wr->wr_id;
-		irdma_copy_sg_list(sg_list, ib_wr->sg_list, ib_wr->num_sge);
-		post_recv.sg_list = sg_list;
+		post_recv.sg_list = ib_wr->sg_list;
 		err = irdma_uk_post_receive(ukqp, &post_recv);
 		if (err) {
 			irdma_debug(&iwqp->iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
@@ -3217,7 +3107,7 @@ irdma_attach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
 	if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid)) {
 		irdma_copy_ip_ntohl(ip_addr,
 				    sgid_addr.saddr_in6.sin6_addr.__u6_addr.__u6_addr32);
-		irdma_netdev_vlan_ipv6(iwqp->cm_id, ip_addr, &vlan_id, NULL);
+		irdma_get_vlan_mac_ipv6(iwqp->cm_id, ip_addr, &vlan_id, NULL);
 		ipv4 = false;
 		irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
 			    "qp_id=%d, IP6address=%x:%x:%x:%x\n", ibqp->qp_num,
@@ -3440,12 +3330,10 @@ irdma_set_device_ops(struct ib_device *ibdev)
 
 	dev_ops->alloc_hw_stats = irdma_alloc_hw_stats;
 	dev_ops->alloc_mr = irdma_alloc_mr;
-	dev_ops->alloc_mw = irdma_alloc_mw;
 	dev_ops->alloc_pd = irdma_alloc_pd;
 	dev_ops->alloc_ucontext = irdma_alloc_ucontext;
 	dev_ops->create_cq = irdma_create_cq;
 	dev_ops->create_qp = irdma_create_qp;
-	dev_ops->dealloc_mw = irdma_dealloc_mw;
 	dev_ops->dealloc_pd = irdma_dealloc_pd;
 	dev_ops->dealloc_ucontext = irdma_dealloc_ucontext;
 	dev_ops->dereg_mr = irdma_dereg_mr;
@@ -1,7 +1,7 @@
 /*-
  * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
  *
- * Copyright (c) 2015 - 2022 Intel Corporation
+ * Copyright (c) 2015 - 2023 Intel Corporation
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -254,7 +254,7 @@ struct irdma_qp {
 	struct irdma_dma_mem host_ctx;
 	struct timer_list terminate_timer;
 	struct irdma_pbl *iwpbl;
-	struct irdma_sge *sg_list;
+	struct ib_sge *sg_list;
 	struct irdma_dma_mem q2_ctx_mem;
 	struct irdma_dma_mem ietf_mem;
 	struct completion free_qp;
@@ -57,7 +57,7 @@ irdma_alloc_node(struct irdma_sc_vsi *vsi,
 	struct irdma_ws_node *node;
 	u16 node_index = 0;
 
-	ws_mem.size = sizeof(struct irdma_ws_node);
+	ws_mem.size = sizeof(*node);
 	ws_mem.va = kzalloc(ws_mem.size, GFP_KERNEL);
 	if (!ws_mem.va)
 		return NULL;
@@ -109,7 +109,7 @@ irdma_free_node(struct irdma_sc_vsi *vsi,
 	irdma_free_ws_node_id(vsi->dev, node->index);
 
 	ws_mem.va = node;
-	ws_mem.size = sizeof(struct irdma_ws_node);
+	ws_mem.size = sizeof(*node);
 	kfree(ws_mem.va);
 }
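sizeof(*node) derives the allocation size from the variable rather than repeating the type name, so the expression stays correct if the declaration ever changes. The same idiom in a standalone sketch (type name invented for illustration):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ws_node_like { int index; char name[16]; };

int main(void)
{
	struct ws_node_like *node;

	/* sizeof(*node) follows the pointer's type automatically;
	 * sizeof(struct ws_node_like) would silently become wrong if
	 * node's declaration were later changed to another type. */
	node = calloc(1, sizeof(*node));
	if (!node)
		return 1;
	node->index = 3;
	strcpy(node->name, "leaf");
	printf("%d %s (%zu bytes)\n", node->index, node->name, sizeof(*node));
	free(node);
	return 0;
}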
@@ -1,7 +1,7 @@
 /*-
  * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
  *
- * Copyright (c) 2015 - 2022 Intel Corporation
+ * Copyright (c) 2015 - 2023 Intel Corporation
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU