path: root/include/uapi
author    Linus Torvalds <torvalds@linux-foundation.org>  2018-04-06 17:35:43 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-04-06 17:35:43 -0700
commit    19fd08b85bc7e0502b55cd726f466df82ee7e777 (patch)
tree      b042de4b9a8a9478c528ea950b14d34487375695 /include/uapi
parent    Merge tag 'mailbox-v4.17' of git://git.linaro.org/landing-teams/working/fujitsu/integration (diff)
parent    IB/rxe: Fix for oops in rxe_register_device on ppc64le arch (diff)
download  linux-dev-19fd08b85bc7e0502b55cd726f466df82ee7e777.tar.xz
          linux-dev-19fd08b85bc7e0502b55cd726f466df82ee7e777.zip
Merge tag 'for-linus-unmerged' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma updates from Jason Gunthorpe:
 "Doug and I are at a conference next week so if another PR is sent I
  expect it to only be bug fixes. Parav noted yesterday that there are
  some fringe case behavior changes in his work that he would like to
  fix, and I see that Intel has a number of rc-looking patches for HFI1
  they posted yesterday.

  Parav is again the biggest contributor by patch count with his
  ongoing work to enable container support in the RDMA stack, followed
  by Leon doing syzkaller-inspired cleanups, though most of the actual
  fixing went to RC.

  There is one uncomfortable series here fixing the user ABI to
  actually work as intended in 32 bit mode. There are lots of notes in
  the commit messages, but the basic summary is that we don't think
  there is an actual 32 bit kernel user of drivers/infiniband, for
  several good reasons.

  However, we are seeing people who want to use a 32 bit userspace with
  a 64 bit kernel, which didn't completely work until now. So in fixing
  it we required a 32 bit rxe user to upgrade their userspace. rxe
  users are already quite rare and we think a 32 bit one is
  non-existent.

   - Fix RDMA uapi headers to actually compile in userspace and be more
     complete

   - Three shared-with-netdev pull requests from Mellanox:

      * 7 patches, mostly to net, with 1 IB-related one at the back.
        This series addresses an IRQ performance issue (patch 1),
        cleanups related to the fix for the IRQ performance problem
        (patches 2-6), and then extends the fragmented completion queue
        support that already exists in the net side of the driver to
        the ib side of the driver (patch 7).

      * Mostly IB, with 5 patches to net that are needed to support the
        remaining 10 patches to the IB subsystem. This series extends
        the current 'representor' framework when the mlx5 driver is in
        switchdev mode from being a netdev-only construct to being a
        netdev/IB dev construct. The IB dev is limited to raw Eth queue
        pairs only, but by having an IB dev of this type attached to
        the representor for a switchdev port, it enables DPDK to work
        on the switchdev device.

      * All net related, but needed as infrastructure for the rdma
        driver

   - Updates for the hns, i40iw, bnxt_re, cxgb3 and cxgb4 drivers

   - SRP performance updates

   - IB uverbs write path cleanup patch series from Leon

   - Add RDMA_CM support to ib_srpt. This is disabled by default. Users
     need to set the port for ib_srpt to listen on in configfs in order
     for it to be enabled
     (/sys/kernel/config/target/srpt/discovery_auth/rdma_cm_port)

   - TSO and Scatter FCS support in mlx4

   - Refactor of the modify_qp routine to resolve problems seen while
     working on new code that is forthcoming

   - More refactoring and updates of RDMA CM for container support from
     Parav

   - mlx5 'fine grained packet pacing', 'ipsec offload' and 'device
     memory' user API features

   - Infrastructure updates for the new IOCTL interface, based on
     increased usage

   - ABI compatibility bug fixes to fully support 32 bit userspace on a
     64 bit kernel, as was originally intended. See the commit messages
     for extensive details

   - Syzkaller bugs and code cleanups motivated by them"

* tag 'for-linus-unmerged' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (199 commits)
  IB/rxe: Fix for oops in rxe_register_device on ppc64le arch
  IB/mlx5: Device memory mr registration support
  net/mlx5: Mkey creation command adjustments
  IB/mlx5: Device memory support in mlx5_ib
  net/mlx5: Query device memory capabilities
  IB/uverbs: Add device memory registration ioctl support
  IB/uverbs: Add alloc/free dm uverbs ioctl support
  IB/uverbs: Add device memory capabilities reporting
  IB/uverbs: Expose device memory capabilities to user
  RDMA/qedr: Fix wmb usage in qedr
  IB/rxe: Removed GID add/del dummy routines
  RDMA/qedr: Zero stack memory before copying to user space
  IB/mlx5: Add ability to hash by IPSEC_SPI when creating a TIR
  IB/mlx5: Add information for querying IPsec capabilities
  IB/mlx5: Add IPsec support for egress and ingress
  {net,IB}/mlx5: Add ipsec helper
  IB/mlx5: Add modify_flow_action_esp verb
  IB/mlx5: Add implementation for create and destroy action_xfrm
  IB/uverbs: Introduce ESP steering match filter
  IB/uverbs: Add modify ESP flow_action
  ...
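The change that dominates the diffstat below is the switch from __u64 to __aligned_u64 in the uapi structs. A minimal, stand-alone illustration of what that buys, assuming installed kernel uapi headers; the struct names are hypothetical and not part of the patch:

        #include <linux/types.h>
        #include <stddef.h>
        #include <stdio.h>

        struct demo_plain {                /* hypothetical example struct */
                __u32 handle;
                __u64 addr;                /* offset 4 on i386, offset 8 on x86_64 */
        };

        struct demo_aligned {
                __u32 handle;
                __u32 reserved;            /* explicit padding, as several headers add */
                __aligned_u64 addr;        /* offset 8 on both 32-bit and 64-bit */
        };

        int main(void)
        {
                printf("plain:   offsetof(addr)=%zu sizeof=%zu\n",
                       offsetof(struct demo_plain, addr), sizeof(struct demo_plain));
                printf("aligned: offsetof(addr)=%zu sizeof=%zu\n",
                       offsetof(struct demo_aligned, addr), sizeof(struct demo_aligned));
                return 0;
        }

Built with -m32 the plain variant reports offset 4 and size 12, while a 64-bit build reports 8 and 16; the aligned variant reports 8 and 16 on both, which is what lets a 32-bit userspace talk to a 64-bit kernel without translation.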
Diffstat (limited to 'include/uapi')
-rw-r--r--  include/uapi/rdma/bnxt_re-abi.h              21
-rw-r--r--  include/uapi/rdma/cxgb3-abi.h                17
-rw-r--r--  include/uapi/rdma/cxgb4-abi.h                29
-rw-r--r--  include/uapi/rdma/hfi/hfi1_ioctl.h           32
-rw-r--r--  include/uapi/rdma/hfi/hfi1_user.h            10
-rw-r--r--  include/uapi/rdma/hns-abi.h                  22
-rw-r--r--  include/uapi/rdma/i40iw-abi.h               107
-rw-r--r--  include/uapi/rdma/ib_user_cm.h               48
-rw-r--r--  include/uapi/rdma/ib_user_ioctl_cmds.h      134
-rw-r--r--  include/uapi/rdma/ib_user_ioctl_verbs.h      96
-rw-r--r--  include/uapi/rdma/ib_user_mad.h               4
-rw-r--r--  include/uapi/rdma/ib_user_verbs.h           195
-rw-r--r--  include/uapi/rdma/mlx4-abi.h                 52
-rw-r--r--  include/uapi/rdma/mlx5-abi.h                 72
-rw-r--r--  include/uapi/rdma/mlx5_user_ioctl_cmds.h     48
-rw-r--r--  include/uapi/rdma/mlx5_user_ioctl_verbs.h    43
-rw-r--r--  include/uapi/rdma/mthca-abi.h                10
-rw-r--r--  include/uapi/rdma/nes-abi.h                   6
-rw-r--r--  include/uapi/rdma/ocrdma-abi.h               36
-rw-r--r--  include/uapi/rdma/qedr-abi.h                 20
-rw-r--r--  include/uapi/rdma/rdma_netlink.h             51
-rw-r--r--  include/uapi/rdma/rdma_user_cm.h             49
-rw-r--r--  include/uapi/rdma/rdma_user_ioctl.h          38
-rw-r--r--  include/uapi/rdma/rdma_user_ioctl_cmds.h     99
-rw-r--r--  include/uapi/rdma/rdma_user_rxe.h            58
-rw-r--r--  include/uapi/rdma/vmw_pvrdma-abi.h           49
26 files changed, 991 insertions, 355 deletions
diff --git a/include/uapi/rdma/bnxt_re-abi.h b/include/uapi/rdma/bnxt_re-abi.h
index db54115be044..a7a6111e50c7 100644
--- a/include/uapi/rdma/bnxt_re-abi.h
+++ b/include/uapi/rdma/bnxt_re-abi.h
@@ -53,15 +53,20 @@ struct bnxt_re_uctx_resp {
__u32 rsvd;
};
+/*
+ * This struct is placed after the ib_uverbs_alloc_pd_resp struct, which is
+ * not 8 byte aligned. To avoid undesired padding in various cases we have to
+ * set this struct to packed.
+ */
struct bnxt_re_pd_resp {
__u32 pdid;
__u32 dpi;
__u64 dbr;
-};
+} __attribute__((packed, aligned(4)));
struct bnxt_re_cq_req {
- __u64 cq_va;
- __u64 cq_handle;
+ __aligned_u64 cq_va;
+ __aligned_u64 cq_handle;
};
struct bnxt_re_cq_resp {
@@ -72,9 +77,9 @@ struct bnxt_re_cq_resp {
};
struct bnxt_re_qp_req {
- __u64 qpsva;
- __u64 qprva;
- __u64 qp_handle;
+ __aligned_u64 qpsva;
+ __aligned_u64 qprva;
+ __aligned_u64 qp_handle;
};
struct bnxt_re_qp_resp {
@@ -83,8 +88,8 @@ struct bnxt_re_qp_resp {
};
struct bnxt_re_srq_req {
- __u64 srqva;
- __u64 srq_handle;
+ __aligned_u64 srqva;
+ __aligned_u64 srq_handle;
};
struct bnxt_re_srq_resp {
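A hedged illustration of the bnxt_re_pd_resp comment above; the wrapper struct name is hypothetical, but it mirrors how userspace providers typically lay the common and driver-specific responses back to back:

        #include <rdma/bnxt_re-abi.h>
        #include <rdma/ib_user_verbs.h>
        #include <stddef.h>

        struct example_pd_resp {                          /* assumed provider-side wrapper */
                struct ib_uverbs_alloc_pd_resp ibv_resp;  /* 4 bytes */
                struct bnxt_re_pd_resp drv_resp;          /* must start at offset 4 */
        };

        /* Without the packed/aligned(4) attribute, the 8-byte dbr member would give
         * bnxt_re_pd_resp 8-byte alignment on 64-bit builds, inserting 4 bytes of
         * padding after ibv_resp and desynchronizing this layout from the kernel's
         * back-to-back response copy. */
        _Static_assert(offsetof(struct example_pd_resp, drv_resp) == 4,
                       "driver response must follow the common response with no gap");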
diff --git a/include/uapi/rdma/cxgb3-abi.h b/include/uapi/rdma/cxgb3-abi.h
index d5745e43ae85..9acb4b7a6246 100644
--- a/include/uapi/rdma/cxgb3-abi.h
+++ b/include/uapi/rdma/cxgb3-abi.h
@@ -41,21 +41,21 @@
* Make sure that all structs defined in this file remain laid out so
* that they pack the same way on 32-bit and 64-bit architectures (to
* avoid incompatibility between 32-bit userspace and 64-bit kernels).
- * In particular do not use pointer types -- pass pointers in __u64
+ * In particular do not use pointer types -- pass pointers in __aligned_u64
* instead.
*/
struct iwch_create_cq_req {
- __u64 user_rptr_addr;
+ __aligned_u64 user_rptr_addr;
};
struct iwch_create_cq_resp_v0 {
- __u64 key;
+ __aligned_u64 key;
__u32 cqid;
__u32 size_log2;
};
struct iwch_create_cq_resp {
- __u64 key;
+ __aligned_u64 key;
__u32 cqid;
__u32 size_log2;
__u32 memsize;
@@ -63,8 +63,8 @@ struct iwch_create_cq_resp {
};
struct iwch_create_qp_resp {
- __u64 key;
- __u64 db_key;
+ __aligned_u64 key;
+ __aligned_u64 db_key;
__u32 qpid;
__u32 size_log2;
__u32 sq_size_log2;
@@ -74,4 +74,9 @@ struct iwch_create_qp_resp {
struct iwch_reg_user_mr_resp {
__u32 pbl_addr;
};
+
+struct iwch_alloc_pd_resp {
+ __u32 pdid;
+};
+
#endif /* CXGB3_ABI_USER_H */
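A short sketch of the convention the cxgb3/cxgb4 header comments describe: pointers cross the ABI boundary as 8-byte integers, round-tripped through uintptr_t (helper names here are hypothetical):

        #include <linux/types.h>
        #include <stdint.h>

        static inline __u64 ptr_to_u64(void *p) { return (__u64)(uintptr_t)p; }
        static inline void *u64_to_ptr(__u64 v) { return (void *)(uintptr_t)v; }

        /* e.g. req.user_rptr_addr = ptr_to_u64(rptr); the field stays 8 bytes wide
         * whether userspace is 32-bit or 64-bit, unlike a raw pointer member. */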
diff --git a/include/uapi/rdma/cxgb4-abi.h b/include/uapi/rdma/cxgb4-abi.h
index 05f71f1bc119..1fefd0140c26 100644
--- a/include/uapi/rdma/cxgb4-abi.h
+++ b/include/uapi/rdma/cxgb4-abi.h
@@ -41,13 +41,13 @@
* Make sure that all structs defined in this file remain laid out so
* that they pack the same way on 32-bit and 64-bit architectures (to
* avoid incompatibility between 32-bit userspace and 64-bit kernels).
- * In particular do not use pointer types -- pass pointers in __u64
+ * In particular do not use pointer types -- pass pointers in __aligned_u64
* instead.
*/
struct c4iw_create_cq_resp {
- __u64 key;
- __u64 gts_key;
- __u64 memsize;
+ __aligned_u64 key;
+ __aligned_u64 gts_key;
+ __aligned_u64 memsize;
__u32 cqid;
__u32 size;
__u32 qid_mask;
@@ -59,13 +59,13 @@ enum {
};
struct c4iw_create_qp_resp {
- __u64 ma_sync_key;
- __u64 sq_key;
- __u64 rq_key;
- __u64 sq_db_gts_key;
- __u64 rq_db_gts_key;
- __u64 sq_memsize;
- __u64 rq_memsize;
+ __aligned_u64 ma_sync_key;
+ __aligned_u64 sq_key;
+ __aligned_u64 rq_key;
+ __aligned_u64 sq_db_gts_key;
+ __aligned_u64 rq_db_gts_key;
+ __aligned_u64 sq_memsize;
+ __aligned_u64 rq_memsize;
__u32 sqid;
__u32 rqid;
__u32 sq_size;
@@ -75,8 +75,13 @@ struct c4iw_create_qp_resp {
};
struct c4iw_alloc_ucontext_resp {
- __u64 status_page_key;
+ __aligned_u64 status_page_key;
__u32 status_page_size;
__u32 reserved; /* explicit padding (optional for i386) */
};
+
+struct c4iw_alloc_pd_resp {
+ __u32 pdid;
+};
+
#endif /* CXGB4_ABI_USER_H */
diff --git a/include/uapi/rdma/hfi/hfi1_ioctl.h b/include/uapi/rdma/hfi/hfi1_ioctl.h
index 9de78c5ee913..8f3d9fe7b141 100644
--- a/include/uapi/rdma/hfi/hfi1_ioctl.h
+++ b/include/uapi/rdma/hfi/hfi1_ioctl.h
@@ -79,7 +79,7 @@ struct hfi1_user_info {
};
struct hfi1_ctxt_info {
- __u64 runtime_flags; /* chip/drv runtime flags (HFI1_CAP_*) */
+ __aligned_u64 runtime_flags; /* chip/drv runtime flags (HFI1_CAP_*) */
__u32 rcvegr_size; /* size of each eager buffer */
__u16 num_active; /* number of active units */
__u16 unit; /* unit (chip) assigned to caller */
@@ -98,9 +98,9 @@ struct hfi1_ctxt_info {
struct hfi1_tid_info {
/* virtual address of first page in transfer */
- __u64 vaddr;
+ __aligned_u64 vaddr;
/* pointer to tid array. this array is big enough */
- __u64 tidlist;
+ __aligned_u64 tidlist;
/* number of tids programmed by this request */
__u32 tidcnt;
/* length of transfer buffer programmed by this request */
@@ -131,23 +131,23 @@ struct hfi1_base_info {
*/
__u32 bthqp;
/* PIO credit return address, */
- __u64 sc_credits_addr;
+ __aligned_u64 sc_credits_addr;
/*
* Base address of write-only pio buffers for this process.
* Each buffer has sendpio_credits*64 bytes.
*/
- __u64 pio_bufbase_sop;
+ __aligned_u64 pio_bufbase_sop;
/*
* Base address of write-only pio buffers for this process.
* Each buffer has sendpio_credits*64 bytes.
*/
- __u64 pio_bufbase;
+ __aligned_u64 pio_bufbase;
/* address where receive buffer queue is mapped into */
- __u64 rcvhdr_bufbase;
+ __aligned_u64 rcvhdr_bufbase;
/* base address of Eager receive buffers. */
- __u64 rcvegr_bufbase;
+ __aligned_u64 rcvegr_bufbase;
/* base address of SDMA completion ring */
- __u64 sdma_comp_bufbase;
+ __aligned_u64 sdma_comp_bufbase;
/*
* User register base for init code, not to be used directly by
* protocol or applications. Always maps real chip register space.
@@ -155,20 +155,20 @@ struct hfi1_base_info {
* ur_rcvhdrhead, ur_rcvhdrtail, ur_rcvegrhead, ur_rcvegrtail,
* ur_rcvtidflow
*/
- __u64 user_regbase;
+ __aligned_u64 user_regbase;
/* notification events */
- __u64 events_bufbase;
+ __aligned_u64 events_bufbase;
/* status page */
- __u64 status_bufbase;
+ __aligned_u64 status_bufbase;
/* rcvhdrtail update */
- __u64 rcvhdrtail_base;
+ __aligned_u64 rcvhdrtail_base;
/*
* shared memory pages for subctxts if ctxt is shared; these cover
* all the processes in the group sharing a single context.
* all have enough space for the num_subcontexts value on this job.
*/
- __u64 subctxt_uregbase;
- __u64 subctxt_rcvegrbuf;
- __u64 subctxt_rcvhdrbuf;
+ __aligned_u64 subctxt_uregbase;
+ __aligned_u64 subctxt_rcvegrbuf;
+ __aligned_u64 subctxt_rcvhdrbuf;
};
#endif /* _LINIUX__HFI1_IOCTL_H */
diff --git a/include/uapi/rdma/hfi/hfi1_user.h b/include/uapi/rdma/hfi/hfi1_user.h
index 791bea2f8297..c6a984c0c881 100644
--- a/include/uapi/rdma/hfi/hfi1_user.h
+++ b/include/uapi/rdma/hfi/hfi1_user.h
@@ -177,8 +177,8 @@ struct hfi1_sdma_comp_entry {
* Device status and notifications from driver to user-space.
*/
struct hfi1_status {
- __u64 dev; /* device/hw status bits */
- __u64 port; /* port state and status bits */
+ __aligned_u64 dev; /* device/hw status bits */
+ __aligned_u64 port; /* port state and status bits */
char freezemsg[0];
};
@@ -219,7 +219,7 @@ struct sdma_req_info {
* in charge of managing its own ring.
*/
__u16 comp_idx;
-} __packed;
+} __attribute__((__packed__));
/*
* SW KDETH header.
@@ -230,7 +230,7 @@ struct hfi1_kdeth_header {
__le16 jkey;
__le16 hcrc;
__le32 swdata[7];
-} __packed;
+} __attribute__((__packed__));
/*
* Structure describing the headers that User space uses. The
@@ -241,7 +241,7 @@ struct hfi1_pkt_header {
__be16 lrh[4];
__be32 bth[3];
struct hfi1_kdeth_header kdeth;
-} __packed;
+} __attribute__((__packed__));
/*
diff --git a/include/uapi/rdma/hns-abi.h b/include/uapi/rdma/hns-abi.h
index a9c03b0eed57..7092c8de4bd8 100644
--- a/include/uapi/rdma/hns-abi.h
+++ b/include/uapi/rdma/hns-abi.h
@@ -37,19 +37,35 @@
#include <linux/types.h>
struct hns_roce_ib_create_cq {
- __u64 buf_addr;
+ __aligned_u64 buf_addr;
+ __aligned_u64 db_addr;
+};
+
+struct hns_roce_ib_create_cq_resp {
+ __aligned_u64 cqn; /* Only 32 bits used, 64 for compat */
+ __aligned_u64 cap_flags;
};
struct hns_roce_ib_create_qp {
- __u64 buf_addr;
- __u64 db_addr;
+ __aligned_u64 buf_addr;
+ __aligned_u64 db_addr;
__u8 log_sq_bb_count;
__u8 log_sq_stride;
__u8 sq_no_prefetch;
__u8 reserved[5];
};
+struct hns_roce_ib_create_qp_resp {
+ __aligned_u64 cap_flags;
+};
+
struct hns_roce_ib_alloc_ucontext_resp {
__u32 qp_tab_size;
+ __u32 reserved;
};
+
+struct hns_roce_ib_alloc_pd_resp {
+ __u32 pdn;
+};
+
#endif /* HNS_ABI_USER_H */
diff --git a/include/uapi/rdma/i40iw-abi.h b/include/uapi/rdma/i40iw-abi.h
new file mode 100644
index 000000000000..79890baa6fdb
--- /dev/null
+++ b/include/uapi/rdma/i40iw-abi.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2006 - 2016 Intel Corporation. All rights reserved.
+ * Copyright (c) 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef I40IW_ABI_H
+#define I40IW_ABI_H
+
+#include <linux/types.h>
+
+#define I40IW_ABI_VER 5
+
+struct i40iw_alloc_ucontext_req {
+ __u32 reserved32;
+ __u8 userspace_ver;
+ __u8 reserved8[3];
+};
+
+struct i40iw_alloc_ucontext_resp {
+ __u32 max_pds; /* maximum pds allowed for this user process */
+ __u32 max_qps; /* maximum qps allowed for this user process */
+ __u32 wq_size; /* size of the WQs (sq+rq) allocated to the mmaped area */
+ __u8 kernel_ver;
+ __u8 reserved[3];
+};
+
+struct i40iw_alloc_pd_resp {
+ __u32 pd_id;
+ __u8 reserved[4];
+};
+
+struct i40iw_create_cq_req {
+ __aligned_u64 user_cq_buffer;
+ __aligned_u64 user_shadow_area;
+};
+
+struct i40iw_create_qp_req {
+ __aligned_u64 user_wqe_buffers;
+ __aligned_u64 user_compl_ctx;
+
+ /* UDA QP PHB */
+ __aligned_u64 user_sq_phb; /* place for VA of the sq phb buff */
+ __aligned_u64 user_rq_phb; /* place for VA of the rq phb buff */
+};
+
+enum i40iw_memreg_type {
+ IW_MEMREG_TYPE_MEM = 0x0000,
+ IW_MEMREG_TYPE_QP = 0x0001,
+ IW_MEMREG_TYPE_CQ = 0x0002,
+};
+
+struct i40iw_mem_reg_req {
+ __u16 reg_type; /* Memory, QP or CQ */
+ __u16 cq_pages;
+ __u16 rq_pages;
+ __u16 sq_pages;
+};
+
+struct i40iw_create_cq_resp {
+ __u32 cq_id;
+ __u32 cq_size;
+ __u32 mmap_db_index;
+ __u32 reserved;
+};
+
+struct i40iw_create_qp_resp {
+ __u32 qp_id;
+ __u32 actual_sq_size;
+ __u32 actual_rq_size;
+ __u32 i40iw_drv_opt;
+ __u16 push_idx;
+ __u8 lsmm;
+ __u8 rsvd2;
+};
+
+#endif
diff --git a/include/uapi/rdma/ib_user_cm.h b/include/uapi/rdma/ib_user_cm.h
index f4041bdc4d08..4a8f9562f7cd 100644
--- a/include/uapi/rdma/ib_user_cm.h
+++ b/include/uapi/rdma/ib_user_cm.h
@@ -73,8 +73,8 @@ struct ib_ucm_cmd_hdr {
};
struct ib_ucm_create_id {
- __u64 uid;
- __u64 response;
+ __aligned_u64 uid;
+ __aligned_u64 response;
};
struct ib_ucm_create_id_resp {
@@ -82,7 +82,7 @@ struct ib_ucm_create_id_resp {
};
struct ib_ucm_destroy_id {
- __u64 response;
+ __aligned_u64 response;
__u32 id;
__u32 reserved;
};
@@ -92,7 +92,7 @@ struct ib_ucm_destroy_id_resp {
};
struct ib_ucm_attr_id {
- __u64 response;
+ __aligned_u64 response;
__u32 id;
__u32 reserved;
};
@@ -105,7 +105,7 @@ struct ib_ucm_attr_id_resp {
};
struct ib_ucm_init_qp_attr {
- __u64 response;
+ __aligned_u64 response;
__u32 id;
__u32 qp_state;
};
@@ -123,7 +123,7 @@ struct ib_ucm_notify {
};
struct ib_ucm_private_data {
- __u64 data;
+ __aligned_u64 data;
__u32 id;
__u8 len;
__u8 reserved[3];
@@ -135,9 +135,9 @@ struct ib_ucm_req {
__u32 qp_type;
__u32 psn;
__be64 sid;
- __u64 data;
- __u64 primary_path;
- __u64 alternate_path;
+ __aligned_u64 data;
+ __aligned_u64 primary_path;
+ __aligned_u64 alternate_path;
__u8 len;
__u8 peer_to_peer;
__u8 responder_resources;
@@ -153,8 +153,8 @@ struct ib_ucm_req {
};
struct ib_ucm_rep {
- __u64 uid;
- __u64 data;
+ __aligned_u64 uid;
+ __aligned_u64 data;
__u32 id;
__u32 qpn;
__u32 psn;
@@ -172,15 +172,15 @@ struct ib_ucm_rep {
struct ib_ucm_info {
__u32 id;
__u32 status;
- __u64 info;
- __u64 data;
+ __aligned_u64 info;
+ __aligned_u64 data;
__u8 info_len;
__u8 data_len;
__u8 reserved[6];
};
struct ib_ucm_mra {
- __u64 data;
+ __aligned_u64 data;
__u32 id;
__u8 len;
__u8 timeout;
@@ -188,8 +188,8 @@ struct ib_ucm_mra {
};
struct ib_ucm_lap {
- __u64 path;
- __u64 data;
+ __aligned_u64 path;
+ __aligned_u64 data;
__u32 id;
__u8 len;
__u8 reserved[3];
@@ -199,8 +199,8 @@ struct ib_ucm_sidr_req {
__u32 id;
__u32 timeout;
__be64 sid;
- __u64 data;
- __u64 path;
+ __aligned_u64 data;
+ __aligned_u64 path;
__u16 reserved_pkey;
__u8 len;
__u8 max_cm_retries;
@@ -212,8 +212,8 @@ struct ib_ucm_sidr_rep {
__u32 qpn;
__u32 qkey;
__u32 status;
- __u64 info;
- __u64 data;
+ __aligned_u64 info;
+ __aligned_u64 data;
__u8 info_len;
__u8 data_len;
__u8 reserved[6];
@@ -222,9 +222,9 @@ struct ib_ucm_sidr_rep {
* event notification ABI structures.
*/
struct ib_ucm_event_get {
- __u64 response;
- __u64 data;
- __u64 info;
+ __aligned_u64 response;
+ __aligned_u64 data;
+ __aligned_u64 info;
__u8 data_len;
__u8 info_len;
__u8 reserved[6];
@@ -303,7 +303,7 @@ struct ib_ucm_sidr_rep_event_resp {
#define IB_UCM_PRES_ALTERNATE 0x08
struct ib_ucm_event_resp {
- __u64 uid;
+ __aligned_u64 uid;
__u32 id;
__u32 event;
__u32 present;
diff --git a/include/uapi/rdma/ib_user_ioctl_cmds.h b/include/uapi/rdma/ib_user_ioctl_cmds.h
new file mode 100644
index 000000000000..83e3890eef20
--- /dev/null
+++ b/include/uapi/rdma/ib_user_ioctl_cmds.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef IB_USER_IOCTL_CMDS_H
+#define IB_USER_IOCTL_CMDS_H
+
+#define UVERBS_ID_NS_MASK 0xF000
+#define UVERBS_ID_NS_SHIFT 12
+
+#define UVERBS_UDATA_DRIVER_DATA_NS 1
+#define UVERBS_UDATA_DRIVER_DATA_FLAG (1UL << UVERBS_ID_NS_SHIFT)
+
+enum uverbs_default_objects {
+ UVERBS_OBJECT_DEVICE, /* No instances of DEVICE are allowed */
+ UVERBS_OBJECT_PD,
+ UVERBS_OBJECT_COMP_CHANNEL,
+ UVERBS_OBJECT_CQ,
+ UVERBS_OBJECT_QP,
+ UVERBS_OBJECT_SRQ,
+ UVERBS_OBJECT_AH,
+ UVERBS_OBJECT_MR,
+ UVERBS_OBJECT_MW,
+ UVERBS_OBJECT_FLOW,
+ UVERBS_OBJECT_XRCD,
+ UVERBS_OBJECT_RWQ_IND_TBL,
+ UVERBS_OBJECT_WQ,
+ UVERBS_OBJECT_FLOW_ACTION,
+ UVERBS_OBJECT_DM,
+};
+
+enum {
+ UVERBS_ATTR_UHW_IN = UVERBS_UDATA_DRIVER_DATA_FLAG,
+ UVERBS_ATTR_UHW_OUT,
+};
+
+enum uverbs_attrs_create_cq_cmd_attr_ids {
+ UVERBS_ATTR_CREATE_CQ_HANDLE,
+ UVERBS_ATTR_CREATE_CQ_CQE,
+ UVERBS_ATTR_CREATE_CQ_USER_HANDLE,
+ UVERBS_ATTR_CREATE_CQ_COMP_CHANNEL,
+ UVERBS_ATTR_CREATE_CQ_COMP_VECTOR,
+ UVERBS_ATTR_CREATE_CQ_FLAGS,
+ UVERBS_ATTR_CREATE_CQ_RESP_CQE,
+};
+
+enum uverbs_attrs_destroy_cq_cmd_attr_ids {
+ UVERBS_ATTR_DESTROY_CQ_HANDLE,
+ UVERBS_ATTR_DESTROY_CQ_RESP,
+};
+
+enum uverbs_attrs_create_flow_action_esp {
+ UVERBS_ATTR_FLOW_ACTION_ESP_HANDLE,
+ UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS,
+ UVERBS_ATTR_FLOW_ACTION_ESP_ESN,
+ UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT,
+ UVERBS_ATTR_FLOW_ACTION_ESP_REPLAY,
+ UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP,
+};
+
+enum uverbs_attrs_destroy_flow_action_esp {
+ UVERBS_ATTR_DESTROY_FLOW_ACTION_HANDLE,
+};
+
+enum uverbs_methods_cq {
+ UVERBS_METHOD_CQ_CREATE,
+ UVERBS_METHOD_CQ_DESTROY,
+};
+
+enum uverbs_methods_actions_flow_action_ops {
+ UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
+ UVERBS_METHOD_FLOW_ACTION_DESTROY,
+ UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY,
+};
+
+enum uverbs_attrs_alloc_dm_cmd_attr_ids {
+ UVERBS_ATTR_ALLOC_DM_HANDLE,
+ UVERBS_ATTR_ALLOC_DM_LENGTH,
+ UVERBS_ATTR_ALLOC_DM_ALIGNMENT,
+};
+
+enum uverbs_attrs_free_dm_cmd_attr_ids {
+ UVERBS_ATTR_FREE_DM_HANDLE,
+};
+
+enum uverbs_methods_dm {
+ UVERBS_METHOD_DM_ALLOC,
+ UVERBS_METHOD_DM_FREE,
+};
+
+enum uverbs_attrs_reg_dm_mr_cmd_attr_ids {
+ UVERBS_ATTR_REG_DM_MR_HANDLE,
+ UVERBS_ATTR_REG_DM_MR_OFFSET,
+ UVERBS_ATTR_REG_DM_MR_LENGTH,
+ UVERBS_ATTR_REG_DM_MR_PD_HANDLE,
+ UVERBS_ATTR_REG_DM_MR_ACCESS_FLAGS,
+ UVERBS_ATTR_REG_DM_MR_DM_HANDLE,
+ UVERBS_ATTR_REG_DM_MR_RESP_LKEY,
+ UVERBS_ATTR_REG_DM_MR_RESP_RKEY,
+};
+
+enum uverbs_methods_mr {
+ UVERBS_METHOD_DM_MR_REG,
+};
+
+#endif
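A rough illustration of the ID namespacing this new header introduces: IDs below 1 << UVERBS_ID_NS_SHIFT (0x1000) belong to the common verbs namespace, and driver-private attributes start at that bit, exactly as mlx5_user_ioctl_cmds.h does further down in this diff (the enum below is a hypothetical example, not a real driver):

        #include <rdma/ib_user_ioctl_cmds.h>

        enum example_driver_attrs {
                EXAMPLE_ATTR_FIRST = (1U << UVERBS_ID_NS_SHIFT),   /* 0x1000 */
                EXAMPLE_ATTR_SECOND,                               /* 0x1001 */
        };

        /* UVERBS_ATTR_UHW_IN/OUT reuse the same bit via UVERBS_UDATA_DRIVER_DATA_FLAG,
         * so opaque driver udata rides alongside the common attributes. */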
diff --git a/include/uapi/rdma/ib_user_ioctl_verbs.h b/include/uapi/rdma/ib_user_ioctl_verbs.h
index 842792eae383..04e46ea517d3 100644
--- a/include/uapi/rdma/ib_user_ioctl_verbs.h
+++ b/include/uapi/rdma/ib_user_ioctl_verbs.h
@@ -1,5 +1,6 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */
/*
- * Copyright (c) 2017, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2017-2018, Mellanox Technologies inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -33,52 +34,69 @@
#ifndef IB_USER_IOCTL_VERBS_H
#define IB_USER_IOCTL_VERBS_H
-#include <rdma/rdma_user_ioctl.h>
-
-#define UVERBS_UDATA_DRIVER_DATA_NS 1
-#define UVERBS_UDATA_DRIVER_DATA_FLAG (1UL << UVERBS_ID_NS_SHIFT)
-
-enum uverbs_default_objects {
- UVERBS_OBJECT_DEVICE, /* No instances of DEVICE are allowed */
- UVERBS_OBJECT_PD,
- UVERBS_OBJECT_COMP_CHANNEL,
- UVERBS_OBJECT_CQ,
- UVERBS_OBJECT_QP,
- UVERBS_OBJECT_SRQ,
- UVERBS_OBJECT_AH,
- UVERBS_OBJECT_MR,
- UVERBS_OBJECT_MW,
- UVERBS_OBJECT_FLOW,
- UVERBS_OBJECT_XRCD,
- UVERBS_OBJECT_RWQ_IND_TBL,
- UVERBS_OBJECT_WQ,
- UVERBS_OBJECT_LAST,
+#include <linux/types.h>
+
+#ifndef RDMA_UAPI_PTR
+#define RDMA_UAPI_PTR(_type, _name) __aligned_u64 _name
+#endif
+
+enum ib_uverbs_flow_action_esp_keymat {
+ IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM,
};
-enum {
- UVERBS_UHW_IN = UVERBS_UDATA_DRIVER_DATA_FLAG,
- UVERBS_UHW_OUT,
+enum ib_uverbs_flow_action_esp_keymat_aes_gcm_iv_algo {
+ IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ,
};
-enum uverbs_create_cq_cmd_attr_ids {
- CREATE_CQ_HANDLE,
- CREATE_CQ_CQE,
- CREATE_CQ_USER_HANDLE,
- CREATE_CQ_COMP_CHANNEL,
- CREATE_CQ_COMP_VECTOR,
- CREATE_CQ_FLAGS,
- CREATE_CQ_RESP_CQE,
+struct ib_uverbs_flow_action_esp_keymat_aes_gcm {
+ __aligned_u64 iv;
+ __u32 iv_algo; /* Use enum ib_uverbs_flow_action_esp_keymat_aes_gcm_iv_algo */
+
+ __u32 salt;
+ __u32 icv_len;
+
+ __u32 key_len;
+ __u32 aes_key[256 / 32];
};
-enum uverbs_destroy_cq_cmd_attr_ids {
- DESTROY_CQ_HANDLE,
- DESTROY_CQ_RESP,
+enum ib_uverbs_flow_action_esp_replay {
+ IB_UVERBS_FLOW_ACTION_ESP_REPLAY_NONE,
+ IB_UVERBS_FLOW_ACTION_ESP_REPLAY_BMP,
};
-enum uverbs_actions_cq_ops {
- UVERBS_CQ_CREATE,
- UVERBS_CQ_DESTROY,
+struct ib_uverbs_flow_action_esp_replay_bmp {
+ __u32 size;
};
-#endif
+enum ib_uverbs_flow_action_esp_flags {
+ IB_UVERBS_FLOW_ACTION_ESP_FLAGS_INLINE_CRYPTO = 0UL << 0, /* Default */
+ IB_UVERBS_FLOW_ACTION_ESP_FLAGS_FULL_OFFLOAD = 1UL << 0,
+
+ IB_UVERBS_FLOW_ACTION_ESP_FLAGS_TUNNEL = 0UL << 1, /* Default */
+ IB_UVERBS_FLOW_ACTION_ESP_FLAGS_TRANSPORT = 1UL << 1,
+
+ IB_UVERBS_FLOW_ACTION_ESP_FLAGS_DECRYPT = 0UL << 2, /* Default */
+ IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT = 1UL << 2,
+ IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW = 1UL << 3,
+};
+
+struct ib_uverbs_flow_action_esp_encap {
+ /* This struct represents a list of pointers to flow_xxxx_filter that
+ * encapsulates the payload in ESP tunnel mode.
+ */
+ RDMA_UAPI_PTR(void *, val_ptr); /* pointer to a flow_xxxx_filter */
+ RDMA_UAPI_PTR(struct ib_uverbs_flow_action_esp_encap *, next_ptr);
+ __u16 len; /* Len of the filter struct val_ptr points to */
+ __u16 type; /* Use flow_spec_type enum */
+};
+
+struct ib_uverbs_flow_action_esp {
+ __u32 spi;
+ __u32 seq;
+ __u32 tfc_pad;
+ __u32 flags;
+ __aligned_u64 hard_limit_pkts;
+};
+
+#endif
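A hedged sketch of filling one ib_uverbs_flow_action_esp_encap node from the chain described above; the filter object, its length and its flow_spec type value are taken from the caller, since the exact type constants live outside this header:

        #include <rdma/ib_user_ioctl_verbs.h>
        #include <stdint.h>

        static void encap_set(struct ib_uverbs_flow_action_esp_encap *node,
                              void *filter, __u16 filter_len, __u16 spec_type,
                              struct ib_uverbs_flow_action_esp_encap *next)
        {
                node->val_ptr  = (__u64)(uintptr_t)filter;  /* RDMA_UAPI_PTR is __aligned_u64 */
                node->next_ptr = (__u64)(uintptr_t)next;    /* NULL terminates the chain */
                node->len  = filter_len;
                node->type = spec_type;                     /* value from the flow_spec type enum */
        }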
diff --git a/include/uapi/rdma/ib_user_mad.h b/include/uapi/rdma/ib_user_mad.h
index 330a3c5f1aa8..ef92118dad97 100644
--- a/include/uapi/rdma/ib_user_mad.h
+++ b/include/uapi/rdma/ib_user_mad.h
@@ -143,7 +143,7 @@ struct ib_user_mad_hdr {
*/
struct ib_user_mad {
struct ib_user_mad_hdr hdr;
- __u64 data[0];
+ __aligned_u64 data[0];
};
/*
@@ -225,7 +225,7 @@ struct ib_user_mad_reg_req2 {
__u8 mgmt_class_version;
__u16 res;
__u32 flags;
- __u64 method_mask[2];
+ __aligned_u64 method_mask[2];
__u32 oui;
__u8 rmpp_version;
__u8 reserved[3];
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 04d0e67b1312..9be07394fdbe 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -117,13 +117,13 @@ enum {
*/
struct ib_uverbs_async_event_desc {
- __u64 element;
+ __aligned_u64 element;
__u32 event_type; /* enum ib_event_type */
__u32 reserved;
};
struct ib_uverbs_comp_event_desc {
- __u64 cq_handle;
+ __aligned_u64 cq_handle;
};
struct ib_uverbs_cq_moderation_caps {
@@ -141,10 +141,7 @@ struct ib_uverbs_cq_moderation_caps {
*/
#define IB_USER_VERBS_CMD_COMMAND_MASK 0xff
-#define IB_USER_VERBS_CMD_FLAGS_MASK 0xff000000u
-#define IB_USER_VERBS_CMD_FLAGS_SHIFT 24
-
-#define IB_USER_VERBS_CMD_FLAG_EXTENDED 0x80
+#define IB_USER_VERBS_CMD_FLAG_EXTENDED 0x80000000u
struct ib_uverbs_cmd_hdr {
__u32 command;
@@ -153,15 +150,15 @@ struct ib_uverbs_cmd_hdr {
};
struct ib_uverbs_ex_cmd_hdr {
- __u64 response;
+ __aligned_u64 response;
__u16 provider_in_words;
__u16 provider_out_words;
__u32 cmd_hdr_reserved;
};
struct ib_uverbs_get_context {
- __u64 response;
- __u64 driver_data[0];
+ __aligned_u64 response;
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_get_context_resp {
@@ -170,16 +167,16 @@ struct ib_uverbs_get_context_resp {
};
struct ib_uverbs_query_device {
- __u64 response;
- __u64 driver_data[0];
+ __aligned_u64 response;
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_query_device_resp {
- __u64 fw_ver;
+ __aligned_u64 fw_ver;
__be64 node_guid;
__be64 sys_image_guid;
- __u64 max_mr_size;
- __u64 page_size_cap;
+ __aligned_u64 max_mr_size;
+ __aligned_u64 page_size_cap;
__u32 vendor_id;
__u32 vendor_part_id;
__u32 hw_ver;
@@ -224,7 +221,7 @@ struct ib_uverbs_ex_query_device {
};
struct ib_uverbs_odp_caps {
- __u64 general_caps;
+ __aligned_u64 general_caps;
struct {
__u32 rc_odp_caps;
__u32 uc_odp_caps;
@@ -263,21 +260,22 @@ struct ib_uverbs_ex_query_device_resp {
__u32 comp_mask;
__u32 response_length;
struct ib_uverbs_odp_caps odp_caps;
- __u64 timestamp_mask;
- __u64 hca_core_clock; /* in KHZ */
- __u64 device_cap_flags_ex;
+ __aligned_u64 timestamp_mask;
+ __aligned_u64 hca_core_clock; /* in KHZ */
+ __aligned_u64 device_cap_flags_ex;
struct ib_uverbs_rss_caps rss_caps;
__u32 max_wq_type_rq;
__u32 raw_packet_caps;
struct ib_uverbs_tm_caps tm_caps;
struct ib_uverbs_cq_moderation_caps cq_moderation_caps;
+ __aligned_u64 max_dm_size;
};
struct ib_uverbs_query_port {
- __u64 response;
+ __aligned_u64 response;
__u8 port_num;
__u8 reserved[7];
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_query_port_resp {
@@ -305,8 +303,8 @@ struct ib_uverbs_query_port_resp {
};
struct ib_uverbs_alloc_pd {
- __u64 response;
- __u64 driver_data[0];
+ __aligned_u64 response;
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_alloc_pd_resp {
@@ -318,10 +316,10 @@ struct ib_uverbs_dealloc_pd {
};
struct ib_uverbs_open_xrcd {
- __u64 response;
+ __aligned_u64 response;
__u32 fd;
__u32 oflags;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_open_xrcd_resp {
@@ -333,13 +331,13 @@ struct ib_uverbs_close_xrcd {
};
struct ib_uverbs_reg_mr {
- __u64 response;
- __u64 start;
- __u64 length;
- __u64 hca_va;
+ __aligned_u64 response;
+ __aligned_u64 start;
+ __aligned_u64 length;
+ __aligned_u64 hca_va;
__u32 pd_handle;
__u32 access_flags;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_reg_mr_resp {
@@ -349,12 +347,12 @@ struct ib_uverbs_reg_mr_resp {
};
struct ib_uverbs_rereg_mr {
- __u64 response;
+ __aligned_u64 response;
__u32 mr_handle;
__u32 flags;
- __u64 start;
- __u64 length;
- __u64 hca_va;
+ __aligned_u64 start;
+ __aligned_u64 length;
+ __aligned_u64 hca_va;
__u32 pd_handle;
__u32 access_flags;
};
@@ -369,7 +367,7 @@ struct ib_uverbs_dereg_mr {
};
struct ib_uverbs_alloc_mw {
- __u64 response;
+ __aligned_u64 response;
__u32 pd_handle;
__u8 mw_type;
__u8 reserved[3];
@@ -385,7 +383,7 @@ struct ib_uverbs_dealloc_mw {
};
struct ib_uverbs_create_comp_channel {
- __u64 response;
+ __aligned_u64 response;
};
struct ib_uverbs_create_comp_channel_resp {
@@ -393,13 +391,13 @@ struct ib_uverbs_create_comp_channel_resp {
};
struct ib_uverbs_create_cq {
- __u64 response;
- __u64 user_handle;
+ __aligned_u64 response;
+ __aligned_u64 user_handle;
__u32 cqe;
__u32 comp_vector;
__s32 comp_channel;
__u32 reserved;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
enum ib_uverbs_ex_create_cq_flags {
@@ -408,7 +406,7 @@ enum ib_uverbs_ex_create_cq_flags {
};
struct ib_uverbs_ex_create_cq {
- __u64 user_handle;
+ __aligned_u64 user_handle;
__u32 cqe;
__u32 comp_vector;
__s32 comp_channel;
@@ -429,26 +427,26 @@ struct ib_uverbs_ex_create_cq_resp {
};
struct ib_uverbs_resize_cq {
- __u64 response;
+ __aligned_u64 response;
__u32 cq_handle;
__u32 cqe;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_resize_cq_resp {
__u32 cqe;
__u32 reserved;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_poll_cq {
- __u64 response;
+ __aligned_u64 response;
__u32 cq_handle;
__u32 ne;
};
struct ib_uverbs_wc {
- __u64 wr_id;
+ __aligned_u64 wr_id;
__u32 status;
__u32 opcode;
__u32 vendor_err;
@@ -480,7 +478,7 @@ struct ib_uverbs_req_notify_cq {
};
struct ib_uverbs_destroy_cq {
- __u64 response;
+ __aligned_u64 response;
__u32 cq_handle;
__u32 reserved;
};
@@ -549,8 +547,8 @@ struct ib_uverbs_qp_attr {
};
struct ib_uverbs_create_qp {
- __u64 response;
- __u64 user_handle;
+ __aligned_u64 response;
+ __aligned_u64 user_handle;
__u32 pd_handle;
__u32 send_cq_handle;
__u32 recv_cq_handle;
@@ -564,7 +562,7 @@ struct ib_uverbs_create_qp {
__u8 qp_type;
__u8 is_srq;
__u8 reserved;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
enum ib_uverbs_create_qp_mask {
@@ -590,7 +588,7 @@ enum {
};
struct ib_uverbs_ex_create_qp {
- __u64 user_handle;
+ __aligned_u64 user_handle;
__u32 pd_handle;
__u32 send_cq_handle;
__u32 recv_cq_handle;
@@ -611,13 +609,13 @@ struct ib_uverbs_ex_create_qp {
};
struct ib_uverbs_open_qp {
- __u64 response;
- __u64 user_handle;
+ __aligned_u64 response;
+ __aligned_u64 user_handle;
__u32 pd_handle;
__u32 qpn;
__u8 qp_type;
__u8 reserved[7];
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
/* also used for open response */
@@ -658,10 +656,10 @@ struct ib_uverbs_qp_dest {
};
struct ib_uverbs_query_qp {
- __u64 response;
+ __aligned_u64 response;
__u32 qp_handle;
__u32 attr_mask;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_query_qp_resp {
@@ -695,7 +693,7 @@ struct ib_uverbs_query_qp_resp {
__u8 alt_timeout;
__u8 sq_sig_all;
__u8 reserved[5];
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_modify_qp {
@@ -725,7 +723,7 @@ struct ib_uverbs_modify_qp {
__u8 alt_port_num;
__u8 alt_timeout;
__u8 reserved[2];
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_ex_modify_qp {
@@ -743,7 +741,7 @@ struct ib_uverbs_ex_modify_qp_resp {
};
struct ib_uverbs_destroy_qp {
- __u64 response;
+ __aligned_u64 response;
__u32 qp_handle;
__u32 reserved;
};
@@ -759,13 +757,13 @@ struct ib_uverbs_destroy_qp_resp {
* document the ABI.
*/
struct ib_uverbs_sge {
- __u64 addr;
+ __aligned_u64 addr;
__u32 length;
__u32 lkey;
};
struct ib_uverbs_send_wr {
- __u64 wr_id;
+ __aligned_u64 wr_id;
__u32 num_sge;
__u32 opcode;
__u32 send_flags;
@@ -775,14 +773,14 @@ struct ib_uverbs_send_wr {
} ex;
union {
struct {
- __u64 remote_addr;
+ __aligned_u64 remote_addr;
__u32 rkey;
__u32 reserved;
} rdma;
struct {
- __u64 remote_addr;
- __u64 compare_add;
- __u64 swap;
+ __aligned_u64 remote_addr;
+ __aligned_u64 compare_add;
+ __aligned_u64 swap;
__u32 rkey;
__u32 reserved;
} atomic;
@@ -796,7 +794,7 @@ struct ib_uverbs_send_wr {
};
struct ib_uverbs_post_send {
- __u64 response;
+ __aligned_u64 response;
__u32 qp_handle;
__u32 wr_count;
__u32 sge_count;
@@ -809,13 +807,13 @@ struct ib_uverbs_post_send_resp {
};
struct ib_uverbs_recv_wr {
- __u64 wr_id;
+ __aligned_u64 wr_id;
__u32 num_sge;
__u32 reserved;
};
struct ib_uverbs_post_recv {
- __u64 response;
+ __aligned_u64 response;
__u32 qp_handle;
__u32 wr_count;
__u32 sge_count;
@@ -828,7 +826,7 @@ struct ib_uverbs_post_recv_resp {
};
struct ib_uverbs_post_srq_recv {
- __u64 response;
+ __aligned_u64 response;
__u32 srq_handle;
__u32 wr_count;
__u32 sge_count;
@@ -841,8 +839,8 @@ struct ib_uverbs_post_srq_recv_resp {
};
struct ib_uverbs_create_ah {
- __u64 response;
- __u64 user_handle;
+ __aligned_u64 response;
+ __aligned_u64 user_handle;
__u32 pd_handle;
__u32 reserved;
struct ib_uverbs_ah_attr attr;
@@ -861,7 +859,7 @@ struct ib_uverbs_attach_mcast {
__u32 qp_handle;
__u16 mlid;
__u16 reserved;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_detach_mcast {
@@ -869,7 +867,7 @@ struct ib_uverbs_detach_mcast {
__u32 qp_handle;
__u16 mlid;
__u16 reserved;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_flow_spec_hdr {
@@ -877,7 +875,7 @@ struct ib_uverbs_flow_spec_hdr {
__u16 size;
__u16 reserved;
/* followed by flow_spec */
- __u64 flow_spec_data[0];
+ __aligned_u64 flow_spec_data[0];
};
struct ib_uverbs_flow_eth_filter {
@@ -987,6 +985,19 @@ struct ib_uverbs_flow_spec_action_drop {
};
};
+struct ib_uverbs_flow_spec_action_handle {
+ union {
+ struct ib_uverbs_flow_spec_hdr hdr;
+ struct {
+ __u32 type;
+ __u16 size;
+ __u16 reserved;
+ };
+ };
+ __u32 handle;
+ __u32 reserved1;
+};
+
struct ib_uverbs_flow_tunnel_filter {
__be32 tunnel_id;
};
@@ -1004,6 +1015,24 @@ struct ib_uverbs_flow_spec_tunnel {
struct ib_uverbs_flow_tunnel_filter mask;
};
+struct ib_uverbs_flow_spec_esp_filter {
+ __u32 spi;
+ __u32 seq;
+};
+
+struct ib_uverbs_flow_spec_esp {
+ union {
+ struct ib_uverbs_flow_spec_hdr hdr;
+ struct {
+ __u32 type;
+ __u16 size;
+ __u16 reserved;
+ };
+ };
+ struct ib_uverbs_flow_spec_esp_filter val;
+ struct ib_uverbs_flow_spec_esp_filter mask;
+};
+
struct ib_uverbs_flow_attr {
__u32 type;
__u16 size;
@@ -1036,18 +1065,18 @@ struct ib_uverbs_destroy_flow {
};
struct ib_uverbs_create_srq {
- __u64 response;
- __u64 user_handle;
+ __aligned_u64 response;
+ __aligned_u64 user_handle;
__u32 pd_handle;
__u32 max_wr;
__u32 max_sge;
__u32 srq_limit;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_create_xsrq {
- __u64 response;
- __u64 user_handle;
+ __aligned_u64 response;
+ __aligned_u64 user_handle;
__u32 srq_type;
__u32 pd_handle;
__u32 max_wr;
@@ -1056,7 +1085,7 @@ struct ib_uverbs_create_xsrq {
__u32 max_num_tags;
__u32 xrcd_handle;
__u32 cq_handle;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_create_srq_resp {
@@ -1071,14 +1100,14 @@ struct ib_uverbs_modify_srq {
__u32 attr_mask;
__u32 max_wr;
__u32 srq_limit;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_query_srq {
- __u64 response;
+ __aligned_u64 response;
__u32 srq_handle;
__u32 reserved;
- __u64 driver_data[0];
+ __aligned_u64 driver_data[0];
};
struct ib_uverbs_query_srq_resp {
@@ -1089,7 +1118,7 @@ struct ib_uverbs_query_srq_resp {
};
struct ib_uverbs_destroy_srq {
- __u64 response;
+ __aligned_u64 response;
__u32 srq_handle;
__u32 reserved;
};
@@ -1101,7 +1130,7 @@ struct ib_uverbs_destroy_srq_resp {
struct ib_uverbs_ex_create_wq {
__u32 comp_mask;
__u32 wq_type;
- __u64 user_handle;
+ __aligned_u64 user_handle;
__u32 pd_handle;
__u32 cq_handle;
__u32 max_wr;
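A sketch (assumed dispatch shape, not the kernel's exact code) of how the reworked flag is consumed: the extended-command marker now lives directly in bit 31 of hdr.command, so no separate FLAGS_SHIFT is needed to recover it or the low command bits.

        #include <rdma/ib_user_verbs.h>

        static int example_decode(const struct ib_uverbs_cmd_hdr *hdr,
                                  __u32 *command, int *extended)
        {
                if (hdr->command & ~(__u32)(IB_USER_VERBS_CMD_COMMAND_MASK |
                                            IB_USER_VERBS_CMD_FLAG_EXTENDED))
                        return -1;          /* unknown flag bits */

                *command  = hdr->command & IB_USER_VERBS_CMD_COMMAND_MASK;
                *extended = !!(hdr->command & IB_USER_VERBS_CMD_FLAG_EXTENDED);
                return 0;
        }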
diff --git a/include/uapi/rdma/mlx4-abi.h b/include/uapi/rdma/mlx4-abi.h
index 7f9c37346613..04f64bc4045f 100644
--- a/include/uapi/rdma/mlx4-abi.h
+++ b/include/uapi/rdma/mlx4-abi.h
@@ -59,6 +59,10 @@ struct mlx4_ib_alloc_ucontext_resp_v3 {
__u16 bf_regs_per_page;
};
+enum {
+ MLX4_USER_DEV_CAP_LARGE_CQE = 1L << 0,
+};
+
struct mlx4_ib_alloc_ucontext_resp {
__u32 dev_caps;
__u32 qp_tab_size;
@@ -73,8 +77,8 @@ struct mlx4_ib_alloc_pd_resp {
};
struct mlx4_ib_create_cq {
- __u64 buf_addr;
- __u64 db_addr;
+ __aligned_u64 buf_addr;
+ __aligned_u64 db_addr;
};
struct mlx4_ib_create_cq_resp {
@@ -83,12 +87,12 @@ struct mlx4_ib_create_cq_resp {
};
struct mlx4_ib_resize_cq {
- __u64 buf_addr;
+ __aligned_u64 buf_addr;
};
struct mlx4_ib_create_srq {
- __u64 buf_addr;
- __u64 db_addr;
+ __aligned_u64 buf_addr;
+ __aligned_u64 db_addr;
};
struct mlx4_ib_create_srq_resp {
@@ -97,7 +101,7 @@ struct mlx4_ib_create_srq_resp {
};
struct mlx4_ib_create_qp_rss {
- __u64 rx_hash_fields_mask; /* Use enum mlx4_ib_rx_hash_fields */
+ __aligned_u64 rx_hash_fields_mask; /* Use enum mlx4_ib_rx_hash_fields */
__u8 rx_hash_function; /* Use enum mlx4_ib_rx_hash_function_flags */
__u8 reserved[7];
__u8 rx_hash_key[40];
@@ -106,8 +110,8 @@ struct mlx4_ib_create_qp_rss {
};
struct mlx4_ib_create_qp {
- __u64 buf_addr;
- __u64 db_addr;
+ __aligned_u64 buf_addr;
+ __aligned_u64 db_addr;
__u8 log_sq_bb_count;
__u8 log_sq_stride;
__u8 sq_no_prefetch;
@@ -116,8 +120,8 @@ struct mlx4_ib_create_qp {
};
struct mlx4_ib_create_wq {
- __u64 buf_addr;
- __u64 db_addr;
+ __aligned_u64 buf_addr;
+ __aligned_u64 db_addr;
__u8 log_range_size;
__u8 reserved[3];
__u32 comp_mask;
@@ -156,4 +160,32 @@ enum mlx4_ib_rx_hash_fields {
MLX4_IB_RX_HASH_INNER = 1ULL << 31,
};
+struct mlx4_ib_rss_caps {
+ __aligned_u64 rx_hash_fields_mask; /* enum mlx4_ib_rx_hash_fields */
+ __u8 rx_hash_function; /* enum mlx4_ib_rx_hash_function_flags */
+ __u8 reserved[7];
+};
+
+enum query_device_resp_mask {
+ MLX4_IB_QUERY_DEV_RESP_MASK_CORE_CLOCK_OFFSET = 1UL << 0,
+};
+
+struct mlx4_ib_tso_caps {
+ __u32 max_tso; /* Maximum tso payload size in bytes */
+ /* Corresponding bit will be set if qp type from
+ * 'enum ib_qp_type' is supported.
+ */
+ __u32 supported_qpts;
+};
+
+struct mlx4_uverbs_ex_query_device_resp {
+ __u32 comp_mask;
+ __u32 response_length;
+ __aligned_u64 hca_core_clock_offset;
+ __u32 max_inl_recv_sz;
+ __u32 reserved;
+ struct mlx4_ib_rss_caps rss_caps;
+ struct mlx4_ib_tso_caps tso_caps;
+};
+
#endif /* MLX4_ABI_USER_H */
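An illustration of the supported_qpts convention noted in the TSO caps comment above: each bit position corresponds to a value of enum ib_qp_type, so a caller that knows its QP type can test it directly (sketch only, not provider code):

        #include <rdma/mlx4-abi.h>

        static int example_tso_supported(const struct mlx4_uverbs_ex_query_device_resp *resp,
                                         unsigned int qp_type /* enum ib_qp_type value */)
        {
                return resp->tso_caps.max_tso != 0 &&
                       (resp->tso_caps.supported_qpts & (1u << qp_type)) != 0;
        }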
diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h
index 1111aa4e7c1e..cb4a02c4a1ce 100644
--- a/include/uapi/rdma/mlx5-abi.h
+++ b/include/uapi/rdma/mlx5-abi.h
@@ -84,7 +84,7 @@ struct mlx5_ib_alloc_ucontext_req_v2 {
__u8 reserved0;
__u16 reserved1;
__u32 reserved2;
- __u64 lib_caps;
+ __aligned_u64 lib_caps;
};
enum mlx5_ib_alloc_ucontext_resp_mask {
@@ -107,6 +107,14 @@ enum mlx5_user_inline_mode {
MLX5_USER_INLINE_MODE_TCP_UDP,
};
+enum {
+ MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM = 1 << 0,
+ MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA = 1 << 1,
+ MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING = 1 << 2,
+ MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD = 1 << 3,
+ MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN = 1 << 4,
+};
+
struct mlx5_ib_alloc_ucontext_resp {
__u32 qp_tab_size;
__u32 bf_reg_size;
@@ -118,14 +126,14 @@ struct mlx5_ib_alloc_ucontext_resp {
__u32 max_recv_wr;
__u32 max_srq_recv_wr;
__u16 num_ports;
- __u16 reserved1;
+ __u16 flow_action_flags;
__u32 comp_mask;
__u32 response_length;
__u8 cqe_version;
__u8 cmds_supp_uhw;
__u8 eth_min_inline;
__u8 clock_info_versions;
- __u64 hca_core_clock_offset;
+ __aligned_u64 hca_core_clock_offset;
__u32 log_uar_size;
__u32 num_uars_per_page;
__u32 num_dyn_bfregs;
@@ -147,7 +155,7 @@ struct mlx5_ib_tso_caps {
};
struct mlx5_ib_rss_caps {
- __u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
+ __aligned_u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
__u8 rx_hash_function; /* enum mlx5_rx_hash_function_flags */
__u8 reserved[7];
};
@@ -163,6 +171,10 @@ struct mlx5_ib_cqe_comp_caps {
__u32 supported_format; /* enum mlx5_ib_cqe_comp_res_format */
};
+enum mlx5_ib_packet_pacing_cap_flags {
+ MLX5_IB_PP_SUPPORT_BURST = 1 << 0,
+};
+
struct mlx5_packet_pacing_caps {
__u32 qp_rate_limit_min;
__u32 qp_rate_limit_max; /* In kpbs */
@@ -172,7 +184,8 @@ struct mlx5_packet_pacing_caps {
* supported_qpts |= 1 << IB_QPT_RAW_PACKET
*/
__u32 supported_qpts;
- __u32 reserved;
+ __u8 cap_flags; /* enum mlx5_ib_packet_pacing_cap_flags */
+ __u8 reserved[3];
};
enum mlx5_ib_mpw_caps {
@@ -243,8 +256,8 @@ enum mlx5_ib_create_cq_flags {
};
struct mlx5_ib_create_cq {
- __u64 buf_addr;
- __u64 db_addr;
+ __aligned_u64 buf_addr;
+ __aligned_u64 db_addr;
__u32 cqe_size;
__u8 cqe_comp_en;
__u8 cqe_comp_res_format;
@@ -257,15 +270,15 @@ struct mlx5_ib_create_cq_resp {
};
struct mlx5_ib_resize_cq {
- __u64 buf_addr;
+ __aligned_u64 buf_addr;
__u16 cqe_size;
__u16 reserved0;
__u32 reserved1;
};
struct mlx5_ib_create_srq {
- __u64 buf_addr;
- __u64 db_addr;
+ __aligned_u64 buf_addr;
+ __aligned_u64 db_addr;
__u32 flags;
__u32 reserved0; /* explicit padding (optional on i386) */
__u32 uidx;
@@ -278,8 +291,8 @@ struct mlx5_ib_create_srq_resp {
};
struct mlx5_ib_create_qp {
- __u64 buf_addr;
- __u64 db_addr;
+ __aligned_u64 buf_addr;
+ __aligned_u64 db_addr;
__u32 sq_wqe_count;
__u32 rq_wqe_count;
__u32 rq_wqe_shift;
@@ -287,8 +300,8 @@ struct mlx5_ib_create_qp {
__u32 uidx;
__u32 bfreg_index;
union {
- __u64 sq_buf_addr;
- __u64 access_key;
+ __aligned_u64 sq_buf_addr;
+ __aligned_u64 access_key;
};
};
@@ -314,12 +327,13 @@ enum mlx5_rx_hash_fields {
MLX5_RX_HASH_DST_PORT_TCP = 1 << 5,
MLX5_RX_HASH_SRC_PORT_UDP = 1 << 6,
MLX5_RX_HASH_DST_PORT_UDP = 1 << 7,
+ MLX5_RX_HASH_IPSEC_SPI = 1 << 8,
/* Save bits for future fields */
MLX5_RX_HASH_INNER = (1UL << 31),
};
struct mlx5_ib_create_qp_rss {
- __u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
+ __aligned_u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
__u8 rx_hash_function; /* enum mlx5_rx_hash_function_flags */
__u8 rx_key_len; /* valid only for Toeplitz */
__u8 reserved[6];
@@ -330,6 +344,7 @@ struct mlx5_ib_create_qp_rss {
struct mlx5_ib_create_qp_resp {
__u32 bfreg_index;
+ __u32 reserved;
};
struct mlx5_ib_alloc_mw {
@@ -344,8 +359,8 @@ enum mlx5_ib_create_wq_mask {
};
struct mlx5_ib_create_wq {
- __u64 buf_addr;
- __u64 db_addr;
+ __aligned_u64 buf_addr;
+ __aligned_u64 db_addr;
__u32 rq_wqe_count;
__u32 rq_wqe_shift;
__u32 user_index;
@@ -362,6 +377,18 @@ struct mlx5_ib_create_ah_resp {
__u8 reserved[6];
};
+struct mlx5_ib_burst_info {
+ __u32 max_burst_sz;
+ __u16 typical_pkt_sz;
+ __u16 reserved;
+};
+
+struct mlx5_ib_modify_qp {
+ __u32 comp_mask;
+ struct mlx5_ib_burst_info burst_info;
+ __u32 reserved;
+};
+
struct mlx5_ib_modify_qp_resp {
__u32 response_length;
__u32 dctn;
@@ -385,13 +412,13 @@ struct mlx5_ib_modify_wq {
struct mlx5_ib_clock_info {
__u32 sign;
__u32 resv;
- __u64 nsec;
- __u64 cycles;
- __u64 frac;
+ __aligned_u64 nsec;
+ __aligned_u64 cycles;
+ __aligned_u64 frac;
__u32 mult;
__u32 shift;
- __u64 mask;
- __u64 overflow_period;
+ __aligned_u64 mask;
+ __aligned_u64 overflow_period;
};
enum mlx5_ib_mmap_cmd {
@@ -403,6 +430,7 @@ enum mlx5_ib_mmap_cmd {
MLX5_IB_MMAP_CORE_CLOCK = 5,
MLX5_IB_MMAP_ALLOC_WC = 6,
MLX5_IB_MMAP_CLOCK_INFO = 7,
+ MLX5_IB_MMAP_DEVICE_MEM = 8,
};
enum {
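Sketch only: the reserved1 field repurposed above as flow_action_flags lets userspace discover the ESP/IPsec offload capabilities advertised at alloc_ucontext time, using the bits defined earlier in this header (the helper name is hypothetical):

        #include <rdma/mlx5-abi.h>

        static int example_esp_aes_gcm_available(const struct mlx5_ib_alloc_ucontext_resp *resp)
        {
                return !!(resp->flow_action_flags &
                          MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM);
        }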
diff --git a/include/uapi/rdma/mlx5_user_ioctl_cmds.h b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
new file mode 100644
index 000000000000..f7d685ef2d1f
--- /dev/null
+++ b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX5_USER_IOCTL_CMDS_H
+#define MLX5_USER_IOCTL_CMDS_H
+
+#include <rdma/ib_user_ioctl_cmds.h>
+
+enum mlx5_ib_create_flow_action_attrs {
+ /* This attribute belong to the driver namespace */
+ MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS = (1U << UVERBS_ID_NS_SHIFT),
+};
+
+enum mlx5_ib_alloc_dm_attrs {
+ MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
+};
+
+#endif
diff --git a/include/uapi/rdma/mlx5_user_ioctl_verbs.h b/include/uapi/rdma/mlx5_user_ioctl_verbs.h
new file mode 100644
index 000000000000..8a2fb33f3ed4
--- /dev/null
+++ b/include/uapi/rdma/mlx5_user_ioctl_verbs.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX5_USER_IOCTL_VERBS_H
+#define MLX5_USER_IOCTL_VERBS_H
+
+#include <linux/types.h>
+
+enum mlx5_ib_uapi_flow_action_flags {
+ MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA = 1 << 0,
+};
+
+#endif
+
diff --git a/include/uapi/rdma/mthca-abi.h b/include/uapi/rdma/mthca-abi.h
index 3020d8a907a7..ac756cd9e807 100644
--- a/include/uapi/rdma/mthca-abi.h
+++ b/include/uapi/rdma/mthca-abi.h
@@ -74,8 +74,8 @@ struct mthca_reg_mr {
struct mthca_create_cq {
__u32 lkey;
__u32 pdn;
- __u64 arm_db_page;
- __u64 set_db_page;
+ __aligned_u64 arm_db_page;
+ __aligned_u64 set_db_page;
__u32 arm_db_index;
__u32 set_db_index;
};
@@ -93,7 +93,7 @@ struct mthca_resize_cq {
struct mthca_create_srq {
__u32 lkey;
__u32 db_index;
- __u64 db_page;
+ __aligned_u64 db_page;
};
struct mthca_create_srq_resp {
@@ -104,8 +104,8 @@ struct mthca_create_srq_resp {
struct mthca_create_qp {
__u32 lkey;
__u32 reserved;
- __u64 sq_db_page;
- __u64 rq_db_page;
+ __aligned_u64 sq_db_page;
+ __aligned_u64 rq_db_page;
__u32 sq_db_index;
__u32 rq_db_index;
};
diff --git a/include/uapi/rdma/nes-abi.h b/include/uapi/rdma/nes-abi.h
index f5b2437aab28..35bfd4015d07 100644
--- a/include/uapi/rdma/nes-abi.h
+++ b/include/uapi/rdma/nes-abi.h
@@ -72,14 +72,14 @@ struct nes_alloc_pd_resp {
};
struct nes_create_cq_req {
- __u64 user_cq_buffer;
+ __aligned_u64 user_cq_buffer;
__u32 mcrqf;
__u8 reserved[4];
};
struct nes_create_qp_req {
- __u64 user_wqe_buffers;
- __u64 user_qp_buffer;
+ __aligned_u64 user_wqe_buffers;
+ __aligned_u64 user_qp_buffer;
};
enum iwnes_memreg_type {
diff --git a/include/uapi/rdma/ocrdma-abi.h b/include/uapi/rdma/ocrdma-abi.h
index ad64a3cea1cd..284d47b41f6e 100644
--- a/include/uapi/rdma/ocrdma-abi.h
+++ b/include/uapi/rdma/ocrdma-abi.h
@@ -55,17 +55,17 @@ struct ocrdma_alloc_ucontext_resp {
__u32 wqe_size;
__u32 max_inline_data;
__u32 dpp_wqe_size;
- __u64 ah_tbl_page;
+ __aligned_u64 ah_tbl_page;
__u32 ah_tbl_len;
__u32 rqe_size;
__u8 fw_ver[32];
/* for future use/new features in progress */
- __u64 rsvd1;
- __u64 rsvd2;
+ __aligned_u64 rsvd1;
+ __aligned_u64 rsvd2;
};
struct ocrdma_alloc_pd_ureq {
- __u64 rsvd1;
+ __u32 rsvd[2];
};
struct ocrdma_alloc_pd_uresp {
@@ -73,7 +73,7 @@ struct ocrdma_alloc_pd_uresp {
__u32 dpp_enabled;
__u32 dpp_page_addr_hi;
__u32 dpp_page_addr_lo;
- __u64 rsvd1;
+ __u32 rsvd[2];
};
struct ocrdma_create_cq_ureq {
@@ -87,13 +87,13 @@ struct ocrdma_create_cq_uresp {
__u32 page_size;
__u32 num_pages;
__u32 max_hw_cqe;
- __u64 page_addr[MAX_CQ_PAGES];
- __u64 db_page_addr;
+ __aligned_u64 page_addr[MAX_CQ_PAGES];
+ __aligned_u64 db_page_addr;
__u32 db_page_size;
__u32 phase_change;
/* for future use/new features in progress */
- __u64 rsvd1;
- __u64 rsvd2;
+ __aligned_u64 rsvd1;
+ __aligned_u64 rsvd2;
};
#define MAX_QP_PAGES 8
@@ -115,9 +115,9 @@ struct ocrdma_create_qp_uresp {
__u32 rq_page_size;
__u32 num_sq_pages;
__u32 num_rq_pages;
- __u64 sq_page_addr[MAX_QP_PAGES];
- __u64 rq_page_addr[MAX_QP_PAGES];
- __u64 db_page_addr;
+ __aligned_u64 sq_page_addr[MAX_QP_PAGES];
+ __aligned_u64 rq_page_addr[MAX_QP_PAGES];
+ __aligned_u64 db_page_addr;
__u32 db_page_size;
__u32 dpp_credit;
__u32 dpp_offset;
@@ -126,8 +126,8 @@ struct ocrdma_create_qp_uresp {
__u32 db_sq_offset;
__u32 db_rq_offset;
__u32 db_shift;
- __u64 rsvd[11];
-} __packed;
+ __aligned_u64 rsvd[11];
+};
struct ocrdma_create_srq_uresp {
__u16 rq_dbid;
@@ -137,16 +137,16 @@ struct ocrdma_create_srq_uresp {
__u32 rq_page_size;
__u32 num_rq_pages;
- __u64 rq_page_addr[MAX_QP_PAGES];
- __u64 db_page_addr;
+ __aligned_u64 rq_page_addr[MAX_QP_PAGES];
+ __aligned_u64 db_page_addr;
__u32 db_page_size;
__u32 num_rqe_allocated;
__u32 db_rq_offset;
__u32 db_shift;
- __u64 rsvd2;
- __u64 rsvd3;
+ __aligned_u64 rsvd2;
+ __aligned_u64 rsvd3;
};
#endif /* OCRDMA_ABI_USER_H */
diff --git a/include/uapi/rdma/qedr-abi.h b/include/uapi/rdma/qedr-abi.h
index 261c6db4623e..8ba098900e9a 100644
--- a/include/uapi/rdma/qedr-abi.h
+++ b/include/uapi/rdma/qedr-abi.h
@@ -40,7 +40,7 @@
/* user kernel communication data structures. */
struct qedr_alloc_ucontext_resp {
- __u64 db_pa;
+ __aligned_u64 db_pa;
__u32 db_size;
__u32 max_send_wr;
@@ -53,24 +53,27 @@ struct qedr_alloc_ucontext_resp {
__u8 dpm_enabled;
__u8 wids_enabled;
__u16 wid_count;
+ __u32 reserved;
};
struct qedr_alloc_pd_ureq {
- __u64 rsvd1;
+ __aligned_u64 rsvd1;
};
struct qedr_alloc_pd_uresp {
__u32 pd_id;
+ __u32 reserved;
};
struct qedr_create_cq_ureq {
- __u64 addr;
- __u64 len;
+ __aligned_u64 addr;
+ __aligned_u64 len;
};
struct qedr_create_cq_uresp {
__u32 db_offset;
__u16 icid;
+ __u16 reserved;
};
struct qedr_create_qp_ureq {
@@ -79,17 +82,17 @@ struct qedr_create_qp_ureq {
/* SQ */
/* user space virtual address of SQ buffer */
- __u64 sq_addr;
+ __aligned_u64 sq_addr;
/* length of SQ buffer */
- __u64 sq_len;
+ __aligned_u64 sq_len;
/* RQ */
/* user space virtual address of RQ buffer */
- __u64 rq_addr;
+ __aligned_u64 rq_addr;
/* length of RQ buffer */
- __u64 rq_len;
+ __aligned_u64 rq_len;
};
struct qedr_create_qp_uresp {
@@ -105,6 +108,7 @@ struct qedr_create_qp_uresp {
__u16 rq_icid;
__u32 rq_db2_offset;
+ __u32 reserved;
};
#endif /* __QEDR_USER_H__ */
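The 'reserved' members added above make each request/response struct a multiple of 8 bytes with identical layout for 32-bit and 64-bit userspace. A sketch of the kind of invariant this preserves (sizes computed from the fields shown in this hunk):

        #include <rdma/qedr-abi.h>

        _Static_assert(sizeof(struct qedr_alloc_pd_uresp) == 8,
                       "pd_id + reserved must pack to 8 bytes on every arch");
        _Static_assert(sizeof(struct qedr_create_cq_uresp) == 8,
                       "db_offset + icid + reserved must pack to 8 bytes on every arch");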
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h
index 4c77e2a7b07e..0ce0943fc808 100644
--- a/include/uapi/rdma/rdma_netlink.h
+++ b/include/uapi/rdma/rdma_netlink.h
@@ -238,6 +238,14 @@ enum rdma_nldev_command {
RDMA_NLDEV_CMD_RES_QP_GET, /* can dump */
+ RDMA_NLDEV_CMD_RES_CM_ID_GET, /* can dump */
+
+ RDMA_NLDEV_CMD_RES_CQ_GET, /* can dump */
+
+ RDMA_NLDEV_CMD_RES_MR_GET, /* can dump */
+
+ RDMA_NLDEV_CMD_RES_PD_GET, /* can dump */
+
RDMA_NLDEV_NUM_OPS
};
@@ -350,6 +358,49 @@ enum rdma_nldev_attr {
*/
RDMA_NLDEV_ATTR_RES_KERN_NAME, /* string */
+ RDMA_NLDEV_ATTR_RES_CM_ID, /* nested table */
+ RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY, /* nested table */
+ /*
+ * rdma_cm_id port space.
+ */
+ RDMA_NLDEV_ATTR_RES_PS, /* u32 */
+ /*
+ * Source and destination socket addresses
+ */
+ RDMA_NLDEV_ATTR_RES_SRC_ADDR, /* __kernel_sockaddr_storage */
+ RDMA_NLDEV_ATTR_RES_DST_ADDR, /* __kernel_sockaddr_storage */
+
+ RDMA_NLDEV_ATTR_RES_CQ, /* nested table */
+ RDMA_NLDEV_ATTR_RES_CQ_ENTRY, /* nested table */
+ RDMA_NLDEV_ATTR_RES_CQE, /* u32 */
+ RDMA_NLDEV_ATTR_RES_USECNT, /* u64 */
+ RDMA_NLDEV_ATTR_RES_POLL_CTX, /* u8 */
+
+ RDMA_NLDEV_ATTR_RES_MR, /* nested table */
+ RDMA_NLDEV_ATTR_RES_MR_ENTRY, /* nested table */
+ RDMA_NLDEV_ATTR_RES_RKEY, /* u32 */
+ RDMA_NLDEV_ATTR_RES_LKEY, /* u32 */
+ RDMA_NLDEV_ATTR_RES_IOVA, /* u64 */
+ RDMA_NLDEV_ATTR_RES_MRLEN, /* u64 */
+
+ RDMA_NLDEV_ATTR_RES_PD, /* nested table */
+ RDMA_NLDEV_ATTR_RES_PD_ENTRY, /* nested table */
+ RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY, /* u32 */
+ RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY, /* u32 */
+
+ /*
+	 * Provides the logical name and index of the netdevice which is
+	 * connected to the physical port. This information is relevant
+	 * for RoCE and iWARP.
+	 *
+	 * The netdevices associated with containers are expected to be
+	 * exported together with the GID table once it is exposed
+	 * through netlink, because the associated netdevices are
+	 * properties of GIDs.
+ */
+ RDMA_NLDEV_ATTR_NDEV_INDEX, /* u32 */
+ RDMA_NLDEV_ATTR_NDEV_NAME, /* string */
+
RDMA_NLDEV_ATTR_MAX
};
#endif /* _UAPI_RDMA_NETLINK_H */
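[Editor's sketch] The new RDMA_NLDEV_CMD_RES_*_GET commands dump each resource type as a nested table of per-object entries built from the attributes added above. The following is only a rough illustration of that layout; the helper name and its arguments are hypothetical, while the nla_* helpers and attribute IDs are the existing netlink/rdma ones:

#include <net/netlink.h>
#include <rdma/rdma_netlink.h>

/* Hypothetical helper: emit one MR entry into the nested
 * RDMA_NLDEV_ATTR_RES_MR table of a dump message.
 */
static int fill_res_mr_entry(struct sk_buff *msg, u32 lkey, u32 rkey)
{
	struct nlattr *entry;

	entry = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_MR_ENTRY);
	if (!entry)
		return -EMSGSIZE;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LKEY, lkey) ||
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RKEY, rkey))
		goto err;

	nla_nest_end(msg, entry);
	return 0;

err:
	nla_nest_cancel(msg, entry);
	return -EMSGSIZE;
}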
diff --git a/include/uapi/rdma/rdma_user_cm.h b/include/uapi/rdma/rdma_user_cm.h
index c83ef0026079..e1269024af47 100644
--- a/include/uapi/rdma/rdma_user_cm.h
+++ b/include/uapi/rdma/rdma_user_cm.h
@@ -70,6 +70,14 @@ enum {
RDMA_USER_CM_CMD_JOIN_MCAST
};
+/* See IBTA Annex A11, service ID bytes 4 & 5 */
+enum rdma_ucm_port_space {
+ RDMA_PS_IPOIB = 0x0002,
+ RDMA_PS_IB = 0x013F,
+ RDMA_PS_TCP = 0x0106,
+ RDMA_PS_UDP = 0x0111,
+};
+
/*
* command ABI structures.
*/
@@ -80,9 +88,9 @@ struct rdma_ucm_cmd_hdr {
};
struct rdma_ucm_create_id {
- __u64 uid;
- __u64 response;
- __u16 ps;
+ __aligned_u64 uid;
+ __aligned_u64 response;
+ __u16 ps; /* use enum rdma_ucm_port_space */
__u8 qp_type;
__u8 reserved[5];
};
@@ -92,7 +100,7 @@ struct rdma_ucm_create_id_resp {
};
struct rdma_ucm_destroy_id {
- __u64 response;
+ __aligned_u64 response;
__u32 id;
__u32 reserved;
};
@@ -102,7 +110,7 @@ struct rdma_ucm_destroy_id_resp {
};
struct rdma_ucm_bind_ip {
- __u64 response;
+ __aligned_u64 response;
struct sockaddr_in6 addr;
__u32 id;
};
@@ -143,13 +151,13 @@ enum {
};
struct rdma_ucm_query {
- __u64 response;
+ __aligned_u64 response;
__u32 id;
__u32 option;
};
struct rdma_ucm_query_route_resp {
- __u64 node_guid;
+ __aligned_u64 node_guid;
struct ib_user_path_rec ib_route[2];
struct sockaddr_in6 src_addr;
struct sockaddr_in6 dst_addr;
@@ -159,7 +167,7 @@ struct rdma_ucm_query_route_resp {
};
struct rdma_ucm_query_addr_resp {
- __u64 node_guid;
+ __aligned_u64 node_guid;
__u8 port_num;
__u8 reserved;
__u16 pkey;
@@ -210,7 +218,7 @@ struct rdma_ucm_listen {
};
struct rdma_ucm_accept {
- __u64 uid;
+ __aligned_u64 uid;
struct rdma_ucm_conn_param conn_param;
__u32 id;
__u32 reserved;
@@ -228,7 +236,7 @@ struct rdma_ucm_disconnect {
};
struct rdma_ucm_init_qp_attr {
- __u64 response;
+ __aligned_u64 response;
__u32 id;
__u32 qp_state;
};
@@ -239,8 +247,8 @@ struct rdma_ucm_notify {
};
struct rdma_ucm_join_ip_mcast {
- __u64 response; /* rdma_ucm_create_id_resp */
- __u64 uid;
+ __aligned_u64 response; /* rdma_ucm_create_id_resp */
+ __aligned_u64 uid;
struct sockaddr_in6 addr;
__u32 id;
};
@@ -253,8 +261,8 @@ enum {
};
struct rdma_ucm_join_mcast {
- __u64 response; /* rdma_ucma_create_id_resp */
- __u64 uid;
+	__aligned_u64 response;		/* rdma_ucm_create_id_resp */
+ __aligned_u64 uid;
__u32 id;
__u16 addr_size;
__u16 join_flags;
@@ -262,18 +270,23 @@ struct rdma_ucm_join_mcast {
};
struct rdma_ucm_get_event {
- __u64 response;
+ __aligned_u64 response;
};
struct rdma_ucm_event_resp {
- __u64 uid;
+ __aligned_u64 uid;
__u32 id;
__u32 event;
__u32 status;
+ /*
+ * NOTE: This union is not aligned to 8 bytes so none of the union
+ * members may contain a u64 or anything with higher alignment than 4.
+ */
union {
struct rdma_ucm_conn_param conn;
struct rdma_ucm_ud_param ud;
} param;
+ __u32 reserved;
};
/* Option levels */
@@ -291,7 +304,7 @@ enum {
};
struct rdma_ucm_set_option {
- __u64 optval;
+ __aligned_u64 optval;
__u32 id;
__u32 level;
__u32 optname;
@@ -299,7 +312,7 @@ struct rdma_ucm_set_option {
};
struct rdma_ucm_migrate_id {
- __u64 response;
+ __aligned_u64 response;
__u32 id;
__u32 fd;
};
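[Editor's sketch] The conversions above, and the note in rdma_ucm_event_resp, all address the same problem: on 32-bit x86 a plain __u64 only has 4-byte alignment, so a struct shared with a 64-bit kernel can lay out differently for 32-bit userspace. A minimal illustration of what __aligned_u64 changes (the struct and field names below are invented, not taken from this header):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct plain {			/* as with the old __u64 fields */
	uint32_t id;
	uint64_t response;	/* offset 4 on i386, offset 8 on x86_64 */
};

struct aligned {		/* as with __aligned_u64 */
	uint32_t id;
	uint64_t response __attribute__((aligned(8)));	/* offset 8 everywhere */
};

int main(void)
{
	printf("plain:   offsetof(response) = %zu, sizeof = %zu\n",
	       offsetof(struct plain, response), sizeof(struct plain));
	printf("aligned: offsetof(response) = %zu, sizeof = %zu\n",
	       offsetof(struct aligned, response), sizeof(struct aligned));
	return 0;
}

Built for i386 the "plain" layout prints offset 4 / size 12, while for x86_64 it prints offset 8 / size 16; the "aligned" layout prints offset 8 / size 16 on both, which is what lets one kernel copy serve both ABIs.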
diff --git a/include/uapi/rdma/rdma_user_ioctl.h b/include/uapi/rdma/rdma_user_ioctl.h
index 46de0885e800..d223f4164a0f 100644
--- a/include/uapi/rdma/rdma_user_ioctl.h
+++ b/include/uapi/rdma/rdma_user_ioctl.h
@@ -34,49 +34,13 @@
#ifndef RDMA_USER_IOCTL_H
#define RDMA_USER_IOCTL_H
-#include <linux/types.h>
-#include <linux/ioctl.h>
#include <rdma/ib_user_mad.h>
#include <rdma/hfi/hfi1_ioctl.h>
+#include <rdma/rdma_user_ioctl_cmds.h>
-/* Documentation/ioctl/ioctl-number.txt */
-#define RDMA_IOCTL_MAGIC 0x1b
/* Legacy name, for user space applications which already use it */
#define IB_IOCTL_MAGIC RDMA_IOCTL_MAGIC
-#define RDMA_VERBS_IOCTL \
- _IOWR(RDMA_IOCTL_MAGIC, 1, struct ib_uverbs_ioctl_hdr)
-
-#define UVERBS_ID_NS_MASK 0xF000
-#define UVERBS_ID_NS_SHIFT 12
-
-enum {
- /* User input */
- UVERBS_ATTR_F_MANDATORY = 1U << 0,
- /*
- * Valid output bit should be ignored and considered set in
- * mandatory fields. This bit is kernel output.
- */
- UVERBS_ATTR_F_VALID_OUTPUT = 1U << 1,
-};
-
-struct ib_uverbs_attr {
- __u16 attr_id; /* command specific type attribute */
- __u16 len; /* only for pointers */
- __u16 flags; /* combination of UVERBS_ATTR_F_XXXX */
- __u16 reserved;
- __aligned_u64 data; /* ptr to command, inline data or idr/fd */
-};
-
-struct ib_uverbs_ioctl_hdr {
- __u16 length;
- __u16 object_id;
- __u16 method_id;
- __u16 num_attrs;
- __aligned_u64 reserved;
- struct ib_uverbs_attr attrs[0];
-};
-
/*
 * General block assignments.
 * It is closed on purpose; do not expose it to user space.
diff --git a/include/uapi/rdma/rdma_user_ioctl_cmds.h b/include/uapi/rdma/rdma_user_ioctl_cmds.h
new file mode 100644
index 000000000000..1da5a1e1f3a8
--- /dev/null
+++ b/include/uapi/rdma/rdma_user_ioctl_cmds.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef RDMA_USER_IOCTL_CMDS_H
+#define RDMA_USER_IOCTL_CMDS_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/* Documentation/ioctl/ioctl-number.txt */
+#define RDMA_IOCTL_MAGIC 0x1b
+#define RDMA_VERBS_IOCTL \
+ _IOWR(RDMA_IOCTL_MAGIC, 1, struct ib_uverbs_ioctl_hdr)
+
+enum {
+ /* User input */
+ UVERBS_ATTR_F_MANDATORY = 1U << 0,
+ /*
+ * Valid output bit should be ignored and considered set in
+ * mandatory fields. This bit is kernel output.
+ */
+ UVERBS_ATTR_F_VALID_OUTPUT = 1U << 1,
+};
+
+struct ib_uverbs_attr {
+ __u16 attr_id; /* command specific type attribute */
+ __u16 len; /* only for pointers */
+ __u16 flags; /* combination of UVERBS_ATTR_F_XXXX */
+ union {
+ struct {
+ __u8 elem_id;
+ __u8 reserved;
+ } enum_data;
+ __u16 reserved;
+ } attr_data;
+ __aligned_u64 data; /* ptr to command, inline data or idr/fd */
+};
+
+struct ib_uverbs_ioctl_hdr {
+ __u16 length;
+ __u16 object_id;
+ __u16 method_id;
+ __u16 num_attrs;
+ __aligned_u64 reserved1;
+ __u32 driver_id;
+ __u32 reserved2;
+ struct ib_uverbs_attr attrs[0];
+};
+
+enum rdma_driver_id {
+ RDMA_DRIVER_UNKNOWN,
+ RDMA_DRIVER_MLX5,
+ RDMA_DRIVER_MLX4,
+ RDMA_DRIVER_CXGB3,
+ RDMA_DRIVER_CXGB4,
+ RDMA_DRIVER_MTHCA,
+ RDMA_DRIVER_BNXT_RE,
+ RDMA_DRIVER_OCRDMA,
+ RDMA_DRIVER_NES,
+ RDMA_DRIVER_I40IW,
+ RDMA_DRIVER_VMW_PVRDMA,
+ RDMA_DRIVER_QEDR,
+ RDMA_DRIVER_HNS,
+ RDMA_DRIVER_USNIC,
+ RDMA_DRIVER_RXE,
+ RDMA_DRIVER_HFI1,
+ RDMA_DRIVER_QIB,
+};
+
+#endif
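[Editor's sketch] For orientation, a rough userspace sketch of how this header is meant to be consumed. The object, method and attribute IDs passed in are placeholders rather than real uverbs values, and real applications are expected to go through rdma-core instead of raw ioctls; only the struct layout, the UVERBS_ATTR_F_* flags and RDMA_VERBS_IOCTL come from this file:

#include <stdlib.h>
#include <sys/ioctl.h>
#include <rdma/rdma_user_ioctl_cmds.h>

/* Build a one-attribute request and push it through the verbs ioctl.
 * cmd_fd is an open /dev/infiniband/uverbsN file descriptor.
 */
static int do_verbs_ioctl(int cmd_fd, __u16 object_id, __u16 method_id,
			  __u16 attr_id, __aligned_u64 data)
{
	const __u16 num_attrs = 1;
	size_t len = sizeof(struct ib_uverbs_ioctl_hdr) +
		     num_attrs * sizeof(struct ib_uverbs_attr);
	struct ib_uverbs_ioctl_hdr *hdr = calloc(1, len);
	int ret;

	if (!hdr)
		return -1;

	hdr->length = len;
	hdr->object_id = object_id;
	hdr->method_id = method_id;
	hdr->num_attrs = num_attrs;
	hdr->driver_id = RDMA_DRIVER_UNKNOWN;	/* field added in this series */

	hdr->attrs[0].attr_id = attr_id;
	hdr->attrs[0].flags = UVERBS_ATTR_F_MANDATORY;
	hdr->attrs[0].len = 0;			/* only used for pointers */
	hdr->attrs[0].data = data;		/* inline data, ptr, or idr/fd */

	ret = ioctl(cmd_fd, RDMA_VERBS_IOCTL, hdr);
	free(hdr);
	return ret;
}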
diff --git a/include/uapi/rdma/rdma_user_rxe.h b/include/uapi/rdma/rdma_user_rxe.h
index bdeea948b2f3..1f8a9e7daea4 100644
--- a/include/uapi/rdma/rdma_user_rxe.h
+++ b/include/uapi/rdma/rdma_user_rxe.h
@@ -35,6 +35,9 @@
#define RDMA_USER_RXE_H
#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/in6.h>
union rxe_gid {
__u8 raw[16];
@@ -55,16 +58,17 @@ struct rxe_global_route {
struct rxe_av {
__u8 port_num;
__u8 network_type;
+ __u16 reserved1;
+ __u32 reserved2;
struct rxe_global_route grh;
union {
- struct sockaddr _sockaddr;
struct sockaddr_in _sockaddr_in;
struct sockaddr_in6 _sockaddr_in6;
} sgid_addr, dgid_addr;
};
struct rxe_send_wr {
- __u64 wr_id;
+ __aligned_u64 wr_id;
__u32 num_sge;
__u32 opcode;
__u32 send_flags;
@@ -74,36 +78,42 @@ struct rxe_send_wr {
} ex;
union {
struct {
- __u64 remote_addr;
+ __aligned_u64 remote_addr;
__u32 rkey;
+ __u32 reserved;
} rdma;
struct {
- __u64 remote_addr;
- __u64 compare_add;
- __u64 swap;
+ __aligned_u64 remote_addr;
+ __aligned_u64 compare_add;
+ __aligned_u64 swap;
__u32 rkey;
+ __u32 reserved;
} atomic;
struct {
__u32 remote_qpn;
__u32 remote_qkey;
__u16 pkey_index;
} ud;
+ /* reg is only used by the kernel and is not part of the uapi */
struct {
- struct ib_mr *mr;
+ union {
+ struct ib_mr *mr;
+ __aligned_u64 reserved;
+ };
__u32 key;
- int access;
+ __u32 access;
} reg;
} wr;
};
struct rxe_sge {
- __u64 addr;
+ __aligned_u64 addr;
__u32 length;
__u32 lkey;
};
struct mminfo {
- __u64 offset;
+ __aligned_u64 offset;
__u32 size;
__u32 pad;
};
@@ -114,6 +124,7 @@ struct rxe_dma_info {
__u32 cur_sge;
__u32 num_sge;
__u32 sge_offset;
+ __u32 reserved;
union {
__u8 inline_data[0];
struct rxe_sge sge[0];
@@ -125,7 +136,7 @@ struct rxe_send_wqe {
struct rxe_av av;
__u32 status;
__u32 state;
- __u64 iova;
+ __aligned_u64 iova;
__u32 mask;
__u32 first_psn;
__u32 last_psn;
@@ -136,10 +147,33 @@ struct rxe_send_wqe {
};
struct rxe_recv_wqe {
- __u64 wr_id;
+ __aligned_u64 wr_id;
__u32 num_sge;
__u32 padding;
struct rxe_dma_info dma;
};
+struct rxe_create_cq_resp {
+ struct mminfo mi;
+};
+
+struct rxe_resize_cq_resp {
+ struct mminfo mi;
+};
+
+struct rxe_create_qp_resp {
+ struct mminfo rq_mi;
+ struct mminfo sq_mi;
+};
+
+struct rxe_create_srq_resp {
+ struct mminfo mi;
+ __u32 srq_num;
+ __u32 reserved;
+};
+
+struct rxe_modify_srq_cmd {
+ __aligned_u64 mmap_info_addr;
+};
+
#endif /* RDMA_USER_RXE_H */
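[Editor's sketch] The reg member above hides a kernel-only pointer behind a union with __aligned_u64 so the structure keeps the same size and alignment regardless of how wide kernel pointers are. A stand-alone illustration of that idiom, with invented names rather than rxe's actual types:

#include <linux/types.h>

struct kernel_only_obj;				/* opaque to user space */

struct reg_like_slot {
	union {
		struct kernel_only_obj *obj;	/* meaningful only in the kernel */
		__aligned_u64 reserved;		/* pins the slot to 8 bytes */
	};
	__u32 key;
	__u32 access;
};

/* Holds on both 32-bit and 64-bit builds: 8-byte union + two __u32. */
_Static_assert(sizeof(struct reg_like_slot) == 16,
	       "layout must not depend on pointer width");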
diff --git a/include/uapi/rdma/vmw_pvrdma-abi.h b/include/uapi/rdma/vmw_pvrdma-abi.h
index 02ca0d0f1eb7..d13fd490b66d 100644
--- a/include/uapi/rdma/vmw_pvrdma-abi.h
+++ b/include/uapi/rdma/vmw_pvrdma-abi.h
@@ -143,7 +143,7 @@ struct pvrdma_alloc_pd_resp {
};
struct pvrdma_create_cq {
- __u64 buf_addr;
+ __aligned_u64 buf_addr;
__u32 buf_size;
__u32 reserved;
};
@@ -154,13 +154,13 @@ struct pvrdma_create_cq_resp {
};
struct pvrdma_resize_cq {
- __u64 buf_addr;
+ __aligned_u64 buf_addr;
__u32 buf_size;
__u32 reserved;
};
struct pvrdma_create_srq {
- __u64 buf_addr;
+ __aligned_u64 buf_addr;
__u32 buf_size;
__u32 reserved;
};
@@ -171,25 +171,25 @@ struct pvrdma_create_srq_resp {
};
struct pvrdma_create_qp {
- __u64 rbuf_addr;
- __u64 sbuf_addr;
+ __aligned_u64 rbuf_addr;
+ __aligned_u64 sbuf_addr;
__u32 rbuf_size;
__u32 sbuf_size;
- __u64 qp_addr;
+ __aligned_u64 qp_addr;
};
/* PVRDMA masked atomic compare and swap */
struct pvrdma_ex_cmp_swap {
- __u64 swap_val;
- __u64 compare_val;
- __u64 swap_mask;
- __u64 compare_mask;
+ __aligned_u64 swap_val;
+ __aligned_u64 compare_val;
+ __aligned_u64 swap_mask;
+ __aligned_u64 compare_mask;
};
/* PVRDMA masked atomic fetch and add */
struct pvrdma_ex_fetch_add {
- __u64 add_val;
- __u64 field_boundary;
+ __aligned_u64 add_val;
+ __aligned_u64 field_boundary;
};
/* PVRDMA address vector. */
@@ -207,14 +207,14 @@ struct pvrdma_av {
/* PVRDMA scatter/gather entry */
struct pvrdma_sge {
- __u64 addr;
+ __aligned_u64 addr;
__u32 length;
__u32 lkey;
};
/* PVRDMA receive queue work request */
struct pvrdma_rq_wqe_hdr {
- __u64 wr_id; /* wr id */
+ __aligned_u64 wr_id; /* wr id */
__u32 num_sge; /* size of s/g array */
__u32 total_len; /* reserved */
};
@@ -222,7 +222,7 @@ struct pvrdma_rq_wqe_hdr {
/* PVRDMA send queue work request */
struct pvrdma_sq_wqe_hdr {
- __u64 wr_id; /* wr id */
+ __aligned_u64 wr_id; /* wr id */
__u32 num_sge; /* size of s/g array */
__u32 total_len; /* reserved */
__u32 opcode; /* operation type */
@@ -234,19 +234,19 @@ struct pvrdma_sq_wqe_hdr {
__u32 reserved;
union {
struct {
- __u64 remote_addr;
+ __aligned_u64 remote_addr;
__u32 rkey;
__u8 reserved[4];
} rdma;
struct {
- __u64 remote_addr;
- __u64 compare_add;
- __u64 swap;
+ __aligned_u64 remote_addr;
+ __aligned_u64 compare_add;
+ __aligned_u64 swap;
__u32 rkey;
__u32 reserved;
} atomic;
struct {
- __u64 remote_addr;
+ __aligned_u64 remote_addr;
__u32 log_arg_sz;
__u32 rkey;
union {
@@ -255,13 +255,14 @@ struct pvrdma_sq_wqe_hdr {
} wr_data;
} masked_atomics;
struct {
- __u64 iova_start;
- __u64 pl_pdir_dma;
+ __aligned_u64 iova_start;
+ __aligned_u64 pl_pdir_dma;
__u32 page_shift;
__u32 page_list_len;
__u32 length;
__u32 access_flags;
__u32 rkey;
+ __u32 reserved;
} fast_reg;
struct {
__u32 remote_qpn;
@@ -274,8 +275,8 @@ struct pvrdma_sq_wqe_hdr {
/* Completion queue element. */
struct pvrdma_cqe {
- __u64 wr_id;
- __u64 qp;
+ __aligned_u64 wr_id;
+ __aligned_u64 qp;
__u32 opcode;
__u32 status;
__u32 byte_len;