Diffstat (limited to 'drivers/infiniband/sw/rxe/rxe_verbs.h')
-rw-r--r-- | drivers/infiniband/sw/rxe/rxe_verbs.h | 184
1 file changed, 93 insertions(+), 91 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 92de39c4a7c1..5f5cbfcb3569 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -1,34 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
 /*
  * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
  * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *	Redistribution and use in source and binary forms, with or
- *	without modification, are permitted provided that the following
- *	conditions are met:
- *
- *	- Redistributions of source code must retain the above
- *	  copyright notice, this list of conditions and the following
- *	  disclaimer.
- *
- *	- Redistributions in binary form must reproduce the above
- *	  copyright notice, this list of conditions and the following
- *	  disclaimer in the documentation and/or other materials
- *	  provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
  */
 
 #ifndef RXE_VERBS_H
@@ -36,7 +9,6 @@
 
 #include <linux/interrupt.h>
 #include <linux/workqueue.h>
-#include <rdma/rdma_user_rxe.h>
 #include "rxe_pool.h"
 #include "rxe_task.h"
 #include "rxe_hw_counters.h"
@@ -62,19 +34,20 @@ static inline int psn_compare(u32 psn_a, u32 psn_b)
 
 struct rxe_ucontext {
 	struct ib_ucontext	ibuc;
-	struct rxe_pool_entry	pelem;
+	struct rxe_pool_elem	elem;
 };
 
 struct rxe_pd {
 	struct ib_pd		ibpd;
-	struct rxe_pool_entry	pelem;
+	struct rxe_pool_elem	elem;
 };
 
 struct rxe_ah {
 	struct ib_ah		ibah;
-	struct rxe_pool_entry	pelem;
-	struct rxe_pd		*pd;
+	struct rxe_pool_elem	elem;
 	struct rxe_av		av;
+	bool			is_user;
+	int			ah_num;
 };
 
 struct rxe_cqe {
@@ -86,13 +59,14 @@ struct rxe_cqe {
 
 struct rxe_cq {
 	struct ib_cq		ibcq;
-	struct rxe_pool_entry	pelem;
+	struct rxe_pool_elem	elem;
 	struct rxe_queue	*queue;
 	spinlock_t		cq_lock;
 	u8			notify;
 	bool			is_dying;
-	int			is_user;
+	bool			is_user;
 	struct tasklet_struct	comp_task;
+	atomic_t		num_wq;
 };
 
 enum wqe_state {
@@ -121,7 +95,7 @@ struct rxe_rq {
 
 struct rxe_srq {
 	struct ib_srq		ibsrq;
-	struct rxe_pool_entry	pelem;
+	struct rxe_pool_elem	elem;
 	struct rxe_pd		*pd;
 	struct rxe_rq		rq;
 	u32			srq_num;
@@ -149,11 +123,13 @@ struct rxe_req_info {
 	int			need_rd_atomic;
 	int			wait_psn;
 	int			need_retry;
+	int			wait_for_rnr_timer;
 	int			noack_pkts;
 	struct rxe_task		task;
 };
 
 struct rxe_comp_info {
+	enum rxe_qp_state	state;
 	u32			psn;
 	int			opcode;
 	int			timeout;
@@ -180,10 +156,9 @@ struct resp_res {
 
 	union {
 		struct {
-			struct sk_buff	*skb;
+			u64		orig_val;
 		} atomic;
 		struct {
-			struct rxe_mem	*mr;
 			u64		va_org;
 			u32		rkey;
 			u32		length;
@@ -210,11 +185,11 @@ struct rxe_resp_info {
 
 	/* RDMA read / atomic only */
 	u64			va;
-	struct rxe_mem		*mr;
+	u64			offset;
+	struct rxe_mr		*mr;
 	u32			resid;
 	u32			rkey;
 	u32			length;
-	u64			atomic_orig;
 
 	/* SRQ only */
 	struct {
@@ -233,12 +208,12 @@ struct rxe_resp_info {
 };
 
 struct rxe_qp {
-	struct rxe_pool_entry	pelem;
 	struct ib_qp		ibqp;
+	struct rxe_pool_elem	elem;
 	struct ib_qp_attr	attr;
 	unsigned int		valid;
 	unsigned int		mtu;
-	int			is_user;
+	bool			is_user;
 
 	struct rxe_pd		*pd;
 	struct rxe_srq		*srq;
@@ -257,13 +232,10 @@ struct rxe_qp {
 	struct rxe_av		pri_av;
 	struct rxe_av		alt_av;
 
-	/* list of mcast groups qp has joined (for cleanup) */
-	struct list_head	grp_list;
-	spinlock_t		grp_lock; /* guard grp_list */
+	atomic_t		mcg_num;
 
 	struct sk_buff_head	req_pkts;
 	struct sk_buff_head	resp_pkts;
-	struct sk_buff_head	send_pkts;
 
 	struct rxe_req_info	req;
 	struct rxe_comp_info	comp;
@@ -289,19 +261,20 @@ struct rxe_qp {
 	struct execute_work	cleanup_work;
 };
 
-enum rxe_mem_state {
-	RXE_MEM_STATE_ZOMBIE,
-	RXE_MEM_STATE_INVALID,
-	RXE_MEM_STATE_FREE,
-	RXE_MEM_STATE_VALID,
+enum rxe_mr_state {
+	RXE_MR_STATE_INVALID,
+	RXE_MR_STATE_FREE,
+	RXE_MR_STATE_VALID,
+};
+
+enum rxe_mr_copy_dir {
+	RXE_TO_MR_OBJ,
+	RXE_FROM_MR_OBJ,
 };
 
-enum rxe_mem_type {
-	RXE_MEM_TYPE_NONE,
-	RXE_MEM_TYPE_DMA,
-	RXE_MEM_TYPE_MR,
-	RXE_MEM_TYPE_FMR,
-	RXE_MEM_TYPE_MW,
+enum rxe_mr_lookup_type {
+	RXE_LOOKUP_LOCAL,
+	RXE_LOOKUP_REMOTE,
 };
 
 #define RXE_BUF_PER_MAP		(PAGE_SIZE / sizeof(struct rxe_phys_buf))
@@ -315,24 +288,23 @@ struct rxe_map {
 	struct rxe_phys_buf	buf[RXE_BUF_PER_MAP];
 };
 
-struct rxe_mem {
-	struct rxe_pool_entry	pelem;
-	union {
-		struct ib_mr		ibmr;
-		struct ib_mw		ibmw;
-	};
+static inline int rkey_is_mw(u32 rkey)
+{
+	u32 index = rkey >> 8;
+
+	return (index >= RXE_MIN_MW_INDEX) && (index <= RXE_MAX_MW_INDEX);
+}
+
+struct rxe_mr {
+	struct rxe_pool_elem	elem;
+	struct ib_mr		ibmr;
 
-	struct rxe_pd		*pd;
 	struct ib_umem		*umem;
 
 	u32			lkey;
 	u32			rkey;
-
-	enum rxe_mem_state	state;
-	enum rxe_mem_type	type;
-	u64			va;
-	u64			iova;
-	size_t			length;
+	enum rxe_mr_state	state;
+	enum ib_mr_type		type;
 	u32			offset;
 	int			access;
 
@@ -347,52 +319,65 @@ struct rxe_mem {
 	u32			max_buf;
 	u32			num_map;
 
+	atomic_t		num_mw;
+
 	struct rxe_map		**map;
 };
 
-struct rxe_mc_grp {
-	struct rxe_pool_entry	pelem;
-	spinlock_t		mcg_lock; /* guard group */
+enum rxe_mw_state {
+	RXE_MW_STATE_INVALID	= RXE_MR_STATE_INVALID,
+	RXE_MW_STATE_FREE	= RXE_MR_STATE_FREE,
+	RXE_MW_STATE_VALID	= RXE_MR_STATE_VALID,
+};
+
+struct rxe_mw {
+	struct ib_mw		ibmw;
+	struct rxe_pool_elem	elem;
+	spinlock_t		lock;
+	enum rxe_mw_state	state;
+	struct rxe_qp		*qp; /* Type 2 only */
+	struct rxe_mr		*mr;
+	u32			rkey;
+	int			access;
+	u64			addr;
+	u64			length;
+};
+
+struct rxe_mcg {
+	struct rb_node		node;
+	struct kref		ref_cnt;
 	struct rxe_dev		*rxe;
 	struct list_head	qp_list;
 	union ib_gid		mgid;
-	int			num_qp;
+	atomic_t		qp_num;
 	u32			qkey;
 	u16			pkey;
 };
 
-struct rxe_mc_elem {
-	struct rxe_pool_entry	pelem;
+struct rxe_mca {
 	struct list_head	qp_list;
-	struct list_head	grp_list;
 	struct rxe_qp		*qp;
-	struct rxe_mc_grp	*grp;
 };
 
 struct rxe_port {
	struct ib_port_attr	attr;
-	u16			*pkey_tbl;
 	__be64			port_guid;
 	__be64			subnet_prefix;
 	spinlock_t		port_lock; /* guard port */
 	unsigned int		mtu_cap;
 	/* special QPs */
-	u32			qp_smi_index;
 	u32			qp_gsi_index;
 };
 
 struct rxe_dev {
 	struct ib_device	ib_dev;
 	struct ib_device_attr	attr;
-	struct device_dma_parameters dma_parms;
 	int			max_ucontext;
 	int			max_inline_data;
 	struct mutex		usdev_lock;
 
 	struct net_device	*ndev;
 
-	int			xmit_errors;
-
 	struct rxe_pool		uc_pool;
 	struct rxe_pool		pd_pool;
 	struct rxe_pool		ah_pool;
@@ -401,8 +386,12 @@ struct rxe_dev {
 	struct rxe_pool		cq_pool;
 	struct rxe_pool		mr_pool;
 	struct rxe_pool		mw_pool;
-	struct rxe_pool		mc_grp_pool;
-	struct rxe_pool		mc_elem_pool;
+
+	/* multicast support */
+	spinlock_t		mcg_lock;
+	struct rb_root		mcg_tree;
+	atomic_t		mcg_num;
+	atomic_t		mcg_attach;
 
 	spinlock_t		pending_lock; /* guard pending_mmaps */
 	struct list_head	pending_mmaps;
@@ -456,18 +445,31 @@ static inline struct rxe_cq *to_rcq(struct ib_cq *cq)
 	return cq ? container_of(cq, struct rxe_cq, ibcq) : NULL;
 }
 
-static inline struct rxe_mem *to_rmr(struct ib_mr *mr)
+static inline struct rxe_mr *to_rmr(struct ib_mr *mr)
 {
-	return mr ? container_of(mr, struct rxe_mem, ibmr) : NULL;
+	return mr ? container_of(mr, struct rxe_mr, ibmr) : NULL;
 }
 
-static inline struct rxe_mem *to_rmw(struct ib_mw *mw)
+static inline struct rxe_mw *to_rmw(struct ib_mw *mw)
 {
-	return mw ? container_of(mw, struct rxe_mem, ibmw) : NULL;
+	return mw ? container_of(mw, struct rxe_mw, ibmw) : NULL;
 }
 
-int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name);
+static inline struct rxe_pd *rxe_ah_pd(struct rxe_ah *ah)
+{
+	return to_rpd(ah->ibah.pd);
+}
+
+static inline struct rxe_pd *mr_pd(struct rxe_mr *mr)
+{
+	return to_rpd(mr->ibmr.pd);
+}
 
-void rxe_mc_cleanup(struct rxe_pool_entry *arg);
+static inline struct rxe_pd *rxe_mw_pd(struct rxe_mw *mw)
+{
+	return to_rpd(mw->ibmw.pd);
+}
+
+int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name);
 
 #endif /* RXE_VERBS_H */
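
A note on the new rkey_is_mw() helper added above: rxe packs a pool index into the upper 24 bits of every rkey and keeps an 8-bit key byte in the low bits, so deciding whether a remote key names a memory region (MR) or a memory window (MW) only requires checking whether that index falls inside the MW index window. The snippet below is a minimal stand-alone sketch of that check, assuming placeholder values for RXE_MIN_MW_INDEX and RXE_MAX_MW_INDEX (in the kernel tree the real bounds come from rxe_param.h); it is illustrative only, not the driver's code.

/*
 * Stand-alone illustration of the rkey_is_mw() index test.
 * RXE_MIN_MW_INDEX / RXE_MAX_MW_INDEX below are assumed example
 * values, not the ones used by the kernel driver.
 */
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

#define RXE_MIN_MW_INDEX	0x00010000u
#define RXE_MAX_MW_INDEX	0x0001ffffu

/* upper 24 bits of an rkey are the pool index, low 8 bits the key byte */
static int rkey_is_mw(uint32_t rkey)
{
	uint32_t index = rkey >> 8;

	return (index >= RXE_MIN_MW_INDEX) && (index <= RXE_MAX_MW_INDEX);
}

int main(void)
{
	uint32_t mw_rkey = (RXE_MIN_MW_INDEX << 8) | 0x5a;	/* index inside the MW window */
	uint32_t mr_rkey = 0x00001234;				/* index below the MW window */

	printf("0x%08" PRIx32 " -> %s\n", mw_rkey, rkey_is_mw(mw_rkey) ? "MW" : "MR");
	printf("0x%08" PRIx32 " -> %s\n", mr_rkey, rkey_is_mw(mr_rkey) ? "MW" : "MR");
	return 0;
}

This index encoding is what lets the responder decide whether an incoming rkey should be looked up in the MW pool or the MR pool before applying the container_of()-based converters (to_rmr()/to_rmw()) shown in the last hunk.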